From 32c7722d73130f37019b91ef6cb90109879dfced Mon Sep 17 00:00:00 2001 From: Trantorian Date: Fri, 23 Feb 2024 12:14:38 +0000 Subject: [PATCH 1/7] perf(state_commitment): :zap: improved state commitment perf State commitment now calculates hashes in parallel, with bonsai trie comitments being run in a blocking-safe thread. --- Cargo.lock | 11 +++ Cargo.toml | 1 + crates/client/db/src/bonsai_db.rs | 5 +- crates/client/deoxys/Cargo.toml | 1 + .../client/deoxys/src/commitments/events.rs | 79 +++++++++++++----- crates/client/deoxys/src/commitments/lib.rs | 17 ++-- .../deoxys/src/commitments/transactions.rs | 37 ++++++--- crates/client/deoxys/src/l2.rs | 6 +- crates/client/deoxys/src/utils/convert.rs | 82 +++++++++---------- crates/primitives/transactions/src/lib.rs | 22 ++--- 10 files changed, 163 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 735b3ac932..725a1e3c16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2248,6 +2248,16 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-skiplist" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.18" @@ -5947,6 +5957,7 @@ dependencies = [ "bitvec", "blockifier", "bonsai-trie", + "crossbeam-skiplist", "env_logger", "ethers", "futures", diff --git a/Cargo.toml b/Cargo.toml index 073bac039f..4899fcc197 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -394,6 +394,7 @@ tower = "0.4" tracing = "0.1.37" tracing-subscriber = "0.3.16" validator = "0.12" +crossbeam-skiplist = "0.1" [patch."https://github.com/w3f/ring-vrf"] bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf?rev=3ddc20", version = "0.0.4", rev = "3ddc20" } diff --git a/crates/client/db/src/bonsai_db.rs b/crates/client/db/src/bonsai_db.rs index 5519a7abe1..d580dfe503 100644 --- 
a/crates/client/db/src/bonsai_db.rs +++ b/crates/client/db/src/bonsai_db.rs @@ -9,6 +9,7 @@ use sp_runtime::traits::Block as BlockT; use crate::error::BonsaiDbError; /// Represents a Bonsai database instance parameterized by a block type. +#[derive(Clone)] pub struct BonsaiDb { /// Database interface for key-value operations. pub(crate) db: Arc, @@ -16,7 +17,7 @@ pub struct BonsaiDb { pub(crate) _marker: PhantomData, } -impl BonsaiDatabase for &BonsaiDb { +impl BonsaiDatabase for BonsaiDb { type Batch = DBTransaction; type DatabaseError = BonsaiDbError; @@ -174,7 +175,7 @@ impl BonsaiDatabase for TransactionWrapper { } /// This implementation is a stub to mute any error but is is currently not used. -impl BonsaiPersistentDatabase for &BonsaiDb { +impl BonsaiPersistentDatabase for BonsaiDb { type Transaction = TransactionWrapper; type DatabaseError = BonsaiDbError; diff --git a/crates/client/deoxys/Cargo.toml b/crates/client/deoxys/Cargo.toml index 92f745bc89..24006779b7 100644 --- a/crates/client/deoxys/Cargo.toml +++ b/crates/client/deoxys/Cargo.toml @@ -42,6 +42,7 @@ serde = { workspace = true, default-features = true } tokio = { workspace = true, features = ["macros", "parking_lot", "test-util"] } url = { workspace = true } validator = { workspace = true, features = ["derive"] } +crossbeam-skiplist ={ workspace = true } madara-runtime = { workspace = true } parity-scale-codec = { workspace = true, features = ["derive"] } diff --git a/crates/client/deoxys/src/commitments/events.rs b/crates/client/deoxys/src/commitments/events.rs index 2be64c12a4..9684a99ba6 100644 --- a/crates/client/deoxys/src/commitments/events.rs +++ b/crates/client/deoxys/src/commitments/events.rs @@ -1,9 +1,10 @@ use std::sync::Arc; -use anyhow::Ok; use bitvec::vec::BitVec; use bonsai_trie::id::BasicIdBuilder; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; +use crossbeam_skiplist::SkipMap; +use lazy_static::lazy_static; use mc_db::bonsai_db::BonsaiDb; use mp_felt::Felt252Wrapper; use 
mp_hashers::HasherT; @@ -12,6 +13,7 @@ use starknet_api::transaction::Event; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; +use tokio::task::{spawn_blocking, JoinSet}; /// Calculate the hash of an event. /// @@ -53,40 +55,75 @@ pub fn calculate_event_hash(event: &Event) -> FieldElement { /// # Returns /// /// The merkle root of the merkle tree built from the events. -pub(crate) fn event_commitment( +pub(crate) async fn event_commitment( events: &[Event], - backend: &Arc>, -) -> Result + backend: Arc>, +) -> Result where B: BlockT, H: HasherT, { - if events.len() > 0 { - let config = BonsaiStorageConfig::default(); - let bonsai_db = backend.as_ref(); - let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); + if events.is_empty() { + return Ok(Felt252Wrapper::ZERO); + } + + let config = BonsaiStorageConfig::default(); + let mut bonsai_storage = BonsaiStorage::<_, _, Pedersen>::new(backend.as_ref().clone(), config) + .expect("Failed to create bonsai storage"); - let mut id_builder = BasicIdBuilder::new(); + let mut id_builder = BasicIdBuilder::new(); - let zero = id_builder.new_id(); - bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); + let zero = id_builder.new_id(); + bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); - for (i, event) in events.iter().enumerate() { - let event_hash = calculate_event_hash::(event); - let key = BitVec::from_vec(i.to_be_bytes().to_vec()); - let value = Felt::from(Felt252Wrapper::from(event_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); - } + let mut set = JoinSet::new(); + for (i, event) in events.iter().cloned().enumerate() { + let arc_event = Arc::new(event); + set.spawn(async move { (i, get_hash::(&Arc::clone(&arc_event))) }); + } + + while let Some(res) = set.join_next().await { + let (i, 
event_hash) = res.map_err(|e| format!("Failed to compute event hash: {e}"))?; + let key = BitVec::from_vec(i.to_be_bytes().to_vec()); + let value = Felt::from(Felt252Wrapper::from(event_hash)); + bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); + } + let root_hash = spawn_blocking(move || { let id = id_builder.new_id(); bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); bonsai_storage.revert_to(zero).unwrap(); - Ok(Felt252Wrapper::from(root_hash)) - } else { - Ok(Felt252Wrapper::ZERO) + root_hash + }) + .await + .unwrap(); + + Ok(Felt252Wrapper::from(root_hash)) +} + +lazy_static! { + static ref EVENT_HASHES: SkipMap = SkipMap::new(); +} + +fn get_hash(event: &Event) -> FieldElement +where + H: HasherT, +{ + match EVENT_HASHES.get(event) { + Some(entry) => entry.value().clone(), + None => store_hash::(event), } } + +fn store_hash(event: &Event) -> FieldElement +where + H: HasherT, +{ + let event_hash = calculate_event_hash::(event); + EVENT_HASHES.insert(event.clone(), event_hash); + + event_hash +} diff --git a/crates/client/deoxys/src/commitments/lib.rs b/crates/client/deoxys/src/commitments/lib.rs index c54367d897..8e260375dd 100644 --- a/crates/client/deoxys/src/commitments/lib.rs +++ b/crates/client/deoxys/src/commitments/lib.rs @@ -18,18 +18,23 @@ use super::transactions::transaction_commitment; /// # Returns /// /// The transaction commitment, the event commitment and the event count. 
-pub fn calculate_commitments( +pub async fn calculate_commitments( transactions: &[Transaction], events: &[Event], chain_id: Felt252Wrapper, block_number: u64, backend: Arc>, ) -> (Felt252Wrapper, Felt252Wrapper) { - ( - transaction_commitment::(transactions, chain_id, block_number, &backend.bonsai().clone()) - .expect("Failed to calculate transaction commitment"), - event_commitment::(events, &backend.bonsai().clone()).expect("Failed to calculate event commitment"), - ) + let bonsai = backend.bonsai(); + + let commitment_tx = transaction_commitment::(transactions, chain_id, block_number, Arc::clone(bonsai)) + .await + .expect("Failed to calculate transaction commitment"); + + let commitment_event = + event_commitment::(events, Arc::clone(bonsai)).await.expect("Failed to calculate event commitment"); + + (commitment_tx, commitment_event) } // /// Calculate the transaction commitment, the event commitment and the event count. diff --git a/crates/client/deoxys/src/commitments/transactions.rs b/crates/client/deoxys/src/commitments/transactions.rs index 74dce6e2f6..65c57c80cf 100644 --- a/crates/client/deoxys/src/commitments/transactions.rs +++ b/crates/client/deoxys/src/commitments/transactions.rs @@ -4,7 +4,6 @@ use bitvec::prelude::*; use bonsai_trie::id::BasicIdBuilder; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; use mc_db::bonsai_db::BonsaiDb; -use mc_db::BonsaiDbError; use mp_felt::Felt252Wrapper; use mp_hashers::HasherT; use mp_transactions::compute_hash::ComputeTransactionHash; @@ -13,6 +12,7 @@ use sp_runtime::traits::Block as BlockT; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; +use tokio::task::{spawn_blocking, JoinSet}; /// Compute the combined hash of the transaction hash and the signature. 
/// @@ -54,38 +54,49 @@ where transaction_hashes } -pub(crate) fn transaction_commitment( +pub(crate) async fn transaction_commitment( transactions: &[Transaction], chain_id: Felt252Wrapper, block_number: u64, - backend: &Arc>, -) -> Result + backend: Arc>, +) -> Result where B: BlockT, H: HasherT, { let config = BonsaiStorageConfig::default(); - let bonsai_db = backend.as_ref(); - let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); + let mut bonsai_storage = BonsaiStorage::<_, _, Pedersen>::new(backend.as_ref().clone(), config) + .expect("Failed to create bonsai storage"); let mut id_builder = BasicIdBuilder::new(); let zero = id_builder.new_id(); bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); - for (i, tx) in transactions.iter().enumerate() { - let tx_hash = calculate_transaction_hash_with_signature::(tx, chain_id, block_number); + let mut set = JoinSet::new(); + for (i, tx) in transactions.iter().cloned().enumerate() { + let arc_tx = Arc::new(tx); + set.spawn(async move { (i, calculate_transaction_hash_with_signature::(&arc_tx, chain_id, block_number)) }); + } + + while let Some(res) = set.join_next().await { + let (i, tx_hash) = res.map_err(|e| format!("Failed to compute transaction hash: {e}"))?; let key = BitVec::from_vec(i.to_be_bytes().to_vec()); let value = Felt::from(Felt252Wrapper::from(tx_hash)); bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); } - let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + let root_hash = spawn_blocking(move || { + let id = id_builder.new_id(); + bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + + let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); + bonsai_storage.revert_to(zero).unwrap(); - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); - 
bonsai_storage.revert_to(zero).unwrap(); + root_hash + }) + .await + .unwrap(); Ok(Felt252Wrapper::from(root_hash)) } diff --git a/crates/client/deoxys/src/l2.rs b/crates/client/deoxys/src/l2.rs index c250f33d86..038db514f1 100644 --- a/crates/client/deoxys/src/l2.rs +++ b/crates/client/deoxys/src/l2.rs @@ -220,10 +220,8 @@ async fn fetch_block( let block = client.get_block(BlockId::Number(block_number)).await.map_err(|e| format!("failed to get block: {e}"))?; - block_sender - .send(crate::convert::block(&block, backend)) - .await - .map_err(|e| format!("failed to dispatch block: {e}"))?; + let block_conv = crate::convert::block(block, backend).await; + block_sender.send(block_conv).await.map_err(|e| format!("failed to dispatch block: {e}"))?; Ok(()) } diff --git a/crates/client/deoxys/src/utils/convert.rs b/crates/client/deoxys/src/utils/convert.rs index 9a419613bd..b33c295245 100644 --- a/crates/client/deoxys/src/utils/convert.rs +++ b/crates/client/deoxys/src/utils/convert.rs @@ -11,27 +11,26 @@ use starknet_providers::sequencer::models as p; use crate::commitments::lib::calculate_commitments; -pub fn block(block: &p::Block, backend: Arc>) -> mp_block::Block { - let transactions = transactions(&block.transactions); - let events = events(&block.transaction_receipts); - let block_number = block.block_number.expect("no block number provided"); - let sequencer_address = block.sequencer_address.map_or(contract_address(FieldElement::ZERO), contract_address); - let (transaction_commitment, event_commitment) = commitments(&transactions, &events, block_number, backend); - let l1_gas_price = resource_price(block.eth_l1_gas_price); - let protocol_version = starknet_version(&block.starknet_version); +pub async fn block(block: p::Block, backend: Arc>) -> mp_block::Block { + let count_tx = block.transactions.len() as u128; + + let mp_txs = conv_txs(block.transactions); + let mp_block_number = block.block_number.expect("no block number provided"); + let mp_events = 
events(&block.transaction_receipts); + let (transaction_commitment, event_commitment) = commitments(&mp_txs, &mp_events, mp_block_number, backend).await; let header = mp_block::Header { parent_block_hash: felt(block.parent_block_hash), - block_number, + block_number: mp_block_number, block_timestamp: block.timestamp, global_state_root: felt(block.state_root.expect("no state root provided")), - sequencer_address, - transaction_count: block.transactions.len() as u128, + sequencer_address: block.sequencer_address.map_or(contract_address(FieldElement::ZERO), contract_address), + transaction_count: count_tx, transaction_commitment, - event_count: events.len() as u128, + event_count: mp_events.len() as u128, event_commitment, - protocol_version, - l1_gas_price, + protocol_version: starknet_version(&block.starknet_version), + l1_gas_price: resource_price(block.eth_l1_gas_price), extra_data: block.block_hash.map(|h| sp_core::U256::from_big_endian(&h.to_bytes_be())), }; @@ -43,51 +42,51 @@ pub fn block(block: &p::Block, backend: Arc>) -> mp .map(|(i, r)| mp_block::OrderedEvents::new(i as u128, r.events.iter().map(event).collect())) .collect(); - mp_block::Block::new(header, transactions, ordered_events) + mp_block::Block::new(header, mp_txs, ordered_events) } -fn transactions(txs: &[p::TransactionType]) -> Vec { - txs.iter().map(transaction).collect() +fn conv_txs(txs: Vec) -> Vec { + txs.into_iter().map(conv_tx).collect() } -fn transaction(transaction: &p::TransactionType) -> mp_transactions::Transaction { +fn conv_tx(transaction: p::TransactionType) -> mp_transactions::Transaction { match transaction { - p::TransactionType::InvokeFunction(tx) => mp_transactions::Transaction::Invoke(invoke_transaction(tx)), - p::TransactionType::Declare(tx) => mp_transactions::Transaction::Declare(declare_transaction(tx)), - p::TransactionType::Deploy(tx) => mp_transactions::Transaction::Deploy(deploy_transaction(tx)), + p::TransactionType::InvokeFunction(tx) => 
mp_transactions::Transaction::Invoke(conv_tx_invoke(tx)), + p::TransactionType::Declare(tx) => mp_transactions::Transaction::Declare(conv_tx_declare(tx)), + p::TransactionType::Deploy(tx) => mp_transactions::Transaction::Deploy(conv_tx_deploy(tx)), p::TransactionType::DeployAccount(tx) => { - mp_transactions::Transaction::DeployAccount(deploy_account_transaction(tx)) + mp_transactions::Transaction::DeployAccount(conv_tx_deploy_account(tx)) } - p::TransactionType::L1Handler(tx) => mp_transactions::Transaction::L1Handler(l1_handler_transaction(tx)), + p::TransactionType::L1Handler(tx) => mp_transactions::Transaction::L1Handler(conv_tx_l1_handler(tx)), } } -fn invoke_transaction(tx: &p::InvokeFunctionTransaction) -> mp_transactions::InvokeTransaction { +fn conv_tx_invoke(tx: p::InvokeFunctionTransaction) -> mp_transactions::InvokeTransaction { if tx.version == FieldElement::ZERO { mp_transactions::InvokeTransaction::V0(mp_transactions::InvokeTransactionV0 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), contract_address: felt(tx.sender_address).into(), entry_point_selector: felt(tx.entry_point_selector.expect("no entry_point_selector provided")).into(), - calldata: tx.calldata.iter().copied().map(felt).map(Into::into).collect(), + calldata: tx.calldata.into_iter().map(felt).map(Into::into).collect(), }) } else { mp_transactions::InvokeTransaction::V1(mp_transactions::InvokeTransactionV1 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce.expect("no nonce provided")).into(), sender_address: felt(tx.sender_address).into(), - calldata: tx.calldata.iter().copied().map(felt).map(Into::into).collect(), + calldata: 
tx.calldata.into_iter().map(felt).map(Into::into).collect(), offset_version: false, }) } } -fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTransaction { +fn conv_tx_declare(tx: p::DeclareTransaction) -> mp_transactions::DeclareTransaction { if tx.version == FieldElement::ZERO { mp_transactions::DeclareTransaction::V0(mp_transactions::DeclareTransactionV0 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), class_hash: felt(tx.class_hash).into(), sender_address: felt(tx.sender_address).into(), @@ -95,7 +94,7 @@ fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTr } else if tx.version == FieldElement::ONE { mp_transactions::DeclareTransaction::V1(mp_transactions::DeclareTransactionV1 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), class_hash: felt(tx.class_hash).into(), sender_address: felt(tx.sender_address).into(), @@ -104,7 +103,7 @@ fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTr } else { mp_transactions::DeclareTransaction::V2(mp_transactions::DeclareTransactionV2 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), class_hash: felt(tx.class_hash).into(), sender_address: felt(tx.sender_address).into(), @@ -114,29 +113,29 @@ fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTr } } -fn deploy_transaction(tx: &p::DeployTransaction) -> mp_transactions::DeployTransaction { +fn 
conv_tx_deploy(tx: p::DeployTransaction) -> mp_transactions::DeployTransaction { mp_transactions::DeployTransaction { version: starknet_api::transaction::TransactionVersion(felt(tx.version)), class_hash: felt(tx.class_hash).into(), contract_address: felt(tx.contract_address).into(), contract_address_salt: felt(tx.contract_address_salt).into(), - constructor_calldata: tx.constructor_calldata.iter().copied().map(felt).map(Into::into).collect(), + constructor_calldata: tx.constructor_calldata.into_iter().map(felt).map(Into::into).collect(), } } -fn deploy_account_transaction(tx: &p::DeployAccountTransaction) -> mp_transactions::DeployAccountTransaction { +fn conv_tx_deploy_account(tx: p::DeployAccountTransaction) -> mp_transactions::DeployAccountTransaction { mp_transactions::DeployAccountTransaction { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), contract_address_salt: felt(tx.contract_address_salt).into(), - constructor_calldata: tx.constructor_calldata.iter().copied().map(felt).map(Into::into).collect(), + constructor_calldata: tx.constructor_calldata.into_iter().map(felt).map(Into::into).collect(), class_hash: felt(tx.class_hash).into(), offset_version: false, } } -fn l1_handler_transaction(tx: &p::L1HandlerTransaction) -> mp_transactions::HandleL1MessageTransaction { +fn conv_tx_l1_handler(tx: p::L1HandlerTransaction) -> mp_transactions::HandleL1MessageTransaction { mp_transactions::HandleL1MessageTransaction { nonce: tx .nonce @@ -148,7 +147,7 @@ fn l1_handler_transaction(tx: &p::L1HandlerTransaction) -> mp_transactions::Hand }), contract_address: felt(tx.contract_address).into(), entry_point_selector: felt(tx.entry_point_selector).into(), - calldata: tx.calldata.iter().copied().map(felt).map(Into::into).collect(), + calldata: 
tx.calldata.into_iter().map(felt).map(Into::into).collect(), } } @@ -190,7 +189,7 @@ fn event(event: &p::Event) -> starknet_api::transaction::Event { } } -fn commitments( +async fn commitments( transactions: &[mp_transactions::Transaction], events: &[starknet_api::transaction::Event], block_number: u64, @@ -200,7 +199,8 @@ fn commitments( let chain_id = chain_id(); - let (a, b) = calculate_commitments::(transactions, events, chain_id, block_number, backend); + let (a, b) = + calculate_commitments::(transactions, events, chain_id, block_number, backend).await; (a.into(), b.into()) } diff --git a/crates/primitives/transactions/src/lib.rs b/crates/primitives/transactions/src/lib.rs index 832eb133e4..523b367543 100644 --- a/crates/primitives/transactions/src/lib.rs +++ b/crates/primitives/transactions/src/lib.rs @@ -117,7 +117,7 @@ pub enum UserTransaction { Invoke(InvokeTransaction), } -#[derive(Clone, Debug, Eq, PartialEq, From)] +#[derive(Clone, Debug, Eq, PartialEq, From, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub enum Transaction { @@ -136,7 +136,7 @@ pub enum UserOrL1HandlerTransaction { L1Handler(HandleL1MessageTransaction, Fee), } -#[derive(Debug, Clone, Eq, PartialEq, From)] +#[derive(Debug, Clone, Eq, PartialEq, From, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub enum InvokeTransaction { @@ -144,7 +144,7 @@ pub enum InvokeTransaction { V1(InvokeTransactionV1), } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct InvokeTransactionV0 { 
@@ -155,7 +155,7 @@ pub struct InvokeTransactionV0 { pub calldata: Vec, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct InvokeTransactionV1 { @@ -167,7 +167,7 @@ pub struct InvokeTransactionV1 { pub offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq, From)] +#[derive(Debug, Clone, Eq, PartialEq, From, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub enum DeclareTransaction { @@ -176,7 +176,7 @@ pub enum DeclareTransaction { V2(DeclareTransactionV2), } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeclareTransactionV0 { @@ -187,7 +187,7 @@ pub struct DeclareTransactionV0 { pub sender_address: Felt252Wrapper, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeclareTransactionV1 { @@ -199,7 +199,7 @@ pub struct DeclareTransactionV1 { pub offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeclareTransactionV2 { @@ -212,7 +212,7 @@ pub struct DeclareTransactionV2 { pub 
offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeployAccountTransaction { @@ -225,7 +225,7 @@ pub struct DeployAccountTransaction { pub offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeployTransaction { @@ -236,7 +236,7 @@ pub struct DeployTransaction { pub constructor_calldata: Vec, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct HandleL1MessageTransaction { From d83d3539772461837faa45ef246adf0da3c80406 Mon Sep 17 00:00:00 2001 From: Trantorian Date: Fri, 23 Feb 2024 21:31:14 +0000 Subject: [PATCH 2/7] perf(state_commitment): :zap: furher performance improvements and refactoring --- crates/client/db/src/bonsai_db.rs | 4 +- .../client/deoxys/src/commitments/events.rs | 37 +++++++++++-------- crates/client/deoxys/src/commitments/lib.rs | 16 ++++---- .../deoxys/src/commitments/transactions.rs | 34 +++++++++-------- crates/client/deoxys/src/utils/convert.rs | 4 +- 5 files changed, 54 insertions(+), 41 deletions(-) diff --git a/crates/client/db/src/bonsai_db.rs b/crates/client/db/src/bonsai_db.rs index d580dfe503..706b94d572 100644 --- a/crates/client/db/src/bonsai_db.rs +++ b/crates/client/db/src/bonsai_db.rs @@ -17,7 +17,7 @@ pub struct BonsaiDb { pub(crate) _marker: PhantomData, } -impl BonsaiDatabase for BonsaiDb { +impl 
BonsaiDatabase for &BonsaiDb { type Batch = DBTransaction; type DatabaseError = BonsaiDbError; @@ -175,7 +175,7 @@ impl BonsaiDatabase for TransactionWrapper { } /// This implementation is a stub to mute any error but is is currently not used. -impl BonsaiPersistentDatabase for BonsaiDb { +impl BonsaiPersistentDatabase for &BonsaiDb { type Transaction = TransactionWrapper; type DatabaseError = BonsaiDbError; diff --git a/crates/client/deoxys/src/commitments/events.rs b/crates/client/deoxys/src/commitments/events.rs index 9684a99ba6..61b6a19cbf 100644 --- a/crates/client/deoxys/src/commitments/events.rs +++ b/crates/client/deoxys/src/commitments/events.rs @@ -13,7 +13,7 @@ use starknet_api::transaction::Event; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; -use tokio::task::{spawn_blocking, JoinSet}; +use tokio::task::JoinSet; /// Calculate the hash of an event. /// @@ -57,7 +57,7 @@ pub fn calculate_event_hash(event: &Event) -> FieldElement { /// The merkle root of the merkle tree built from the events. 
pub(crate) async fn event_commitment( events: &[Event], - backend: Arc>, + backend: &Arc>, ) -> Result where B: BlockT, @@ -68,42 +68,49 @@ where } let config = BonsaiStorageConfig::default(); - let mut bonsai_storage = BonsaiStorage::<_, _, Pedersen>::new(backend.as_ref().clone(), config) - .expect("Failed to create bonsai storage"); + let mut bonsai_storage = + BonsaiStorage::<_, _, Pedersen>::new(backend.as_ref(), config).expect("Failed to create bonsai storage"); let mut id_builder = BasicIdBuilder::new(); let zero = id_builder.new_id(); bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); + // event hashes are calculated in parallel let mut set = JoinSet::new(); for (i, event) in events.iter().cloned().enumerate() { let arc_event = Arc::new(event); set.spawn(async move { (i, get_hash::(&Arc::clone(&arc_event))) }); } + // resulting hashes are waited for and added to the Bonsai Trie db while let Some(res) = set.join_next().await { let (i, event_hash) = res.map_err(|e| format!("Failed to compute event hash: {e}"))?; let key = BitVec::from_vec(i.to_be_bytes().to_vec()); let value = Felt::from(Felt252Wrapper::from(event_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); + bonsai_storage + .insert(key.as_bitslice(), &value) + .map_err(|_| format!("Failed to insert into bonsai storage"))?; } - let root_hash = spawn_blocking(move || { - let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + // Note that committing changes still has the greatest performance hit + // as this is where the root hash is calculated. Due to the Merkle structure + // of Bonsai Tries, this results in a trie size that grows very rapidly with + // each new insertion. It seems that the only vector of optimization here + // would be to optimize the tree traversal and hash computation. 
+ let id = id_builder.new_id(); + bonsai_storage.commit(id).map_err(|_| format!("Failed to commit to bonsai storage"))?; - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); - bonsai_storage.revert_to(zero).unwrap(); - - root_hash - }) - .await - .unwrap(); + // restores the Bonsai Trie to it's previous state + let root_hash = bonsai_storage.root_hash().map_err(|_| format!("Failed to get root hash"))?; + bonsai_storage.revert_to(zero).unwrap(); Ok(Felt252Wrapper::from(root_hash)) } +// Event hashes are cached to avoid re-computing hashes for duplicate events. +// Note that this does not seem to have a huge impact on performance, +// so might be removed in the future if the memory footprint becomes an issue. lazy_static! { static ref EVENT_HASHES: SkipMap = SkipMap::new(); } diff --git a/crates/client/deoxys/src/commitments/lib.rs b/crates/client/deoxys/src/commitments/lib.rs index 8e260375dd..2ae921b01f 100644 --- a/crates/client/deoxys/src/commitments/lib.rs +++ b/crates/client/deoxys/src/commitments/lib.rs @@ -5,6 +5,7 @@ use mp_hashers::HasherT; use mp_transactions::Transaction; use sp_runtime::traits::Block as BlockT; use starknet_api::transaction::Event; +use tokio::join; use super::events::event_commitment; use super::transactions::transaction_commitment; @@ -27,14 +28,15 @@ pub async fn calculate_commitments( ) -> (Felt252Wrapper, Felt252Wrapper) { let bonsai = backend.bonsai(); - let commitment_tx = transaction_commitment::(transactions, chain_id, block_number, Arc::clone(bonsai)) - .await - .expect("Failed to calculate transaction commitment"); + let (commitment_tx, commitment_event) = join!( + transaction_commitment::(transactions, chain_id, block_number, bonsai), + event_commitment::(events, bonsai) + ); - let commitment_event = - event_commitment::(events, Arc::clone(bonsai)).await.expect("Failed to calculate event commitment"); - - (commitment_tx, commitment_event) + ( + commitment_tx.expect("Failed to calculate transaction 
commitment"), + commitment_event.expect("Failed to calculate event commitment"), + ) } // /// Calculate the transaction commitment, the event commitment and the event count. diff --git a/crates/client/deoxys/src/commitments/transactions.rs b/crates/client/deoxys/src/commitments/transactions.rs index 65c57c80cf..cd6086fc37 100644 --- a/crates/client/deoxys/src/commitments/transactions.rs +++ b/crates/client/deoxys/src/commitments/transactions.rs @@ -12,7 +12,7 @@ use sp_runtime::traits::Block as BlockT; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; -use tokio::task::{spawn_blocking, JoinSet}; +use tokio::task::JoinSet; /// Compute the combined hash of the transaction hash and the signature. /// @@ -58,45 +58,49 @@ pub(crate) async fn transaction_commitment( transactions: &[Transaction], chain_id: Felt252Wrapper, block_number: u64, - backend: Arc>, + backend: &Arc>, ) -> Result where B: BlockT, H: HasherT, { let config = BonsaiStorageConfig::default(); - let mut bonsai_storage = BonsaiStorage::<_, _, Pedersen>::new(backend.as_ref().clone(), config) - .expect("Failed to create bonsai storage"); + let mut bonsai_storage = + BonsaiStorage::<_, _, Pedersen>::new(backend.as_ref(), config).expect("Failed to create bonsai storage"); let mut id_builder = BasicIdBuilder::new(); let zero = id_builder.new_id(); bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); + // transaction hashes are calculated in parallel let mut set = JoinSet::new(); for (i, tx) in transactions.iter().cloned().enumerate() { let arc_tx = Arc::new(tx); set.spawn(async move { (i, calculate_transaction_hash_with_signature::(&arc_tx, chain_id, block_number)) }); } + // resulting hashes are waited for and added to the Bonsai Trie db while let Some(res) = set.join_next().await { let (i, tx_hash) = res.map_err(|e| format!("Failed to compute transaction hash: {e}"))?; let key = BitVec::from_vec(i.to_be_bytes().to_vec()); let 
value = Felt::from(Felt252Wrapper::from(tx_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); + bonsai_storage + .insert(key.as_bitslice(), &value) + .map_err(|_| format!("Failed to insert into bonsai storage"))?; } - let root_hash = spawn_blocking(move || { - let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + // Note that committing changes still has the greatest performance hit + // as this is where the root hash is calculated. Due to the Merkle structure + // of Bonsai Tries, this results in a trie size that grows very rapidly with + // each new insertion. It seems that the only vector of optimization here + // would be to optimize the tree traversal and hash computation. + let id = id_builder.new_id(); + bonsai_storage.commit(id).map_err(|_| format!("Failed to commit to bonsai storage"))?; - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); - bonsai_storage.revert_to(zero).unwrap(); - - root_hash - }) - .await - .unwrap(); + // restores the Bonsai Trie to it's previous state + let root_hash = bonsai_storage.root_hash().map_err(|_| format!("Failed to get root hash"))?; + bonsai_storage.revert_to(zero).unwrap(); Ok(Felt252Wrapper::from(root_hash)) } diff --git a/crates/client/deoxys/src/utils/convert.rs b/crates/client/deoxys/src/utils/convert.rs index b33c295245..c2711b8e13 100644 --- a/crates/client/deoxys/src/utils/convert.rs +++ b/crates/client/deoxys/src/utils/convert.rs @@ -199,10 +199,10 @@ async fn commitments( let chain_id = chain_id(); - let (a, b) = + let (commitment_tx, commitment_event) = calculate_commitments::(transactions, events, chain_id, block_number, backend).await; - (a.into(), b.into()) + (commitment_tx.into(), commitment_event.into()) } fn chain_id() -> mp_felt::Felt252Wrapper { From 185cc919dbb19394acf36e94845e4119882e7cc2 Mon Sep 17 00:00:00 2001 From: Trantorian Date: Fri, 23 Feb 2024 21:37:23 +0000 
Subject: [PATCH 3/7] chore(changelog): :green_heart: updated `CHANGELOG.md` --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37827f3031..38002fd878 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ git # Madara Changelog ## Next release +- perf(state_commitment): parallelized state commitment hash computations - fix(L1): fix l1 thread with battle tested implementation + removed l1-l2 - fix: update and store ConfigFetch in l2 sync(), chainId rpc call - fix: get_events paging with continuation_token From 288d6aaf3d39fcaaa954ed73d87d8e2514517385 Mon Sep 17 00:00:00 2001 From: Trantorian Date: Sun, 25 Feb 2024 17:16:55 +0000 Subject: [PATCH 4/7] perf(state_commitment): :zap: Adapted performance improvements to new state commitment logic --- crates/client/db/src/bonsai_db.rs | 9 +- crates/client/db/src/lib.rs | 1 - .../client/deoxys/src/commitments/classes.rs | 2 +- .../deoxys/src/commitments/contracts.rs | 4 +- .../client/deoxys/src/commitments/events.rs | 117 +++++++++--------- crates/client/deoxys/src/commitments/lib.rs | 13 +- .../deoxys/src/commitments/transactions.rs | 39 ++++-- crates/client/deoxys/src/l2.rs | 11 +- crates/client/deoxys/src/utils/convert.rs | 68 +++++----- 9 files changed, 141 insertions(+), 123 deletions(-) diff --git a/crates/client/db/src/bonsai_db.rs b/crates/client/db/src/bonsai_db.rs index 4d710d956c..abe58a4f33 100644 --- a/crates/client/db/src/bonsai_db.rs +++ b/crates/client/db/src/bonsai_db.rs @@ -1,6 +1,4 @@ -use std::default; use std::marker::PhantomData; -use std::sync::atomic::AtomicU32; use std::sync::Arc; use bonsai_trie::id::Id; @@ -47,7 +45,6 @@ impl TrieColumn { } /// Represents a Bonsai database instance parameterized by a block type. -#[derive(Clone)] pub struct BonsaiDb { /// Database interface for key-value operations. 
pub(crate) db: Arc, @@ -59,9 +56,9 @@ pub struct BonsaiDb { pub fn key_type(key: &DatabaseKey) -> KeyType { match key { - DatabaseKey::Trie(bytes) => return KeyType::Trie, - DatabaseKey::Flat(bytes) => return KeyType::Flat, - DatabaseKey::TrieLog(bytes) => return KeyType::TrieLog, + DatabaseKey::Trie(_) => return KeyType::Trie, + DatabaseKey::Flat(_) => return KeyType::Flat, + DatabaseKey::TrieLog(_) => return KeyType::TrieLog, } } diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index 0c124c6bc3..e05f1c3efc 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -30,7 +30,6 @@ mod meta_db; use std::marker::PhantomData; use std::path::{Path, PathBuf}; -use std::sync::atomic::AtomicU32; use std::sync::Arc; use bonsai_db::{BonsaiDb, TrieColumn}; diff --git a/crates/client/deoxys/src/commitments/classes.rs b/crates/client/deoxys/src/commitments/classes.rs index 21554ea355..b4435f3898 100644 --- a/crates/client/deoxys/src/commitments/classes.rs +++ b/crates/client/deoxys/src/commitments/classes.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bitvec::vec::BitVec; use bonsai_trie::id::{BasicId, BasicIdBuilder}; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; -use mc_db::bonsai_db::{BonsaiDb, TrieColumn}; +use mc_db::bonsai_db::BonsaiDb; use mc_db::BonsaiDbError; use mp_felt::Felt252Wrapper; use mp_hashers::poseidon::PoseidonHasher; diff --git a/crates/client/deoxys/src/commitments/contracts.rs b/crates/client/deoxys/src/commitments/contracts.rs index c9ba6570d7..3bc0ca9534 100644 --- a/crates/client/deoxys/src/commitments/contracts.rs +++ b/crates/client/deoxys/src/commitments/contracts.rs @@ -1,12 +1,10 @@ use std::sync::Arc; use bitvec::prelude::BitVec; -use blockifier::execution::contract_address; use blockifier::state::cached_state::CommitmentStateDiff; use bonsai_trie::id::{BasicId, BasicIdBuilder}; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; -use ethers::addressbook::Contract; -use 
mc_db::bonsai_db::{BonsaiDb, TrieColumn}; +use mc_db::bonsai_db::BonsaiDb; use mc_db::BonsaiDbError; use mp_felt::Felt252Wrapper; use mp_hashers::pedersen::PedersenHasher; diff --git a/crates/client/deoxys/src/commitments/events.rs b/crates/client/deoxys/src/commitments/events.rs index 9b32f7e897..dc3ba11e27 100644 --- a/crates/client/deoxys/src/commitments/events.rs +++ b/crates/client/deoxys/src/commitments/events.rs @@ -4,8 +4,6 @@ use bitvec::vec::BitVec; use bonsai_trie::databases::HashMapDb; use bonsai_trie::id::{BasicId, BasicIdBuilder}; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; -use crossbeam_skiplist::SkipMap; -use lazy_static::lazy_static; use mc_db::bonsai_db::BonsaiDb; use mp_felt::Felt252Wrapper; use mp_hashers::pedersen::PedersenHasher; @@ -15,7 +13,7 @@ use starknet_api::transaction::Event; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; -use tokio::task::JoinSet; +use tokio::task::{spawn_blocking, JoinSet}; /// Calculate the hash of the event. /// @@ -59,33 +57,29 @@ pub fn calculate_event_hash(event: &Event) -> FieldElement { /// # Returns /// /// The event commitment as `Felt252Wrapper`. 
-pub fn event_commitment( - events: &[Event], - bonsai_db: &Arc>, -) -> Result { - if events.len() > 0 { - let config = BonsaiStorageConfig::default(); - let bonsai_db = bonsai_db.as_ref(); - let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); +#[deprecated = "use `memory_event_commitment` instead"] +pub fn event_commitment(events: &[Event], bonsai_db: &Arc>) -> Result { + if events.is_empty() { + return Ok(Felt252Wrapper::ZERO); + } + + let config = BonsaiStorageConfig::default(); + let bonsai_db = bonsai_db.as_ref(); + let mut bonsai_storage = + BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); let mut id_builder = BasicIdBuilder::new(); let zero = id_builder.new_id(); bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); - for (i, event) in events.iter().enumerate() { - let event_hash = calculate_event_hash::(event); - let key = BitVec::from_vec(i.to_be_bytes().to_vec()); - let value = Felt::from(Felt252Wrapper::from(event_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); - } + for (i, event) in events.iter().enumerate() { + let event_hash = calculate_event_hash::(event); + let key = BitVec::from_vec(i.to_be_bytes().to_vec()); + let value = Felt::from(Felt252Wrapper::from(event_hash)); + bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); + } - // Note that committing changes still has the greatest performance hit - // as this is where the root hash is calculated. Due to the Merkle structure - // of Bonsai Tries, this results in a trie size that grows very rapidly with - // each new insertion. It seems that the only vector of optimization here - // would be to optimize the tree traversal and hash computation. 
let id = id_builder.new_id(); bonsai_storage.commit(id).map_err(|_| format!("Failed to commit to bonsai storage"))?; @@ -96,23 +90,6 @@ pub fn event_commitment( Ok(Felt252Wrapper::from(root_hash)) } -// Event hashes are cached to avoid re-computing hashes for duplicate events. -// Note that this does not seem to have a huge impact on performance, -// so might be removed in the future if the memory footprint becomes an issue. -lazy_static! { - static ref EVENT_HASHES: SkipMap = SkipMap::new(); -} - -fn get_hash(event: &Event) -> FieldElement -where - H: HasherT, -{ - match EVENT_HASHES.get(event) { - Some(entry) => entry.value().clone(), - None => store_hash::(event), - } -} - /// Calculate the event commitment in memory using HashMapDb (which is more efficient for this /// usecase). /// @@ -123,27 +100,45 @@ where /// # Returns /// /// The event commitment as `Felt252Wrapper`. -pub fn memory_event_commitment(events: &[Event]) -> Result { - if !events.is_empty() { - let config = BonsaiStorageConfig::default(); - let bonsai_db = HashMapDb::::default(); - let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - - for (i, event) in events.iter().enumerate() { - let event_hash = calculate_event_hash::(event); - let key = BitVec::from_vec(i.to_be_bytes().to_vec()); - let value = Felt::from(Felt252Wrapper::from(event_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); - } - - let mut id_builder = BasicIdBuilder::new(); - let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); +pub async fn memory_event_commitment(events: &[Event]) -> Result { + if events.is_empty() { + return Ok(Felt252Wrapper::ZERO); + } - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); - Ok(Felt252Wrapper::from(root_hash)) - } else { - Ok(Felt252Wrapper::ZERO) + let config = 
BonsaiStorageConfig::default(); + let bonsai_db = HashMapDb::::default(); + let mut bonsai_storage = + BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); + + // event hashes are computed in parallel + let mut task_set = JoinSet::new(); + events.iter().cloned().enumerate().for_each(|(i, event)| { + task_set.spawn(async move { (i, calculate_event_hash::(&event)) }); + }); + + // once event hashes have finished computing, they are inserted into the local Bonsai db + while let Some(res) = task_set.join_next().await { + let (i, event_hash) = res.map_err(|e| format!("Failed to retrieve event hash: {e}"))?; + let key = BitVec::from_vec(i.to_be_bytes().to_vec()); + let value = Felt::from(Felt252Wrapper::from(event_hash)); + bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); } + + // Note that committing changes still has the greatest performance hit + // as this is where the root hash is calculated. Due to the Merkle structure + // of Bonsai Tries, this results in a trie size that grows very rapidly with + // each new insertion. It seems that the only vector of optimization here + // would be to optimize the tree traversal and hash computation. 
+ let mut id_builder = BasicIdBuilder::new(); + let id = id_builder.new_id(); + + // run in a blocking-safe thread to avoid starving the thread pool + let root_hash = spawn_blocking(move || { + bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + bonsai_storage.root_hash().expect("Failed to get root hash") + }) + .await + .map_err(|e| format!("Failed to compute event root hash: {e}"))?; + + Ok(Felt252Wrapper::from(root_hash)) } diff --git a/crates/client/deoxys/src/commitments/lib.rs b/crates/client/deoxys/src/commitments/lib.rs index 3ce2819f7d..4f2bb84547 100644 --- a/crates/client/deoxys/src/commitments/lib.rs +++ b/crates/client/deoxys/src/commitments/lib.rs @@ -14,6 +14,7 @@ use starknet_api::api_core::{ClassHash, CompiledClassHash, ContractAddress, Nonc use starknet_api::hash::StarkFelt; use starknet_api::state::StorageKey; use starknet_api::transaction::Event; +use tokio::join; use super::classes::{get_class_trie_root, update_class_trie}; use super::contracts::{get_contract_trie_root, update_contract_trie, update_storage_trie, ContractLeafParams}; @@ -32,16 +33,18 @@ use super::transactions::memory_transaction_commitment; /// # Returns /// /// The transaction and the event commitment as `Felt252Wrapper`.
-pub fn calculate_commitments( +pub async fn calculate_commitments( transactions: &[Transaction], events: &[Event], chain_id: Felt252Wrapper, block_number: u64, ) -> (Felt252Wrapper, Felt252Wrapper) { + let (commitment_tx, commitment_event) = + join!(memory_transaction_commitment(transactions, chain_id, block_number), memory_event_commitment(events)); + ( - memory_transaction_commitment(transactions, chain_id, block_number) - .expect("Failed to calculate transaction commitment"), - memory_event_commitment(events).expect("Failed to calculate event commitment"), + commitment_tx.expect("Failed to calculate transaction commitment"), + commitment_event.expect("Failed to calculate event commitment"), ) } @@ -120,8 +123,6 @@ pub fn calculate_state_root( where H: HasherT, { - println!("classes_trie_root: {:?}", classes_trie_root); - println!("contracts_trie_root: {:?}", contracts_trie_root); let starknet_state_prefix = Felt252Wrapper::try_from("STARKNET_STATE_V0".as_bytes()).unwrap(); let state_commitment_hash = diff --git a/crates/client/deoxys/src/commitments/transactions.rs b/crates/client/deoxys/src/commitments/transactions.rs index 14c692f75d..2eb2a0fc91 100644 --- a/crates/client/deoxys/src/commitments/transactions.rs +++ b/crates/client/deoxys/src/commitments/transactions.rs @@ -5,6 +5,7 @@ use bonsai_trie::databases::HashMapDb; use bonsai_trie::id::{BasicId, BasicIdBuilder}; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; use mc_db::bonsai_db::BonsaiDb; +use mc_db::BonsaiDbError; use mp_felt::Felt252Wrapper; use mp_hashers::pedersen::PedersenHasher; use mp_hashers::HasherT; @@ -14,7 +15,7 @@ use sp_runtime::traits::Block as BlockT; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; -use tokio::task::JoinSet; +use tokio::task::{spawn_blocking, JoinSet}; /// Compute the combined hash of the transaction hash and the signature. 
/// @@ -71,6 +72,7 @@ where /// # Returns /// /// The transaction commitment as `Felt252Wrapper`. +#[deprecated = "use `memory_transaction_commitment` instead"] pub fn transaction_commitment( transactions: &[Transaction], chain_id: Felt252Wrapper, @@ -78,9 +80,8 @@ pub fn transaction_commitment( bonsai_db: &Arc>, ) -> Result { let config = BonsaiStorageConfig::default(); - let bonsai_db = bonsai_db.as_ref(); let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(backend.as_ref(), config).expect("Failed to create bonsai storage"); + BonsaiStorage::<_, _, Pedersen>::new(bonsai_db.as_ref(), config).expect("Failed to create bonsai storage"); let mut id_builder = BasicIdBuilder::new(); @@ -115,27 +116,47 @@ pub fn transaction_commitment( /// # Returns /// /// The transaction commitment as `Felt252Wrapper`. -pub fn memory_transaction_commitment( +pub async fn memory_transaction_commitment( transactions: &[Transaction], chain_id: Felt252Wrapper, block_number: u64, -) -> Result { +) -> Result { let config = BonsaiStorageConfig::default(); let bonsai_db = HashMapDb::::default(); let mut bonsai_storage = BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - for (i, tx) in transactions.iter().enumerate() { - let tx_hash = calculate_transaction_hash_with_signature::(tx, chain_id, block_number); + // transaction hashes are computed in parallel + let mut task_set = JoinSet::new(); + transactions.iter().cloned().enumerate().for_each(|(i, tx)| { + task_set.spawn(async move { + (i, calculate_transaction_hash_with_signature::(&tx, chain_id, block_number)) + }); + }); + + // once transaction hashes have finished computing, they are inserted into the local Bonsai db + while let Some(res) = task_set.join_next().await { + let (i, tx_hash) = res.map_err(|e| format!("Failed to retrieve transaction hash: {e}"))?; let key = BitVec::from_vec(i.to_be_bytes().to_vec()); let value = Felt::from(Felt252Wrapper::from(tx_hash)); 
bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); } + // Note that committing changes still has the greatest performance hit + // as this is where the root hash is calculated. Due to the Merkle structure + // of Bonsai Tries, this results in a trie size that grows very rapidly with + // each new insertion. It seems that the only vector of optimization here + // would be to optimize the tree traversal and hash computation. let mut id_builder = BasicIdBuilder::new(); let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); + // run in a blocking-safe thread to avoid starving the thread pool + let root_hash = spawn_blocking(move || { + bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + bonsai_storage.root_hash().expect("Failed to get root hash") + }) + .await + .map_err(|e| format!("Failed to compute transaction root hash: {e}"))?; + Ok(Felt252Wrapper::from(root_hash)) } diff --git a/crates/client/deoxys/src/l2.rs b/crates/client/deoxys/src/l2.rs index 286457fe6a..2a90a12d4a 100644 --- a/crates/client/deoxys/src/l2.rs +++ b/crates/client/deoxys/src/l2.rs @@ -4,7 +4,6 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use itertools::Itertools; -use mc_db::bonsai_db::BonsaiDb; use mc_db::BonsaiDbs; use mc_storage::OverrideHandle; use mp_block::state_update::StateUpdateWrapper; @@ -231,7 +230,7 @@ async fn fetch_block( let block = client.get_block(BlockId::Number(block_number)).await.map_err(|e| format!("failed to get block: {e}"))?; - let block_conv = crate::convert::block(block, backend).await; + let block_conv = crate::convert::block(block).await; block_sender.send(block_conv).await.map_err(|e| format!("failed to dispatch block: {e}"))?; Ok(()) @@ -241,7 +240,7 @@ pub async fn fetch_genesis_block(config: FetchConfig) -> Result( @@ -283,7 +282,7 @@ async fn
fetch_state_update( .await .map_err(|e| format!("failed to get state update: {e}"))?; - let _ = verify_l2(block_number, &state_update, bonsai_dbs); + verify_l2(block_number, &state_update, bonsai_dbs)?; Ok(state_update) } @@ -295,7 +294,7 @@ async fn fetch_genesis_state_update( let state_update = provider.get_state_update(BlockId::Number(0)).await.map_err(|e| format!("failed to get state update: {e}"))?; - let _ = verify_l2(0, &state_update, bonsai_dbs); + verify_l2(0, &state_update, bonsai_dbs)?; Ok(state_update) } @@ -455,7 +454,7 @@ pub fn verify_l2( ) -> Result<(), String> { let state_update_wrapper = StateUpdateWrapper::from(state_update); let csd = build_commitment_state_diff(state_update_wrapper.clone()); - let state_root = update_state_root(csd, bonsai_dbs).expect("Failed to update state root"); + let state_root = update_state_root(csd, bonsai_dbs).map_err(|e| format!("Failed to update state root: {e}"))?; let block_hash = state_update.block_hash.expect("Block hash not found in state update"); update_l2(L2StateUpdate { diff --git a/crates/client/deoxys/src/utils/convert.rs b/crates/client/deoxys/src/utils/convert.rs index cdc4733945..efb60026d5 100644 --- a/crates/client/deoxys/src/utils/convert.rs +++ b/crates/client/deoxys/src/utils/convert.rs @@ -8,28 +8,36 @@ use starknet_providers::sequencer::models as p; use crate::commitments::lib::calculate_commitments; -pub fn block(block: &p::Block) -> mp_block::Block { - let transactions = transactions(&block.transactions); +pub async fn block(block: p::Block) -> mp_block::Block { + // converts starknet_provider transactions and events to mp_transactions and starknet_api events + let transactions = transactions(block.transactions); let events = events(&block.transaction_receipts); + + let parent_block_hash = felt(block.parent_block_hash); let block_number = block.block_number.expect("no block number provided"); + let block_timestamp = block.timestamp; + let global_state_root = felt(block.state_root.expect("no 
state root provided")); let sequencer_address = block.sequencer_address.map_or(contract_address(FieldElement::ZERO), contract_address); - let (transaction_commitment, event_commitment) = commitments(&transactions, &events, block_number); - let l1_gas_price = resource_price(block.eth_l1_gas_price); + let transaction_count = transactions.len() as u128; + let event_count = events.len() as u128; + let (transaction_commitment, event_commitment) = commitments(&transactions, &events, block_number).await; let protocol_version = starknet_version(&block.starknet_version); + let l1_gas_price = resource_price(block.eth_l1_gas_price); + let extra_data = block.block_hash.map(|h| sp_core::U256::from_big_endian(&h.to_bytes_be())); let header = mp_block::Header { - parent_block_hash: felt(block.parent_block_hash), - block_number: mp_block_number, - block_timestamp: block.timestamp, - global_state_root: felt(block.state_root.expect("no state root provided")), - sequencer_address: block.sequencer_address.map_or(contract_address(FieldElement::ZERO), contract_address), - transaction_count: count_tx, + parent_block_hash, + block_number, + block_timestamp, + global_state_root, + sequencer_address, + transaction_count, transaction_commitment, - event_count: mp_events.len() as u128, + event_count, event_commitment, - protocol_version: starknet_version(&block.starknet_version), - l1_gas_price: resource_price(block.eth_l1_gas_price), - extra_data: block.block_hash.map(|h| sp_core::U256::from_big_endian(&h.to_bytes_be())), + protocol_version, + l1_gas_price, + extra_data, }; let ordered_events: Vec = block @@ -40,26 +48,26 @@ pub fn block(block: &p::Block) -> mp_block::Block { .map(|(i, r)| mp_block::OrderedEvents::new(i as u128, r.events.iter().map(event).collect())) .collect(); - mp_block::Block::new(header, mp_txs, ordered_events) + mp_block::Block::new(header, transactions, ordered_events) } -fn conv_txs(txs: Vec) -> Vec { - txs.into_iter().map(conv_tx).collect() +fn transactions(txs: 
Vec) -> Vec { + txs.into_iter().map(transaction).collect() } -fn conv_tx(transaction: p::TransactionType) -> mp_transactions::Transaction { +fn transaction(transaction: p::TransactionType) -> mp_transactions::Transaction { match transaction { - p::TransactionType::InvokeFunction(tx) => mp_transactions::Transaction::Invoke(conv_tx_invoke(tx)), - p::TransactionType::Declare(tx) => mp_transactions::Transaction::Declare(conv_tx_declare(tx)), - p::TransactionType::Deploy(tx) => mp_transactions::Transaction::Deploy(conv_tx_deploy(tx)), + p::TransactionType::InvokeFunction(tx) => mp_transactions::Transaction::Invoke(invoke_transaction(tx)), + p::TransactionType::Declare(tx) => mp_transactions::Transaction::Declare(declare_transaction(tx)), + p::TransactionType::Deploy(tx) => mp_transactions::Transaction::Deploy(deploy_transaction(tx)), p::TransactionType::DeployAccount(tx) => { - mp_transactions::Transaction::DeployAccount(conv_tx_deploy_account(tx)) + mp_transactions::Transaction::DeployAccount(deploy_account_transaction(tx)) } - p::TransactionType::L1Handler(tx) => mp_transactions::Transaction::L1Handler(conv_tx_l1_handler(tx)), + p::TransactionType::L1Handler(tx) => mp_transactions::Transaction::L1Handler(l1_handler_transaction(tx)), } } -fn conv_tx_invoke(tx: p::InvokeFunctionTransaction) -> mp_transactions::InvokeTransaction { +fn invoke_transaction(tx: p::InvokeFunctionTransaction) -> mp_transactions::InvokeTransaction { if tx.version == FieldElement::ZERO { mp_transactions::InvokeTransaction::V0(mp_transactions::InvokeTransactionV0 { max_fee: fee(tx.max_fee.expect("no max fee provided")), @@ -80,7 +88,7 @@ fn conv_tx_invoke(tx: p::InvokeFunctionTransaction) -> mp_transactions::InvokeTr } } -fn conv_tx_declare(tx: p::DeclareTransaction) -> mp_transactions::DeclareTransaction { +fn declare_transaction(tx: p::DeclareTransaction) -> mp_transactions::DeclareTransaction { if tx.version == FieldElement::ZERO { 
mp_transactions::DeclareTransaction::V0(mp_transactions::DeclareTransactionV0 { max_fee: fee(tx.max_fee.expect("no max fee provided")), @@ -111,7 +119,7 @@ fn conv_tx_declare(tx: p::DeclareTransaction) -> mp_transactions::DeclareTransac } } -fn conv_tx_deploy(tx: p::DeployTransaction) -> mp_transactions::DeployTransaction { +fn deploy_transaction(tx: p::DeployTransaction) -> mp_transactions::DeployTransaction { mp_transactions::DeployTransaction { version: starknet_api::transaction::TransactionVersion(felt(tx.version)), class_hash: felt(tx.class_hash).into(), @@ -121,7 +129,7 @@ fn conv_tx_deploy(tx: p::DeployTransaction) -> mp_transactions::DeployTransactio } } -fn conv_tx_deploy_account(tx: p::DeployAccountTransaction) -> mp_transactions::DeployAccountTransaction { +fn deploy_account_transaction(tx: p::DeployAccountTransaction) -> mp_transactions::DeployAccountTransaction { mp_transactions::DeployAccountTransaction { max_fee: fee(tx.max_fee.expect("no max fee provided")), signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), @@ -133,7 +141,7 @@ fn conv_tx_deploy_account(tx: p::DeployAccountTransaction) -> mp_transactions::D } } -fn conv_tx_l1_handler(tx: p::L1HandlerTransaction) -> mp_transactions::HandleL1MessageTransaction { +fn l1_handler_transaction(tx: p::L1HandlerTransaction) -> mp_transactions::HandleL1MessageTransaction { mp_transactions::HandleL1MessageTransaction { nonce: tx .nonce @@ -187,14 +195,14 @@ fn event(event: &p::Event) -> starknet_api::transaction::Event { } } -fn commitments( +async fn commitments( transactions: &[mp_transactions::Transaction], events: &[starknet_api::transaction::Event], block_number: u64, ) -> (StarkFelt, StarkFelt) { let chain_id = chain_id(); - let (a, b) = calculate_commitments(transactions, events, chain_id, block_number); + let (commitment_tx, commitment_event) = calculate_commitments(transactions, events, chain_id, block_number).await; (commitment_tx.into(), commitment_event.into()) } From 
3c384f0459ad6eb68ca1ab1e879f9163f6167904 Mon Sep 17 00:00:00 2001 From: Trantorian Date: Mon, 26 Feb 2024 18:08:52 +0000 Subject: [PATCH 5/7] perf(state_commitment): :zap: Worked on verify_l2 performance --- .../deoxys/src/commitments/contracts.rs | 6 +- crates/client/deoxys/src/commitments/lib.rs | 90 +++++++++++++++---- crates/client/deoxys/src/l2.rs | 13 +-- crates/client/deoxys/src/utils/convert.rs | 2 + 4 files changed, 87 insertions(+), 24 deletions(-) diff --git a/crates/client/deoxys/src/commitments/contracts.rs b/crates/client/deoxys/src/commitments/contracts.rs index 3bc0ca9534..d078273525 100644 --- a/crates/client/deoxys/src/commitments/contracts.rs +++ b/crates/client/deoxys/src/commitments/contracts.rs @@ -32,7 +32,7 @@ pub struct ContractLeafParams { /// The storage root hash. pub fn update_storage_trie( contract_address: &ContractAddress, - commitment_state_diff: CommitmentStateDiff, + csd: &Arc, bonsai_db: &Arc>, ) -> Result { let config = BonsaiStorageConfig::default(); @@ -40,7 +40,7 @@ pub fn update_storage_trie( let mut bonsai_storage: BonsaiStorage, Pedersen> = BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - if let Some(updates) = commitment_state_diff.storage_updates.get(contract_address) { + if let Some(updates) = csd.storage_updates.get(contract_address) { for (storage_key, storage_value) in updates { let key = BitVec::from_vec(Felt252Wrapper::from(storage_key.0.0).0.to_bytes_be()[..31].to_vec()); let value = Felt252Wrapper::from(*storage_value); @@ -116,7 +116,7 @@ pub fn update_contract_trie( contract_hash: Felt252Wrapper, contract_leaf_params: ContractLeafParams, bonsai_db: &Arc>, -) -> Result { +) -> anyhow::Result { let config = BonsaiStorageConfig::default(); let bonsai_db = bonsai_db.as_ref(); let mut bonsai_storage = diff --git a/crates/client/deoxys/src/commitments/lib.rs b/crates/client/deoxys/src/commitments/lib.rs index 4f2bb84547..d63f577531 100644 --- 
a/crates/client/deoxys/src/commitments/lib.rs +++ b/crates/client/deoxys/src/commitments/lib.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use blockifier::state::cached_state::CommitmentStateDiff; use indexmap::IndexMap; use mc_db::bonsai_db::BonsaiDb; -use mc_db::{BonsaiDbError, BonsaiDbs}; +use mc_db::BonsaiDbs; use mp_block::state_update::StateUpdateWrapper; use mp_felt::Felt252Wrapper; use mp_hashers::poseidon::PoseidonHasher; @@ -15,6 +15,7 @@ use starknet_api::hash::StarkFelt; use starknet_api::state::StorageKey; use starknet_api::transaction::Event; use tokio::join; +use tokio::task::{spawn_blocking, JoinSet}; use super::classes::{get_class_trie_root, update_class_trie}; use super::contracts::{get_contract_trie_root, update_contract_trie, update_storage_trie, ContractLeafParams}; @@ -145,33 +146,90 @@ where /// # Returns /// /// The updated state root as a `Felt252Wrapper`. -pub fn update_state_root( +pub async fn update_state_root( csd: CommitmentStateDiff, bonsai_dbs: BonsaiDbs, -) -> Result { - let mut contract_trie_root = Felt252Wrapper::default(); - let mut class_trie_root = Felt252Wrapper::default(); +) -> anyhow::Result { + let arc_csd = Arc::new(csd); + let arc_bonsai_dbs = Arc::new(bonsai_dbs); + + let contract_trie_root = contract_trie_root(Arc::clone(&arc_csd), Arc::clone(&arc_bonsai_dbs)).await?; + + let class_trie_root = class_trie_root(Arc::clone(&arc_csd), Arc::clone(&arc_bonsai_dbs))?; - for (contract_address, class_hash) in csd.address_to_class_hash.iter() { - let storage_root = update_storage_trie(contract_address, csd.clone(), &bonsai_dbs.storage) - .expect("Failed to update storage trie"); - let nonce = csd.address_to_nonce.get(contract_address).unwrap_or(&Felt252Wrapper::default().into()).clone(); + let state_root = calculate_state_root::(contract_trie_root, class_trie_root); - let contract_leaf_params = - ContractLeafParams { class_hash: class_hash.clone().into(), storage_root, nonce: nonce.into() }; + Ok(state_root) +} - contract_trie_root = 
- update_contract_trie(contract_address.clone().into(), contract_leaf_params, &bonsai_dbs.contract)?; +async fn contract_trie_root( + csd: Arc, + bonsai_dbs: Arc>, +) -> anyhow::Result { + let mut task_set = spawn_blocking(move || { + let mut task_set = JoinSet::new(); + + csd.address_to_class_hash.iter().for_each(|(contract_address, class_hash)| { + let csd_clone = Arc::clone(&csd); + let bonsai_dbs_clone = Arc::clone(&bonsai_dbs); + + task_set.spawn(contract_trie_root_loop( + csd_clone, + bonsai_dbs_clone, + contract_address.clone(), + class_hash.clone(), + )); + }); + + task_set + }) + .await?; + + let mut contract_trie_root = Felt252Wrapper::ZERO; + while let Some(res) = task_set.join_next().await { + contract_trie_root = match res? { + Ok(trie_root) => trie_root, + Err(e) => { + task_set.abort_all(); + return Err(e); + } + } } + Ok(contract_trie_root) +} + +async fn contract_trie_root_loop( + csd: Arc, + bonsai_dbs: Arc>, + contract_address: ContractAddress, + class_hash: ClassHash, +) -> anyhow::Result { + let storage_root = + update_storage_trie(&contract_address, &csd, &bonsai_dbs.storage).expect("Failed to update storage trie"); + let nonce = csd.address_to_nonce.get(&contract_address).unwrap_or(&Felt252Wrapper::default().into()).clone(); + + let contract_leaf_params = + ContractLeafParams { class_hash: class_hash.clone().into(), storage_root, nonce: nonce.into() }; + + update_contract_trie(contract_address.into(), contract_leaf_params, &bonsai_dbs.contract) +} + +fn class_trie_root( + csd: Arc, + bonsai_dbs: Arc>, +) -> anyhow::Result { + let mut class_trie_root = Felt252Wrapper::default(); + + // Based on benchmarks the execution cost of computing the class trie root is negligible + // compared to the contract trie root. It is likely that parallelizing this would yield no + // observable benefits.
for (class_hash, compiled_class_hash) in csd.class_hash_to_compiled_class_hash.iter() { class_trie_root = update_class_trie(class_hash.clone().into(), compiled_class_hash.clone().into(), &bonsai_dbs.class)?; } - let state_root = calculate_state_root::(contract_trie_root, class_trie_root); - - Ok(state_root) + Ok(class_trie_root) } /// Retrieves and compute the actual state root. diff --git a/crates/client/deoxys/src/l2.rs b/crates/client/deoxys/src/l2.rs index 2a90a12d4a..862c21d156 100644 --- a/crates/client/deoxys/src/l2.rs +++ b/crates/client/deoxys/src/l2.rs @@ -282,7 +282,7 @@ async fn fetch_state_update( .await .map_err(|e| format!("failed to get state update: {e}"))?; - verify_l2(block_number, &state_update, bonsai_dbs)?; + verify_l2(block_number, &state_update, bonsai_dbs).await?; Ok(state_update) } @@ -294,7 +294,7 @@ async fn fetch_genesis_state_update( let state_update = provider.get_state_update(BlockId::Number(0)).await.map_err(|e| format!("failed to get state update: {e}"))?; - verify_l2(0, &state_update, bonsai_dbs)?; + verify_l2(0, &state_update, bonsai_dbs).await?; Ok(state_update) } @@ -447,14 +447,18 @@ pub fn update_l2(state_update: L2StateUpdate) { } /// Verify and update the L2 state according to the latest state update -pub fn verify_l2( +pub async fn verify_l2( block_number: u64, state_update: &StateUpdate, bonsai_dbs: BonsaiDbs, ) -> Result<(), String> { let state_update_wrapper = StateUpdateWrapper::from(state_update); + let csd = build_commitment_state_diff(state_update_wrapper.clone()); - let state_root = update_state_root(csd, bonsai_dbs).map_err(|e| format!("Failed to update state root: {e}"))?; + + let state_root = + update_state_root(csd, bonsai_dbs).await.map_err(|e| format!("Failed to update state root: {e}"))?; + let block_hash = state_update.block_hash.expect("Block hash not found in state update"); update_l2(L2StateUpdate { @@ -462,7 +466,6 @@ pub fn verify_l2( global_root: state_root.into(), block_hash: 
Felt252Wrapper::from(block_hash).into(), }); - println!("➡️ block_number {:?}, block_hash {:?}, state_root {:?}", block_number, block_hash, state_root); Ok(()) } diff --git a/crates/client/deoxys/src/utils/convert.rs b/crates/client/deoxys/src/utils/convert.rs index efb60026d5..f0c920937f 100644 --- a/crates/client/deoxys/src/utils/convert.rs +++ b/crates/client/deoxys/src/utils/convert.rs @@ -20,7 +20,9 @@ pub async fn block(block: p::Block) -> mp_block::Block { let sequencer_address = block.sequencer_address.map_or(contract_address(FieldElement::ZERO), contract_address); let transaction_count = transactions.len() as u128; let event_count = events.len() as u128; + let (transaction_commitment, event_commitment) = commitments(&transactions, &events, block_number).await; + let protocol_version = starknet_version(&block.starknet_version); let l1_gas_price = resource_price(block.eth_l1_gas_price); let extra_data = block.block_hash.map(|h| sp_core::U256::from_big_endian(&h.to_bytes_be())); From 9ceb337ec7b250001de6aaffc4e3cd6f3eeae79d Mon Sep 17 00:00:00 2001 From: Trantorian Date: Tue, 27 Feb 2024 04:36:55 +0000 Subject: [PATCH 6/7] perf(state_commitment): :zap: Finished improving l2 verification performance --- crates/client/deoxys/src/commitments/lib.rs | 4 ++++ crates/client/deoxys/src/l2.rs | 1 + 2 files changed, 5 insertions(+) diff --git a/crates/client/deoxys/src/commitments/lib.rs b/crates/client/deoxys/src/commitments/lib.rs index d63f577531..afc25916c0 100644 --- a/crates/client/deoxys/src/commitments/lib.rs +++ b/crates/client/deoxys/src/commitments/lib.rs @@ -166,6 +166,8 @@ async fn contract_trie_root( csd: Arc, bonsai_dbs: Arc>, ) -> anyhow::Result { + // Risk of starving the thread pool (execution over 1s in some cases), must be run in a + // blocking-safe thread. Main bottleneck is still calling `commit` on the Bonsai db. 
let mut task_set = spawn_blocking(move || { let mut task_set = JoinSet::new(); @@ -185,6 +187,8 @@ async fn contract_trie_root( }) .await?; + // The order in which contract trie roots are waited for is not important since each call to + // `update_contract_trie` in `contract_trie_root` mutates the Deoxys db. let mut contract_trie_root = Felt252Wrapper::ZERO; while let Some(res) = task_set.join_next().await { contract_trie_root = match res? { diff --git a/crates/client/deoxys/src/l2.rs b/crates/client/deoxys/src/l2.rs index 862c21d156..a5a01f8a5e 100644 --- a/crates/client/deoxys/src/l2.rs +++ b/crates/client/deoxys/src/l2.rs @@ -456,6 +456,7 @@ pub async fn verify_l2( let csd = build_commitment_state_diff(state_update_wrapper.clone()); + // Main l2 sync bottleneck HERE! let state_root = update_state_root(csd, bonsai_dbs).await.map_err(|e| format!("Failed to update state root: {e}"))?; From 3e84910cc2c52b1aaf613badefdfce089c2cfe7e Mon Sep 17 00:00:00 2001 From: Trantorian Date: Tue, 27 Feb 2024 04:51:31 +0000 Subject: [PATCH 7/7] chore(changelog): :green_heart: updated changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38002fd878..8c18579816 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ git # Madara Changelog ## Next release +- perf(verify_l2): parallelized l2 state root update - perf(state_commitment): parallelized state commitment hash computations - fix(L1): fix l1 thread with battle tested implementation + removed l1-l2 - fix: update and store ConfigFetch in l2 sync(), chainId rpc call