Skip to content

Commit

Permalink
Merge pull request #133 from Trantorian1/perf/fetch_block
Browse files Browse the repository at this point in the history
perf ⚡ Improved Event and Transaction commitment performance
  • Loading branch information
antiyro authored Feb 27, 2024
2 parents 3f00b99 + 3e84910 commit 3118703
Show file tree
Hide file tree
Showing 14 changed files with 270 additions and 145 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@ git # Madara Changelog

## Next release

- perf(verify_l2): parallelized l2 state root update
- perf(state_commitment): parallelized state commitment hash computations
- fix(L1): fix l1 thread with battle tested implementation + removed l1-l2
- fix: update and store ConfigFetch in l2 sync(), chainId rpc call
- fix: get_events paging with continuation_token
Expand Down
11 changes: 11 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,7 @@ tracing = "0.1.37"
tracing-subscriber = "0.3.16"
url = "2.4.1"
validator = "0.12"
crossbeam-skiplist = "0.1"

[patch."https://github.com/w3f/ring-vrf"]
bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf?rev=3ddc20", version = "0.0.4", rev = "3ddc20" }
8 changes: 3 additions & 5 deletions crates/client/db/src/bonsai_db.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
use std::default;
use std::marker::PhantomData;
use std::sync::atomic::AtomicU32;
use std::sync::Arc;

use bonsai_trie::id::Id;
Expand Down Expand Up @@ -58,9 +56,9 @@ pub struct BonsaiDb<B: BlockT> {

/// Classifies a [`DatabaseKey`] into its corresponding [`KeyType`] column category.
///
/// The payload bytes of each variant are irrelevant to the classification, so they
/// are ignored with `_` bindings.
pub fn key_type(key: &DatabaseKey) -> KeyType {
    // Diff artifact fixed: the fused old/new hunk listed each arm twice (once binding
    // `bytes`, once with `_`); only one arm per variant is valid Rust. `return` is
    // also redundant in a tail-position match expression.
    match key {
        DatabaseKey::Trie(_) => KeyType::Trie,
        DatabaseKey::Flat(_) => KeyType::Flat,
        DatabaseKey::TrieLog(_) => KeyType::TrieLog,
    }
}

Expand Down
1 change: 0 additions & 1 deletion crates/client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ mod meta_db;

use std::marker::PhantomData;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicU32;
use std::sync::Arc;

use bonsai_db::{BonsaiDb, TrieColumn};
Expand Down
1 change: 1 addition & 0 deletions crates/client/deoxys/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ serde = { workspace = true, default-features = true }
tokio = { workspace = true, features = ["macros", "parking_lot", "test-util"] }
url = { workspace = true }
validator = { workspace = true, features = ["derive"] }
crossbeam-skiplist ={ workspace = true }

madara-runtime = { workspace = true }
parity-scale-codec = { workspace = true, features = ["derive"] }
Expand Down
2 changes: 1 addition & 1 deletion crates/client/deoxys/src/commitments/classes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use std::sync::Arc;
use bitvec::vec::BitVec;
use bonsai_trie::id::{BasicId, BasicIdBuilder};
use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig};
use mc_db::bonsai_db::{BonsaiDb, TrieColumn};
use mc_db::bonsai_db::BonsaiDb;
use mc_db::BonsaiDbError;
use mp_felt::Felt252Wrapper;
use mp_hashers::poseidon::PoseidonHasher;
Expand Down
10 changes: 4 additions & 6 deletions crates/client/deoxys/src/commitments/contracts.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,10 @@
use std::sync::Arc;

use bitvec::prelude::BitVec;
use blockifier::execution::contract_address;
use blockifier::state::cached_state::CommitmentStateDiff;
use bonsai_trie::id::{BasicId, BasicIdBuilder};
use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig};
use ethers::addressbook::Contract;
use mc_db::bonsai_db::{BonsaiDb, TrieColumn};
use mc_db::bonsai_db::BonsaiDb;
use mc_db::BonsaiDbError;
use mp_felt::Felt252Wrapper;
use mp_hashers::pedersen::PedersenHasher;
Expand Down Expand Up @@ -34,15 +32,15 @@ pub struct ContractLeafParams {
/// The storage root hash.
pub fn update_storage_trie<B: BlockT>(
contract_address: &ContractAddress,
commitment_state_diff: CommitmentStateDiff,
csd: &Arc<CommitmentStateDiff>,
bonsai_db: &Arc<BonsaiDb<B>>,
) -> Result<Felt252Wrapper, BonsaiDbError> {
let config = BonsaiStorageConfig::default();
let bonsai_db = bonsai_db.as_ref();
let mut bonsai_storage: BonsaiStorage<BasicId, &BonsaiDb<B>, Pedersen> =
BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage");

if let Some(updates) = commitment_state_diff.storage_updates.get(contract_address) {
if let Some(updates) = csd.storage_updates.get(contract_address) {
for (storage_key, storage_value) in updates {
let key = BitVec::from_vec(Felt252Wrapper::from(storage_key.0.0).0.to_bytes_be()[..31].to_vec());
let value = Felt252Wrapper::from(*storage_value);
Expand Down Expand Up @@ -118,7 +116,7 @@ pub fn update_contract_trie<B: BlockT>(
contract_hash: Felt252Wrapper,
contract_leaf_params: ContractLeafParams,
bonsai_db: &Arc<BonsaiDb<B>>,
) -> Result<Felt252Wrapper, BonsaiDbError> {
) -> anyhow::Result<Felt252Wrapper> {
let config = BonsaiStorageConfig::default();
let bonsai_db = bonsai_db.as_ref();
let mut bonsai_storage =
Expand Down
119 changes: 68 additions & 51 deletions crates/client/deoxys/src/commitments/events.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use std::sync::Arc;

use anyhow::Ok;
use bitvec::vec::BitVec;
use bonsai_trie::databases::HashMapDb;
use bonsai_trie::id::{BasicId, BasicIdBuilder};
Expand All @@ -14,6 +13,7 @@ use starknet_api::transaction::Event;
use starknet_ff::FieldElement;
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::Pedersen;
use tokio::task::{spawn_blocking, JoinSet};

/// Calculate the hash of the event.
///
Expand Down Expand Up @@ -57,38 +57,37 @@ pub fn calculate_event_hash<H: HasherT>(event: &Event) -> FieldElement {
/// # Returns
///
/// The event commitment as `Felt252Wrapper`.
pub fn event_commitment<B: BlockT>(
events: &[Event],
bonsai_db: &Arc<BonsaiDb<B>>,
) -> Result<Felt252Wrapper, anyhow::Error> {
if events.len() > 0 {
let config = BonsaiStorageConfig::default();
let bonsai_db = bonsai_db.as_ref();
let mut bonsai_storage =
BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage");

let mut id_builder = BasicIdBuilder::new();

let zero = id_builder.new_id();
bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage");

for (i, event) in events.iter().enumerate() {
let event_hash = calculate_event_hash::<PedersenHasher>(event);
let key = BitVec::from_vec(i.to_be_bytes().to_vec());
let value = Felt::from(Felt252Wrapper::from(event_hash));
bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
}

let id = id_builder.new_id();
bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
#[deprecated = "use `memory_event_commitment` instead"]
pub fn event_commitment<B: BlockT>(events: &[Event], bonsai_db: &Arc<BonsaiDb<B>>) -> Result<Felt252Wrapper, String> {
if events.is_empty() {
return Ok(Felt252Wrapper::ZERO);
}

let config = BonsaiStorageConfig::default();
let bonsai_db = bonsai_db.as_ref();
let mut bonsai_storage =
BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage");

let mut id_builder = BasicIdBuilder::new();

let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash");
bonsai_storage.revert_to(zero).unwrap();
let zero = id_builder.new_id();
bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage");

Ok(Felt252Wrapper::from(root_hash))
} else {
Ok(Felt252Wrapper::ZERO)
for (i, event) in events.iter().enumerate() {
let event_hash = calculate_event_hash::<PedersenHasher>(event);
let key = BitVec::from_vec(i.to_be_bytes().to_vec());
let value = Felt::from(Felt252Wrapper::from(event_hash));
bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
}

let id = id_builder.new_id();
bonsai_storage.commit(id).map_err(|_| format!("Failed to commit to bonsai storage"))?;

// restores the Bonsai Trie to it's previous state
let root_hash = bonsai_storage.root_hash().map_err(|_| format!("Failed to get root hash"))?;
bonsai_storage.revert_to(zero).unwrap();

Ok(Felt252Wrapper::from(root_hash))
}

/// Calculate the event commitment in memory using HashMapDb (which is more efficient for this
Expand All @@ -101,27 +100,45 @@ pub fn event_commitment<B: BlockT>(
/// # Returns
///
/// The event commitment as `Felt252Wrapper`.
pub fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper, anyhow::Error> {
if !events.is_empty() {
let config = BonsaiStorageConfig::default();
let bonsai_db = HashMapDb::<BasicId>::default();
let mut bonsai_storage =
BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage");

for (i, event) in events.iter().enumerate() {
let event_hash = calculate_event_hash::<PedersenHasher>(event);
let key = BitVec::from_vec(i.to_be_bytes().to_vec());
let value = Felt::from(Felt252Wrapper::from(event_hash));
bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
}

let mut id_builder = BasicIdBuilder::new();
let id = id_builder.new_id();
bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
pub async fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper, String> {
if events.is_empty() {
return Ok(Felt252Wrapper::ZERO);
}

let config = BonsaiStorageConfig::default();
let bonsai_db = HashMapDb::<BasicId>::default();
let mut bonsai_storage =
BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage");

let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash");
Ok(Felt252Wrapper::from(root_hash))
} else {
Ok(Felt252Wrapper::ZERO)
// event hashes are computed in parallel
let mut task_set = JoinSet::new();
events.iter().cloned().enumerate().for_each(|(i, event)| {
task_set.spawn(async move { (i, calculate_event_hash::<PedersenHasher>(&event)) });
});

// once event hashes have finished computing, they are inserted into the local Bonsai db
while let Some(res) = task_set.join_next().await {
let (i, event_hash) = res.map_err(|e| format!("Failed to retrieve event hash: {e}"))?;
let key = BitVec::from_vec(i.to_be_bytes().to_vec());
let value = Felt::from(Felt252Wrapper::from(event_hash));
bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
}

// Note that committing changes still has the greatest performance hit
// as this is where the root hash is calculated. Due to the Merkle structure
// of Bonsai Tries, this results in a trie size that grows very rapidly with
// each new insertion. It seems that the only vector of optimization here
// would be to optimize the tree traversal and hash computation.
let mut id_builder = BasicIdBuilder::new();
let id = id_builder.new_id();

// run in a blocking-safe thread to avoid starving the thread pool
let root_hash = spawn_blocking(move || {
bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
bonsai_storage.root_hash().expect("Failed to get root hash")
})
.await
.map_err(|e| format!("Failed to computed event root hash: {e}"))?;

Ok(Felt252Wrapper::from(root_hash))
}
Loading

0 comments on commit 3118703

Please sign in to comment.