From 0f6335bbd7b60f8bf88c9fe0e43a7728a8fd14c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 24 Jan 2024 15:20:06 +0000 Subject: [PATCH 01/12] merkle tree filter --- crates/apps/src/lib/node/ledger/shell/mod.rs | 10 +++ .../apps/src/lib/node/ledger/storage/mod.rs | 10 +++ .../ethereum_bridge/bridge_pool_vp.rs | 1 + crates/state/src/lib.rs | 81 ++++++++++++------- 4 files changed, 75 insertions(+), 27 deletions(-) diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index a9cbba5efe..8434ef1843 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -363,6 +363,15 @@ where event_log: EventLog, } +/// Merkle tree storage key filter. Return `false` for keys that shouldn't be +/// merkelized. +pub fn is_merkelized_storage_key( + key: &namada_sdk::types::storage::Key, +) -> bool { + !token::storage_key::is_masp_key(key) + && !namada::ibc::storage::is_ibc_counter_key(key) +} + /// Channels for communicating with an Ethereum oracle. 
#[derive(Debug)] pub struct EthereumOracleChannels { @@ -431,6 +440,7 @@ where native_token, db_cache, config.shell.storage_read_past_height_limit, + is_merkelized_storage_key, ); storage .load_last_state() diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index 6b2b8e461d..1fc18e763d 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -75,6 +75,7 @@ mod tests { use tempfile::TempDir; use super::*; + use crate::node::ledger::shell::is_merkelized_storage_key; #[test] fn test_crud_value() { @@ -86,6 +87,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); let key = Key::parse("key").expect("cannot parse the key string"); let value: u64 = 1; @@ -138,6 +140,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -239,6 +242,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); storage .load_last_state() @@ -263,6 +267,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -309,6 +314,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -376,6 +382,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); // 1. 
For each `blocks_write_value`, write the current block height if @@ -469,6 +476,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); let num_keys = 5; @@ -587,6 +595,7 @@ mod tests { address::nam(), None, Some(5), + is_merkelized_storage_key, ); let new_epoch_start = BlockHeight(1); let signed_root_key = bridge_pool::get_signed_root_key(); @@ -692,6 +701,7 @@ mod tests { address::nam(), None, None, + is_merkelized_storage_key, ); let mut storage = WlStorage { storage, diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index f1d50cad81..82fa60eaa7 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -920,6 +920,7 @@ mod test_bridge_pool_vp { address::nam(), None, None, + namada_sdk::state::merkelize_all_keys, ), write_log: Default::default(), }; diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 5635e8226e..838e8c3c1c 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -23,7 +23,7 @@ pub use namada_core::types::storage::{ }; use namada_core::types::time::DateTimeUtc; pub use namada_core::types::token::ConversionState; -use namada_core::types::{encode, ethereum_structs}; +use namada_core::types::{encode, ethereum_structs, storage}; use namada_gas::{ MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, @@ -104,6 +104,8 @@ where pub eth_events_queue: EthEventsQueue, /// How many block heights in the past can the storage be queried pub storage_read_past_height_limit: Option, + /// Static merkle tree storage key filter + pub merkle_tree_key_filter: fn(&storage::Key) -> bool, } /// Last committed block @@ -142,6 +144,10 @@ pub struct BlockStorage { pub pred_epochs: Epochs, } +pub fn merkelize_all_keys(_key: &storage::Key) -> bool { + true +} + #[allow(missing_docs)] 
#[derive(Error, Debug)] pub enum Error { @@ -179,6 +185,7 @@ where native_token: Address, cache: Option<&D::Cache>, storage_read_past_height_limit: Option, + merkle_tree_key_filter: fn(&storage::Key) -> bool, ) -> Self { let block = BlockStorage { tree: MerkleTree::default(), @@ -209,6 +216,7 @@ where ethereum_height: None, eth_events_queue: EthEventsQueue::default(), storage_read_past_height_limit, + merkle_tree_key_filter, } } @@ -372,6 +380,10 @@ where if height == BlockHeight(0) || height >= self.get_last_block_height() { self.read(key) } else { + if !(self.merkle_tree_key_filter)(key) { + return Ok((None, 0)); + } + match self.db.read_subspace_val_with_height( key, height, @@ -586,36 +598,43 @@ where .expect("the key should be parsable"); let new_key = Key::parse(new.0.clone()) .expect("the key should be parsable"); + // compare keys as String match old.0.cmp(&new.0) { Ordering::Equal => { // the value was updated - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; + if (self.merkle_tree_key_filter)(&new_key) { + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } old_diff = old_diff_iter.next(); new_diff = new_diff_iter.next(); } Ordering::Less => { // the value was deleted - tree.delete(&old_key)?; + if (self.merkle_tree_key_filter)(&old_key) { + tree.delete(&old_key)?; + } old_diff = old_diff_iter.next(); } Ordering::Greater => { // the value was inserted - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; + if (self.merkle_tree_key_filter)(&new_key) { + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } new_diff = new_diff_iter.next(); } } @@ -624,7 +643,11 @@ where // the value was deleted let key = 
Key::parse(old.0.clone()) .expect("the key should be parsable"); - tree.delete(&key)?; + + if (self.merkle_tree_key_filter)(&key) { + tree.delete(&key)?; + } + old_diff = old_diff_iter.next(); } (None, Some(new)) => { @@ -632,14 +655,17 @@ where let key = Key::parse(new.0.clone()) .expect("the key should be parsable"); - tree.update( - &key, - if is_pending_transfer_key(&key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; + if (self.merkle_tree_key_filter)(&key) { + tree.update( + &key, + if is_pending_transfer_key(&key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } + new_diff = new_diff_iter.next(); } (None, None) => break, @@ -1104,6 +1130,7 @@ pub mod testing { ethereum_height: None, eth_events_queue: EthEventsQueue::default(), storage_read_past_height_limit: Some(1000), + merkle_tree_key_filter: merkelize_all_keys, } } } From d68809f7fcd27a25c514b54840d7c5bf4be9f3c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 19 Jan 2024 09:48:19 +0000 Subject: [PATCH 02/12] ibc: storage keys refactor --- crates/ibc/src/storage.rs | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/crates/ibc/src/storage.rs b/crates/ibc/src/storage.rs index 2defc63ef9..2137a6e6fa 100644 --- a/crates/ibc/src/storage.rs +++ b/crates/ibc/src/storage.rs @@ -19,9 +19,10 @@ use namada_core::types::storage::{DbKeySeg, Key, KeySeg}; use sha2::{Digest, Sha256}; use thiserror::Error; -const CLIENTS_COUNTER: &str = "clients/counter"; -const CONNECTIONS_COUNTER: &str = "connections/counter"; -const CHANNELS_COUNTER: &str = "channelEnds/counter"; +const CLIENTS_COUNTER_PREFIX: &str = "clients"; +const CONNECTIONS_COUNTER_PREFIX: &str = "connections"; +const CHANNELS_COUNTER_PREFIX: &str = "channelEnds"; +const COUNTER_SEG: &str = "counter"; const DENOM: &str = "ibc_denom"; #[allow(missing_docs)] @@ -52,20 +53,20 @@ pub fn ibc_key(path: impl AsRef) -> Result { /// 
Returns a key of the IBC client counter pub fn client_counter_key() -> Key { - let path = CLIENTS_COUNTER.to_owned(); + let path = format!("{}/{}", CLIENTS_COUNTER_PREFIX, COUNTER_SEG); ibc_key(path).expect("Creating a key for the client counter shouldn't fail") } /// Returns a key of the IBC connection counter pub fn connection_counter_key() -> Key { - let path = CONNECTIONS_COUNTER.to_owned(); + let path = format!("{}/{}", CONNECTIONS_COUNTER_PREFIX, COUNTER_SEG); ibc_key(path) .expect("Creating a key for the connection counter shouldn't fail") } /// Returns a key of the IBC channel counter pub fn channel_counter_key() -> Key { - let path = CHANNELS_COUNTER.to_owned(); + let path = format!("{}/{}", CHANNELS_COUNTER_PREFIX, COUNTER_SEG); ibc_key(path) .expect("Creating a key for the channel counter shouldn't fail") } @@ -449,3 +450,15 @@ pub fn is_ibc_denom_key(key: &Key) -> Option<(String, String)> { _ => None, } } + +/// Returns true if the given key is for an IBC counter for clients, +/// connections, or channelEnds +pub fn is_ibc_counter_key(key: &Key) -> bool { + matches!(&key.segments[..], + [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix), DbKeySeg::StringSeg(counter)] + if addr == &Address::Internal(InternalAddress::Ibc) + && (prefix == CLIENTS_COUNTER_PREFIX + || prefix == CONNECTIONS_COUNTER_PREFIX + || prefix == CHANNELS_COUNTER_PREFIX) && counter == COUNTER_SEG + ) +} From 37acb7aaeb75a912f6a38a2a4c3dfe43997d8d34 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 17 Jan 2024 18:52:05 -0500 Subject: [PATCH 03/12] use consts instead of string literals --- .../src/lib/node/ledger/storage/rocksdb.rs | 27 ++++++++++++------- crates/storage/src/mockdb.rs | 15 +++++++---- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs index b98b60087e..3975952b95 100644 --- a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs +++ 
b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -87,6 +87,9 @@ const STATE_CF: &str = "state"; const BLOCK_CF: &str = "block"; const REPLAY_PROTECTION_CF: &str = "replay_protection"; +const OLD_DIFF_PREFIX: &str = "old"; +const NEW_DIFF_PREFIX: &str = "new"; + /// RocksDB handle #[derive(Debug)] pub struct RocksDB(rocksdb::DB); @@ -220,7 +223,7 @@ impl RocksDB { if let Some(old_value) = old_value { let old_val_key = key_prefix - .push(&"old".to_owned()) + .push(&OLD_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); @@ -231,7 +234,7 @@ impl RocksDB { if let Some(new_value) = new_value { let new_val_key = key_prefix - .push(&"new".to_owned()) + .push(&NEW_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); @@ -257,7 +260,7 @@ impl RocksDB { if let Some(old_value) = old_value { let old_val_key = key_prefix - .push(&"old".to_owned()) + .push(&OLD_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); @@ -266,7 +269,7 @@ impl RocksDB { if let Some(new_value) = new_value { let new_val_key = key_prefix - .push(&"new".to_owned()) + .push(&NEW_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); @@ -541,7 +544,7 @@ impl RocksDB { let diff_new_key_prefix = Key { segments: vec![ last_block.height.to_db_key(), - "new".to_string().to_db_key(), + NEW_DIFF_PREFIX.to_string().to_db_key(), ], }; { @@ -1196,7 +1199,7 @@ impl DB for RocksDB { let diffs_cf = self.get_column_family(DIFFS_CF)?; let key_prefix = Key::from(height.to_db_key()); let new_val_key = key_prefix - .push(&"new".to_owned()) + .push(&NEW_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); @@ -1212,7 +1215,7 @@ impl DB for RocksDB { } None => { let old_val_key = key_prefix - .push(&"old".to_owned()) + .push(&OLD_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? 
.join(key) .to_string(); @@ -1238,7 +1241,7 @@ impl DB for RocksDB { // Try to find the next diff on this key let key_prefix = Key::from(BlockHeight(raw_height).to_db_key()); let old_val_key = key_prefix - .push(&"old".to_owned()) + .push(&OLD_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); @@ -1253,7 +1256,7 @@ impl DB for RocksDB { // Check if the value was created at this height instead, // which would mean that it wasn't present before let new_val_key = key_prefix - .push(&"new".to_owned()) + .push(&NEW_DIFF_PREFIX.to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); @@ -1570,7 +1573,11 @@ fn iter_diffs_prefix<'a>( let diffs_cf = db .get_column_family(DIFFS_CF) .expect("{DIFFS_CF} column family should exist"); - let kind = if is_old { "old" } else { "new" }; + let kind = if is_old { + OLD_DIFF_PREFIX + } else { + NEW_DIFF_PREFIX + }; let stripped_prefix = Some( Key::from(height.to_db_key()) .push(&kind.to_string()) diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index ad643ca341..6059481fae 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -28,6 +28,11 @@ use crate::db::{ use crate::tx_queue::TxQueue; use crate::types::{KVBytes, PrefixIterator}; +const SUBSPACE_CF: &str = "subspace"; + +const OLD_DIFF_PREFIX: &str = "old"; +const NEW_DIFF_PREFIX: &str = "new"; + /// An in-memory DB for testing. 
#[derive(Debug, Default)] pub struct MockDB( @@ -460,7 +465,7 @@ impl DB for MockDB { } fn read_subspace_val(&self, key: &Key) -> Result>> { - let key = Key::parse("subspace").map_err(Error::KeyError)?.join(key); + let key = Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key); Ok(self.0.borrow().get(&key.to_string()).cloned()) } @@ -523,12 +528,12 @@ impl DB for MockDB { match db.insert(subspace_key.to_string(), value.to_owned()) { Some(prev_value) => { let old_key = diff_prefix - .push(&"old".to_string().to_db_key()) + .push(&OLD_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(old_key.to_string(), prev_value.clone()); let new_key = diff_prefix - .push(&"new".to_string().to_db_key()) + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(new_key.to_string(), value.to_owned()); @@ -536,7 +541,7 @@ impl DB for MockDB { } None => { let new_key = diff_prefix - .push(&"new".to_string().to_db_key()) + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(new_key.to_string(), value.to_owned()); @@ -553,7 +558,7 @@ impl DB for MockDB { key: &Key, ) -> Result { let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); + Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key); let diff_prefix = Key::from(height.to_db_key()); let mut db = self.0.borrow_mut(); Ok(match db.remove(&subspace_key.to_string()) { From a751e1a627a24ec3385db6ec9f37925184faeb85 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 24 Jan 2024 17:02:11 -0500 Subject: [PATCH 04/12] bypass merklization and amend diff writing for specified keys --- .../src/lib/node/ledger/storage/rocksdb.rs | 183 ++++++++++++------ crates/benches/host_env.rs | 2 +- crates/state/src/lib.rs | 47 +++-- crates/storage/src/db.rs | 4 + crates/storage/src/lib.rs | 6 +- crates/storage/src/mockdb.rs | 76 +++++++- 6 files changed, 238 insertions(+), 80 deletions(-) diff --git 
a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs index 3975952b95..6705388e48 100644 --- a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -217,31 +217,55 @@ impl RocksDB { key: &Key, old_value: Option<&[u8]>, new_value: Option<&[u8]>, + persist_diffs: bool, ) -> Result<()> { let cf = self.get_column_family(DIFFS_CF)?; - let key_prefix = Key::from(height.to_db_key()); + let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; if let Some(old_value) = old_value { - let old_val_key = key_prefix - .push(&OLD_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); self.0 .put_cf(cf, old_val_key, old_value) .map_err(|e| Error::DBError(e.into_string()))?; } if let Some(new_value) = new_value { - let new_val_key = key_prefix - .push(&NEW_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); self.0 .put_cf(cf, new_val_key, new_value) .map_err(|e| Error::DBError(e.into_string()))?; } + + // If not persisting the diffs, remove the last diffs. + if !persist_diffs && height > BlockHeight::first() { + let mut height = height.prev_height(); + while height >= BlockHeight::first() { + let (old_diff_key, new_diff_key) = + old_and_new_diff_key(key, height)?; + let has_old_diff = self + .0 + .get_cf(cf, &old_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? + .is_some(); + let has_new_diff = self + .0 + .get_cf(cf, &new_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? 
+ .is_some(); + if has_old_diff { + self.0 + .delete_cf(cf, old_diff_key) + .map_err(|e| Error::DBError(e.into_string()))?; + } + if has_new_diff { + self.0 + .delete_cf(cf, new_diff_key) + .map_err(|e| Error::DBError(e.into_string()))?; + } + if has_old_diff || has_new_diff { + break; + } + height = height.prev_height(); + } + } Ok(()) } @@ -254,27 +278,47 @@ impl RocksDB { key: &Key, old_value: Option<&[u8]>, new_value: Option<&[u8]>, + persist_diffs: bool, ) -> Result<()> { let cf = self.get_column_family(DIFFS_CF)?; - let key_prefix = Key::from(height.to_db_key()); + let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; if let Some(old_value) = old_value { - let old_val_key = key_prefix - .push(&OLD_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); batch.0.put_cf(cf, old_val_key, old_value); } if let Some(new_value) = new_value { - let new_val_key = key_prefix - .push(&NEW_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); batch.0.put_cf(cf, new_val_key, new_value); } + + // If not persisting the diffs, remove the last diffs. + if !persist_diffs && height > BlockHeight::first() { + let mut height = height.prev_height(); + while height >= BlockHeight::first() { + let (old_diff_key, new_diff_key) = + old_and_new_diff_key(key, height)?; + let has_old_diff = self + .0 + .get_cf(cf, &old_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? + .is_some(); + let has_new_diff = self + .0 + .get_cf(cf, &new_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? 
+ .is_some(); + if has_old_diff { + batch.0.delete_cf(cf, old_diff_key); + } + if has_new_diff { + batch.0.delete_cf(cf, new_diff_key); + } + if has_old_diff || has_new_diff { + break; + } + height = height.prev_height(); + } + } Ok(()) } @@ -1197,12 +1241,7 @@ impl DB for RocksDB { ) -> Result>> { // Check if the value changed at this height let diffs_cf = self.get_column_family(DIFFS_CF)?; - let key_prefix = Key::from(height.to_db_key()); - let new_val_key = key_prefix - .push(&NEW_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); + let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; // If it has a "new" val, it was written at this height match self @@ -1214,13 +1253,8 @@ impl DB for RocksDB { return Ok(Some(new_val)); } None => { - let old_val_key = key_prefix - .push(&OLD_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); // If it has an "old" val, it was deleted at this height - if self.0.key_may_exist_cf(diffs_cf, old_val_key.clone()) { + if self.0.key_may_exist_cf(diffs_cf, &old_val_key) { // check if it actually exists if self .0 @@ -1239,15 +1273,11 @@ impl DB for RocksDB { let mut raw_height = height.0 + 1; loop { // Try to find the next diff on this key - let key_prefix = Key::from(BlockHeight(raw_height).to_db_key()); - let old_val_key = key_prefix - .push(&OLD_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? 
- .join(key) - .to_string(); + let (old_val_key, new_val_key) = + old_and_new_diff_key(key, BlockHeight(raw_height))?; let old_val = self .0 - .get_cf(diffs_cf, old_val_key) + .get_cf(diffs_cf, &old_val_key) .map_err(|e| Error::DBError(e.into_string()))?; // If it has an "old" val, it's the one we're looking for match old_val { @@ -1255,12 +1285,7 @@ impl DB for RocksDB { None => { // Check if the value was created at this height instead, // which would mean that it wasn't present before - let new_val_key = key_prefix - .push(&NEW_DIFF_PREFIX.to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); - if self.0.key_may_exist_cf(diffs_cf, new_val_key.clone()) { + if self.0.key_may_exist_cf(diffs_cf, &new_val_key) { // check if it actually exists if self .0 @@ -1288,6 +1313,7 @@ impl DB for RocksDB { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + persist_diffs: bool, ) -> Result { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; let value = value.as_ref(); @@ -1303,11 +1329,18 @@ impl DB for RocksDB { key, Some(&prev_value), Some(value), + persist_diffs, )?; size_diff } None => { - self.write_subspace_diff(height, key, None, Some(value))?; + self.write_subspace_diff( + height, + key, + None, + Some(value), + persist_diffs, + )?; value.len() as i64 } }; @@ -1324,6 +1357,7 @@ impl DB for RocksDB { &mut self, height: BlockHeight, key: &Key, + persist_diffs: bool, ) -> Result { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; @@ -1335,7 +1369,13 @@ impl DB for RocksDB { { Some(prev_value) => { let prev_len = prev_value.len() as i64; - self.write_subspace_diff(height, key, Some(&prev_value), None)?; + self.write_subspace_diff( + height, + key, + Some(&prev_value), + None, + persist_diffs, + )?; prev_len } None => 0, @@ -1363,6 +1403,7 @@ impl DB for RocksDB { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + persist_diffs: bool, ) -> Result { let value = value.as_ref(); let subspace_cf = self.get_column_family(SUBSPACE_CF)?; 
@@ -1380,6 +1421,7 @@ impl DB for RocksDB { key, Some(&old_value), Some(value), + persist_diffs, )?; size_diff } @@ -1390,6 +1432,7 @@ impl DB for RocksDB { key, None, Some(value), + persist_diffs, )?; value.len() as i64 } @@ -1406,6 +1449,7 @@ impl DB for RocksDB { batch: &mut Self::WriteBatch, height: BlockHeight, key: &Key, + persist_diffs: bool, ) -> Result { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; @@ -1424,6 +1468,7 @@ impl DB for RocksDB { key, Some(&prev_value), None, + persist_diffs, )?; prev_len } @@ -1669,6 +1714,22 @@ fn make_iter_read_opts(prefix: Option) -> ReadOptions { impl DBWriteBatch for RocksDBWriteBatch {} +fn old_and_new_diff_key( + key: &Key, + height: BlockHeight, +) -> Result<(String, String)> { + let key_prefix = Key::from(height.to_db_key()); + let old = key_prefix + .push(&OLD_DIFF_PREFIX.to_owned()) + .map_err(Error::KeyError)? + .join(key); + let new = key_prefix + .push(&NEW_DIFF_PREFIX.to_owned()) + .map_err(Error::KeyError)? + .join(key); + Ok((old.to_string(), new.to_string())) +} + fn unknown_key_error(key: &str) -> Result<()> { Err(Error::UnknownKey { key: key.to_owned(), @@ -1759,6 +1820,7 @@ mod test { last_height, &Key::parse("test").unwrap(), vec![1_u8, 1, 1, 1], + true, ) .unwrap(); @@ -1794,11 +1856,12 @@ mod test { last_height, &batch_key, vec![1_u8, 1, 1, 1], + true, ) .unwrap(); db.exec_batch(batch.0).unwrap(); - db.write_subspace_val(last_height, &key, vec![1_u8, 1, 1, 0]) + db.write_subspace_val(last_height, &key, vec![1_u8, 1, 1, 0], true) .unwrap(); let mut batch = RocksDB::batch(); @@ -1808,11 +1871,12 @@ mod test { last_height, &batch_key, vec![2_u8, 2, 2, 2], + true, ) .unwrap(); db.exec_batch(batch.0).unwrap(); - db.write_subspace_val(last_height, &key, vec![2_u8, 2, 2, 0]) + db.write_subspace_val(last_height, &key, vec![2_u8, 2, 2, 0], true) .unwrap(); let prev_value = db @@ -1851,11 +1915,11 @@ mod test { let mut batch = RocksDB::batch(); let last_height = BlockHeight(222); - 
db.batch_delete_subspace_val(&mut batch, last_height, &batch_key) + db.batch_delete_subspace_val(&mut batch, last_height, &batch_key, true) .unwrap(); db.exec_batch(batch.0).unwrap(); - db.delete_subspace_val(last_height, &key).unwrap(); + db.delete_subspace_val(last_height, &key, true).unwrap(); let deleted_value = db .read_subspace_val_with_height( @@ -1904,7 +1968,7 @@ mod test { let mut batch = RocksDB::batch(); let height = BlockHeight(1); for key in &all_keys { - db.batch_write_subspace_val(&mut batch, height, key, [0_u8]) + db.batch_write_subspace_val(&mut batch, height, key, [0_u8], true) .unwrap(); } db.exec_batch(batch.0).unwrap(); @@ -1957,6 +2021,7 @@ mod test { height_0, &delete_key, &to_delete_val, + true, ) .unwrap(); db.batch_write_subspace_val( @@ -1964,6 +2029,7 @@ mod test { height_0, &overwrite_key, &to_overwrite_val, + true, ) .unwrap(); @@ -1988,16 +2054,19 @@ mod test { .insert("dummy2".to_string(), gen_established_address("test")); let add_val = vec![1_u8, 0, 0, 0]; let overwrite_val = vec![1_u8, 1, 1, 1]; - db.batch_write_subspace_val(&mut batch, height_1, &add_key, &add_val) - .unwrap(); + db.batch_write_subspace_val( + &mut batch, height_1, &add_key, &add_val, true, + ) + .unwrap(); db.batch_write_subspace_val( &mut batch, height_1, &overwrite_key, &overwrite_val, + true, ) .unwrap(); - db.batch_delete_subspace_val(&mut batch, height_1, &delete_key) + db.batch_delete_subspace_val(&mut batch, height_1, &delete_key, true) .unwrap(); add_block_to_batch( diff --git a/crates/benches/host_env.rs b/crates/benches/host_env.rs index ca38170977..36a79cea05 100644 --- a/crates/benches/host_env.rs +++ b/crates/benches/host_env.rs @@ -318,7 +318,7 @@ fn storage_write(c: &mut Criterion) { .wl_storage .storage .db - .write_subspace_val(block_height, &key, value) + .write_subspace_val(block_height, &key, value, true) .unwrap(); }, criterion::BatchSize::SmallInput, diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 838e8c3c1c..31af2c87d6 
100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -432,6 +432,8 @@ where // but with gas and storage bytes len diff accounting tracing::debug!("storage write key {}", key,); let value = value.as_ref(); + let is_key_merklized = (self.merkle_tree_key_filter)(key); + if is_pending_transfer_key(key) { // The tree of the bright pool stores the current height for the // pending transfer @@ -439,13 +441,19 @@ where self.block.tree.update(key, height)?; } else { // Update the merkle tree - self.block.tree.update(key, value)?; + if is_key_merklized { + self.block.tree.update(key, value)?; + } } let len = value.len(); let gas = (key.len() + len) as u64 * STORAGE_WRITE_GAS_PER_BYTE; - let size_diff = - self.db.write_subspace_val(self.block.height, key, value)?; + let size_diff = self.db.write_subspace_val( + self.block.height, + key, + value, + is_key_merklized, + )?; Ok((gas, size_diff)) } @@ -456,9 +464,15 @@ where // but with gas and storage bytes len diff accounting let mut deleted_bytes_len = 0; if self.has_key(key)?.0 { - self.block.tree.delete(key)?; - deleted_bytes_len = - self.db.delete_subspace_val(self.block.height, key)?; + let is_key_merklized = (self.merkle_tree_key_filter)(key); + if is_key_merklized { + self.block.tree.delete(key)?; + } + deleted_bytes_len = self.db.delete_subspace_val( + self.block.height, + key, + is_key_merklized, + )?; } let gas = (key.len() + deleted_bytes_len as usize) as u64 * STORAGE_WRITE_GAS_PER_BYTE; @@ -894,6 +908,8 @@ where value: impl AsRef<[u8]>, ) -> Result { let value = value.as_ref(); + let is_key_merklized = (self.merkle_tree_key_filter)(key); + if is_pending_transfer_key(key) { // The tree of the bridge pool stores the current height for the // pending transfer @@ -901,13 +917,16 @@ where self.block.tree.update(key, height)?; } else { // Update the merkle tree - self.block.tree.update(key, value)?; + if is_key_merklized { + self.block.tree.update(key, value)?; + } } Ok(self.db.batch_write_subspace_val( 
batch, self.block.height, key, value, + is_key_merklized, )?) } @@ -919,11 +938,17 @@ where batch: &mut D::WriteBatch, key: &Key, ) -> Result { + let is_key_merklized = (self.merkle_tree_key_filter)(key); // Update the merkle tree - self.block.tree.delete(key)?; - Ok(self - .db - .batch_delete_subspace_val(batch, self.block.height, key)?) + if is_key_merklized { + self.block.tree.delete(key)?; + } + Ok(self.db.batch_delete_subspace_val( + batch, + self.block.height, + key, + is_key_merklized, + )?) } // Prune merkle tree stores. Use after updating self.block.height in the diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index a7bbf61dba..71f63d89fb 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -181,6 +181,7 @@ pub trait DB: Debug { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + persist_diffs: bool, ) -> Result; /// Delete the value with the given height and account subspace key from the @@ -190,6 +191,7 @@ pub trait DB: Debug { &mut self, height: BlockHeight, key: &Key, + persist_diffs: bool, ) -> Result; /// Start write batch. 
@@ -207,6 +209,7 @@ pub trait DB: Debug { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + persist_diffs: bool, ) -> Result; /// Batch delete the value with the given height and account subspace key @@ -217,6 +220,7 @@ pub trait DB: Debug { batch: &mut Self::WriteBatch, height: BlockHeight, key: &Key, + persist_diffs: bool, ) -> Result; /// Prune Merkle tree stores at the given epoch diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index c2d83b6bac..f73f857011 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -405,15 +405,17 @@ pub mod testing { key: &storage::Key, val: impl AsRef<[u8]>, ) -> Result<()> { + let is_key_merklized = (self.merkle_tree_key_filter)(key); self.db - .write_subspace_val(self.height, key, val) + .write_subspace_val(self.height, key, val, is_key_merklized) .into_storage_result()?; Ok(()) } fn delete(&mut self, key: &storage::Key) -> Result<()> { + let is_key_merklized = (self.merkle_tree_key_filter)(key); self.db - .delete_subspace_val(self.height, key) + .delete_subspace_val(self.height, key, is_key_merklized) .into_storage_result()?; Ok(()) } diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index 6059481fae..17e913cc62 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -487,18 +487,31 @@ impl DB for MockDB { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + persist_diffs: bool, ) -> Result { // batch_write are directly committed - self.batch_write_subspace_val(&mut MockDBWriteBatch, height, key, value) + self.batch_write_subspace_val( + &mut MockDBWriteBatch, + height, + key, + value, + persist_diffs, + ) } fn delete_subspace_val( &mut self, height: BlockHeight, key: &Key, + persist_diffs: bool, ) -> Result { // batch_delete are directly committed - self.batch_delete_subspace_val(&mut MockDBWriteBatch, height, key) + self.batch_delete_subspace_val( + &mut MockDBWriteBatch, + height, + key, + persist_diffs, + ) } fn batch() -> 
Self::WriteBatch { @@ -517,14 +530,17 @@ height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + persist_diffs: bool, ) -> Result { let value = value.as_ref(); let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); + Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key); let current_len = value.len() as i64; let diff_prefix = Key::from(height.to_db_key()); let mut db = self.0.borrow_mut(); - Ok( + + // Diffs + let size_diff = match db.insert(subspace_key.to_string(), value.to_owned()) { Some(prev_value) => { let old_key = diff_prefix @@ -547,8 +563,27 @@ db.insert(new_key.to_string(), value.to_owned()); current_len } - }, - ) + }; + + if !persist_diffs { + if let Some(pruned_height) = height.0.checked_sub(1) { + let pruned_key_prefix = Key::from(pruned_height.to_db_key()); + let old_val_key = pruned_key_prefix + .push(&OLD_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&old_val_key); + let new_val_key = pruned_key_prefix + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&new_val_key); + } + } + + Ok(size_diff) } fn batch_delete_subspace_val( @@ -556,22 +591,45 @@ _batch: &mut Self::WriteBatch, height: BlockHeight, key: &Key, + persist_diffs: bool, ) -> Result { let subspace_key = Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key); let diff_prefix = Key::from(height.to_db_key()); let mut db = self.0.borrow_mut(); - Ok(match db.remove(&subspace_key.to_string()) { + + let size_diff = match db.remove(&subspace_key.to_string()) { Some(value) => { let old_key = diff_prefix - .push(&"old".to_string().to_db_key()) + .push(&OLD_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(old_key.to_string(), value.clone()); + + if !persist_diffs { + if let Some(pruned_height) = height.0.checked_sub(1) { + let pruned_key_prefix = 
Key::from(pruned_height.to_db_key()); + let old_val_key = pruned_key_prefix + .push(&OLD_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&old_val_key); + let new_val_key = pruned_key_prefix + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&new_val_key); + } + } value.len() as i64 } None => 0, - }) + }; + + Ok(size_diff) } fn prune_merkle_tree_store( From 0aed7425295e08314536bbc9f8ae7c1ee2b22014 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 24 Jan 2024 17:02:52 -0500 Subject: [PATCH 05/12] tests --- .../apps/src/lib/node/ledger/storage/mod.rs | 202 ++++++++++++++++++ .../src/lib/node/ledger/storage/rocksdb.rs | 165 ++++++++++++++ crates/state/src/lib.rs | 188 ++++++++++++++++ crates/storage/src/db.rs | 9 + crates/storage/src/lib.rs | 6 + crates/storage/src/mockdb.rs | 20 ++ 6 files changed, 590 insertions(+) diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index 1fc18e763d..04ca1020de 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -52,6 +52,7 @@ fn new_blake2b() -> Blake2b { mod tests { use std::collections::HashMap; + use borsh::BorshDeserialize; use itertools::Itertools; use namada::eth_bridge::storage::proof::BridgePoolRootProof; use namada::ledger::eth_bridge::storage::bridge_pool; @@ -69,6 +70,7 @@ mod tests { use namada::types::time::DurationSecs; use namada::types::{address, storage}; use namada::{parameters, token, types}; + use namada_sdk::storage::{StorageRead, DB}; use proptest::collection::vec; use proptest::prelude::*; use proptest::test_runner::Config; @@ -793,4 +795,204 @@ mod tests { .map(Result::unwrap); itertools::assert_equal(iter, expected); } + + fn test_key_1() -> Key { + Key::parse("testing1").unwrap() + } + + fn test_key_2() -> Key { + Key::parse("testing2").unwrap() + } + + fn merkle_tree_key_filter(key: &Key) -> 
bool { + key == &test_key_1() + } + + #[test] + fn test_persistent_storage_writing_without_merklizing_or_diffs() { + let db_path = + TempDir::new().expect("Unable to create a temporary DB directory"); + let storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + None, + merkle_tree_key_filter, + ); + let mut wls = WlStorage { + storage, + write_log: Default::default(), + }; + // Start the first block + let first_height = BlockHeight::first(); + wls.storage.block.height = first_height; + + let key1 = test_key_1(); + let val1 = 1u64; + let key2 = test_key_2(); + let val2 = 2u64; + + // Standard write of key-val-1 + wls.write(&key1, val1).unwrap(); + + // Read from WlStorage should return val1 + let res = wls.read::(&key1).unwrap().unwrap(); + assert_eq!(res, val1); + + // Read from Storage shouldn't return val1 because the block hasn't been + // committed + let (res, _) = wls.storage.read(&key1).unwrap(); + assert!(res.is_none()); + + // Write key-val-2 without merklizing or diffs + wls.write(&key2, val2).unwrap(); + + // Read from WlStorage should return val2 + let res = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res, val2); + + // Commit block and storage changes + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + let second_height = wls.storage.block.height; + + // Read key1 from Storage should return val1 + let (res1, _) = wls.storage.read(&key1).unwrap(); + let res1 = u64::try_from_slice(&res1.unwrap()).unwrap(); + assert_eq!(res1, val1); + + // Check merkle tree inclusion of key-val-1 explicitly + let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + assert!(is_merklized1); + + // Key2 should be in storage. 
Confirm by reading from + // WlStorage and also by reading Storage subspace directly + let res2 = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res2, val2); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Check explicitly that key-val-2 is not in merkle tree + let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized2); + + // Check that the proper diffs exist for key-val-1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + // Check that there are diffs for key-val-2 in block 0, since all keys + // need to have diffs for at least 1 block for rollback purposes + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, false) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Delete the data then commit the block + wls.delete(&key1).unwrap(); + wls.delete(&key2).unwrap(); + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + + // Check the key-vals are removed from the storage subspace + let res1 = wls.read::(&key1).unwrap(); + let res2 = wls.read::(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + + // Check that the key-vals don't exist in the merkle tree anymore + let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized2 = 
wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized1 && !is_merklized2); + + // Check that key-val-1 diffs are properly updated for blocks 0 and 1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, second_height, true) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, second_height, false) + .unwrap(); + assert!(res1.is_none()); + + // Check that key-val-2 diffs don't exist for block 0 anymore + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, false) + .unwrap(); + assert!(res2.is_none()); + + // Check that the block 1 diffs for key-val-2 include an "old" value of + // val2 and no "new" value + let res2 = wls + .storage + .db + .read_diffs_val(&key2, second_height, true) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, second_height, false) + .unwrap(); + assert!(res2.is_none()); + } } diff --git a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs index 6705388e48..2d64014891 100644 --- a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -1226,6 +1226,24 @@ impl DB for RocksDB { Ok(false) } + fn read_diffs_val( + &self, + key: &Key, + height: BlockHeight, + is_old: bool, + ) -> Result>> { + let diffs_cf = self.get_column_family(DIFFS_CF)?; + let key = if is_old { + 
old_and_new_diff_key(key, height)?.0 + } else { + old_and_new_diff_key(key, height)?.1 + }; + + self.0 + .get_cf(diffs_cf, key) + .map_err(|e| Error::DBError(e.into_string())) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; self.0 @@ -2107,6 +2125,153 @@ mod test { assert_eq!(conversion_state, types::encode(&conversion_state_0)); } + #[test] + fn test_diffs() { + let dir = tempdir().unwrap(); + let mut db = open(dir.path(), None).unwrap(); + + let key_with_diffs = Key::parse("with_diffs").unwrap(); + let key_without_diffs = Key::parse("without_diffs").unwrap(); + + let initial_val = vec![1_u8, 1, 0, 0]; + let overwrite_val = vec![1_u8, 1, 1, 0]; + + // Write first block + let mut batch = RocksDB::batch(); + let height_0 = BlockHeight::first(); + db.batch_write_subspace_val( + &mut batch, + height_0, + &key_with_diffs, + &initial_val, + true, + ) + .unwrap(); + db.batch_write_subspace_val( + &mut batch, + height_0, + &key_without_diffs, + &initial_val, + false, + ) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + { + let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + + // Diffs new key for `key_with_diffs` at height_0 must be present + let (old_with_h0, new_with_h0) = + old_and_new_diff_key(&key_with_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_with_h0).unwrap().is_some()); + + // Diffs new key for `key_without_diffs` at height_0 must be present + let (old_wo_h0, new_wo_h0) = + old_and_new_diff_key(&key_without_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_wo_h0).unwrap().is_some()); + } + + // Write second block + let mut batch = RocksDB::batch(); + let height_1 = height_0 + 10; + db.batch_write_subspace_val( + &mut batch, + height_1, + &key_with_diffs, + &overwrite_val, + true, + ) + .unwrap(); + db.batch_write_subspace_val( + 
&mut batch, + height_1, + &key_without_diffs, + &overwrite_val, + false, + ) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + { + let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + + // Diffs keys for `key_with_diffs` at height_0 must be present + let (old_with_h0, new_with_h0) = + old_and_new_diff_key(&key_with_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_with_h0).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_0 must be gone + let (old_wo_h0, new_wo_h0) = + old_and_new_diff_key(&key_without_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_wo_h0).unwrap().is_none()); + + // Diffs keys for `key_with_diffs` at height_1 must be present + let (old_with_h1, new_with_h1) = + old_and_new_diff_key(&key_with_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h1).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_with_h1).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_1 must be present + let (old_wo_h1, new_wo_h1) = + old_and_new_diff_key(&key_without_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h1).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_wo_h1).unwrap().is_some()); + } + + // Write third block + let mut batch = RocksDB::batch(); + let height_2 = height_1 + 10; + db.batch_write_subspace_val( + &mut batch, + height_2, + &key_with_diffs, + &initial_val, + true, + ) + .unwrap(); + db.batch_write_subspace_val( + &mut batch, + height_2, + &key_without_diffs, + &initial_val, + false, + ) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + { + let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + + // Diffs keys for `key_with_diffs` at height_1 must be present + let (old_with_h1, new_with_h1) = + old_and_new_diff_key(&key_with_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, 
old_with_h1).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_with_h1).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_1 must be gone + let (old_wo_h1, new_wo_h1) = + old_and_new_diff_key(&key_without_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h1).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_wo_h1).unwrap().is_none()); + + // Diffs keys for `key_with_diffs` at height_2 must be present + let (old_with_h2, new_with_h2) = + old_and_new_diff_key(&key_with_diffs, height_2).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h2).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_with_h2).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_2 must be present + let (old_wo_h2, new_wo_h2) = + old_and_new_diff_key(&key_without_diffs, height_2).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h2).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_wo_h2).unwrap().is_some()); + } + } + /// A test helper to write a block fn add_block_to_batch( db: &RocksDB, diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 31af2c87d6..5770439fb7 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -1394,4 +1394,192 @@ mod tests { assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); } } + + fn test_key_1() -> Key { + Key::parse("testing1").unwrap() + } + + fn test_key_2() -> Key { + Key::parse("testing2").unwrap() + } + + fn merkle_tree_key_filter(key: &Key) -> bool { + key == &test_key_1() + } + + #[test] + fn test_writing_without_merklizing_or_diffs() { + let mut wls = TestWlStorage::default(); + assert_eq!(wls.storage.block.height.0, 0); + + (wls.storage.merkle_tree_key_filter) = merkle_tree_key_filter; + + let key1 = test_key_1(); + let val1 = 1u64; + let key2 = test_key_2(); + let val2 = 2u64; + + // Standard write of key-val-1 + wls.write(&key1, val1).unwrap(); + + // Read from WlStorage should return val1 + let res = 
wls.read::(&key1).unwrap().unwrap(); + assert_eq!(res, val1); + + // Read from Storage shouldn't return val1 bc the block hasn't been + // committed + let (res, _) = wls.storage.read(&key1).unwrap(); + assert!(res.is_none()); + + // Write key-val-2 without merklizing or diffs + wls.write(&key2, val2).unwrap(); + + // Read from WlStorage should return val2 + let res = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res, val2); + + // Commit block and storage changes + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + + // Read key1 from Storage should return val1 + let (res1, _) = wls.storage.read(&key1).unwrap(); + let res1 = u64::try_from_slice(&res1.unwrap()).unwrap(); + assert_eq!(res1, val1); + + // Check merkle tree inclusion of key-val-1 explicitly + let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + assert!(is_merklized1); + + // Key2 should be in storage. Confirm by reading from + // WlStorage and also by reading Storage subspace directly + let res2 = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res2, val2); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Check explicitly that key-val-2 is not in merkle tree + let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized2); + + // Check that the proper diffs exist for key-val-1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, Default::default(), true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, Default::default(), false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + // Check that there are diffs for key-val-2 in block 0, since all keys + // need to have diffs for at least 1 block for rollback purposes + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(0), 
true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(0), false) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Now delete the keys properly + wls.delete(&key1).unwrap(); + wls.delete(&key2).unwrap(); + + // Commit the block again + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + + // Check the key-vals are removed from the storage subspace + let res1 = wls.read::(&key1).unwrap(); + let res2 = wls.read::(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + + // Check that the key-vals don't exist in the merkle tree anymore + let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized1 && !is_merklized2); + + // Check that key-val-1 diffs are properly updated for blocks 0 and 1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(0), true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(0), false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(1), true) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(1), false) + .unwrap(); + assert!(res1.is_none()); + + // Check that key-val-2 diffs don't exist for block 0 anymore + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(0), true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + 
.read_diffs_val(&key2, BlockHeight(0), false) + .unwrap(); + assert!(res2.is_none()); + + // Check that the block 1 diffs for key-val-2 include an "old" value of + // val2 and no "new" value + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(1), true) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(1), false) + .unwrap(); + assert!(res2.is_none()); + } } diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index 71f63d89fb..7eef20b918 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -173,6 +173,15 @@ pub trait DB: Debug { last_height: BlockHeight, ) -> Result>>; + /// Read the value for the account diffs at the corresponding height from + /// the DB + fn read_diffs_val( + &self, + key: &Key, + height: BlockHeight, + is_old: bool, + ) -> Result>>; + /// Write the value with the given height and account subspace key to the /// DB. Returns the size difference from previous value, if any, or the /// size of the value otherwise. 
diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index f73f857011..334e51e50e 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -319,6 +319,11 @@ pub mod testing { epoch: Epoch, pred_epochs: Epochs, native_token: Address, + merkle_tree_key_filter: fn(&storage::Key) -> bool, + } + + fn merklize_all_keys(_key: &storage::Key) -> bool { + true } #[allow(clippy::derivable_impls)] @@ -331,6 +336,7 @@ pub mod testing { epoch: Epoch::default(), pred_epochs: Epochs::default(), native_token: address::nam(), + merkle_tree_key_filter: merklize_all_keys, } } } diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index 17e913cc62..c23801536c 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -464,6 +464,26 @@ impl DB for MockDB { Ok(false) } + fn read_diffs_val( + &self, + key: &Key, + height: BlockHeight, + is_old: bool, + ) -> Result>> { + let old_new_seg = if is_old { + OLD_DIFF_PREFIX + } else { + NEW_DIFF_PREFIX + }; + + let prefix = Key::from(height.to_db_key()) + .push(&old_new_seg.to_string().to_db_key()) + .map_err(Error::KeyError)? 
+ .join(key); + + Ok(self.0.borrow().get(&prefix.to_string()).cloned()) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { let key = Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key); Ok(self.0.borrow().get(&key.to_string()).cloned()) From 24b47a5b7cc6cdc721a658eefe3e78309a8d7d93 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 17 Jan 2024 18:47:43 -0500 Subject: [PATCH 06/12] abstract instances of `write_bytes` to `write` --- .../src/lib/node/ledger/shell/init_chain.rs | 4 +- crates/apps/src/lib/node/ledger/shell/mod.rs | 5 +- .../transactions/ethereum_events/events.rs | 14 +-- .../src/protocol/transactions/read.rs | 7 +- .../src/protocol/transactions/update.rs | 8 +- .../protocol/transactions/votes/storage.rs | 40 +++---- .../ethereum_bridge/src/storage/parameters.rs | 33 +++--- .../src/storage/vp/ethereum_bridge.rs | 11 +- .../ethereum_bridge/bridge_pool_vp.rs | 7 +- .../ledger/native_vp/ethereum_bridge/vp.rs | 5 +- crates/parameters/src/lib.rs | 2 - crates/sdk/src/queries/shell/eth_bridge.rs | 101 ++++-------------- wasm/wasm_source/src/tx_bridge_pool.rs | 2 +- 13 files changed, 75 insertions(+), 164 deletions(-) diff --git a/crates/apps/src/lib/node/ledger/shell/init_chain.rs b/crates/apps/src/lib/node/ledger/shell/init_chain.rs index 1b52009eec..ae01eb2e1c 100644 --- a/crates/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/crates/apps/src/lib/node/ledger/shell/init_chain.rs @@ -215,9 +215,9 @@ where self.update_eth_oracle(&Default::default()); } else { self.wl_storage - .write_bytes( + .write( &namada::eth_bridge::storage::active_key(), - EthBridgeStatus::Disabled.serialize_to_vec(), + EthBridgeStatus::Disabled, ) .unwrap(); } diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index 8434ef1843..c6547f6eb4 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -2051,10 +2051,7 @@ mod test_utils { use 
namada::eth_bridge::storage::eth_bridge_queries::EthBridgeStatus; shell .wl_storage - .write_bytes( - &active_key(), - EthBridgeStatus::Disabled.serialize_to_vec(), - ) + .write(&active_key(), EthBridgeStatus::Disabled) .expect("Test failed"); } diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index 3d9e7cf14f..04335c15a2 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -590,7 +590,7 @@ mod tests { }; use namada_core::types::time::DurationSecs; use namada_core::types::token::Amount; - use namada_core::types::{address, encode, eth_bridge_pool}; + use namada_core::types::{address, eth_bridge_pool}; use namada_parameters::{update_epoch_parameter, EpochDuration}; use namada_state::testing::TestWlStorage; use namada_storage::mockdb::MockDBWriteBatch; @@ -611,7 +611,7 @@ mod tests { .expect("Test failed"); // set native ERC20 token wl_storage - .write_bytes(&bridge_storage::native_erc20_key(), encode(&wnam())) + .write(&bridge_storage::native_erc20_key(), wnam()) .expect("Test failed"); } @@ -757,7 +757,7 @@ mod tests { let payer_key = balance_key(&transfer.gas_fee.token, &payer); let payer_balance = Amount::from(0); wl_storage - .write_bytes(&payer_key, payer_balance.serialize_to_vec()) + .write(&payer_key, payer_balance) .expect("Test failed"); let escrow_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); @@ -772,24 +772,24 @@ mod tests { let sender_key = balance_key(&nam(), &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes(&sender_key, sender_balance.serialize_to_vec()) + .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes(&escrow_key, 
escrow_balance.serialize_to_vec()) + .write(&escrow_key, escrow_balance) .expect("Test failed"); } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes(&sender_key, sender_balance.serialize_to_vec()) + .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes(&escrow_key, escrow_balance.serialize_to_vec()) + .write(&escrow_key, escrow_balance) .expect("Test failed"); update::amount( wl_storage, diff --git a/crates/ethereum_bridge/src/protocol/transactions/read.rs b/crates/ethereum_bridge/src/protocol/transactions/read.rs index d8da8f6520..c618b0335d 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/read.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/read.rs @@ -54,7 +54,6 @@ where #[cfg(test)] mod tests { use assert_matches::assert_matches; - use namada_core::borsh::BorshSerializeExt; use namada_core::types::storage; use namada_core::types::token::Amount; use namada_state::testing::TestWlStorage; @@ -79,9 +78,7 @@ mod tests { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = Amount::from(1_000_000); let mut fake_storage = TestWlStorage::default(); - fake_storage - .write_bytes(&key, amount.serialize_to_vec()) - .unwrap(); + fake_storage.write(&key, amount).unwrap(); let amt = read::amount_or_default(&fake_storage, &key).unwrap(); assert_eq!(amt, amount); @@ -92,7 +89,7 @@ mod tests { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = "not an Amount type"; let mut fake_storage = TestWlStorage::default(); - fake_storage.write_bytes(&key, amount.as_bytes()).unwrap(); + fake_storage.write(&key, amount).unwrap(); assert_matches!(read::amount_or_default(&fake_storage, &key), Err(_)); } diff --git a/crates/ethereum_bridge/src/protocol/transactions/update.rs 
b/crates/ethereum_bridge/src/protocol/transactions/update.rs index 6d1e1b3e7b..4bb0963e93 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/update.rs @@ -19,7 +19,7 @@ where { let mut amount = super::read::amount_or_default(wl_storage, key)?; update(&mut amount); - wl_storage.write_bytes(key, borsh::to_vec(&amount)?)?; + wl_storage.write(key, amount)?; Ok(amount) } @@ -36,14 +36,13 @@ where { let mut value = super::read::value(wl_storage, key)?; update(&mut value); - wl_storage.write_bytes(key, borsh::to_vec(&value)?)?; + wl_storage.write(key, &value)?; Ok(value) } #[cfg(test)] mod tests { use eyre::{eyre, Result}; - use namada_core::borsh::BorshSerializeExt; use namada_core::types::storage; use namada_state::testing::TestWlStorage; use namada_storage::{StorageRead, StorageWrite}; @@ -57,9 +56,8 @@ mod tests { .expect("could not set up test"); let value = 21i32; let mut wl_storage = TestWlStorage::default(); - let serialized = value.serialize_to_vec(); wl_storage - .write_bytes(&key, serialized) + .write(&key, value) .expect("could not set up test"); super::value(&mut wl_storage, &key, |v: &mut i32| *v *= 2)?; diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs index 5f4c9e6423..8830059b63 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -1,5 +1,5 @@ use eyre::{Result, WrapErr}; -use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::hints; use namada_core::types::storage::Key; use namada_core::types::voting_power::FractionalVotingPower; @@ -21,19 +21,15 @@ where H: 'static + StorageHasher + Sync, T: BorshSerialize, { - wl_storage.write_bytes(&keys.body(), &body.serialize_to_vec())?; - 
wl_storage.write_bytes(&keys.seen(), &tally.seen.serialize_to_vec())?; - wl_storage - .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec())?; - wl_storage.write_bytes( - &keys.voting_power(), - &tally.voting_power.serialize_to_vec(), - )?; + wl_storage.write(&keys.body(), body)?; + wl_storage.write(&keys.seen(), tally.seen)?; + wl_storage.write(&keys.seen_by(), tally.seen_by.clone())?; + wl_storage.write(&keys.voting_power(), tally.voting_power.clone())?; if !already_present { // add the current epoch for the inserted event - wl_storage.write_bytes( + wl_storage.write( &keys.voting_started_epoch(), - &wl_storage.storage.get_current_epoch().0.serialize_to_vec(), + wl_storage.storage.get_current_epoch().0, )?; } Ok(()) @@ -138,6 +134,7 @@ mod tests { use std::collections::BTreeMap; use assert_matches::assert_matches; + use namada_core::borsh::BorshSerializeExt; use namada_core::types::ethereum_events::EthereumEvent; use super::*; @@ -239,25 +236,16 @@ mod tests { seen_by: BTreeMap::from([(validator, 10.into())]), seen: false, }; + wl_storage.write(&keys.body(), &event).unwrap(); + wl_storage.write(&keys.seen(), tally.seen).unwrap(); + wl_storage.write(&keys.seen_by(), &tally.seen_by).unwrap(); wl_storage - .write_bytes(&keys.body(), &event.serialize_to_vec()) - .unwrap(); - wl_storage - .write_bytes(&keys.seen(), &tally.seen.serialize_to_vec()) - .unwrap(); - wl_storage - .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec()) - .unwrap(); - wl_storage - .write_bytes( - &keys.voting_power(), - &tally.voting_power.serialize_to_vec(), - ) + .write(&keys.voting_power(), &tally.voting_power) .unwrap(); wl_storage - .write_bytes( + .write( &keys.voting_started_epoch(), - &wl_storage.storage.get_block_height().0.serialize_to_vec(), + wl_storage.storage.get_block_height().0, ) .unwrap(); diff --git a/crates/ethereum_bridge/src/storage/parameters.rs b/crates/ethereum_bridge/src/storage/parameters.rs index dd420de67b..ead8b612f4 100644 --- 
a/crates/ethereum_bridge/src/storage/parameters.rs +++ b/crates/ethereum_bridge/src/storage/parameters.rs @@ -4,9 +4,9 @@ use std::num::NonZeroU64; use eyre::{eyre, Result}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::types::ethereum_events::EthAddress; +use namada_core::types::ethereum_structs; use namada_core::types::storage::Key; use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; -use namada_core::types::{encode, ethereum_structs}; use namada_state::{DBIter, StorageHasher, WlStorage, DB}; use namada_storage::{StorageRead, StorageWrite}; use serde::{Deserialize, Serialize}; @@ -188,22 +188,18 @@ impl EthereumBridgeParams { let bridge_contract_key = bridge_storage::bridge_contract_key(); let eth_start_height_key = bridge_storage::eth_start_height_key(); wl_storage - .write_bytes( + .write( &active_key, - encode(&EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis)), + EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); wl_storage - .write_bytes(&min_confirmations_key, encode(min_confirmations)) + .write(&min_confirmations_key, min_confirmations) .unwrap(); + wl_storage.write(&native_erc20_key, native_erc20).unwrap(); + wl_storage.write(&bridge_contract_key, bridge).unwrap(); wl_storage - .write_bytes(&native_erc20_key, encode(native_erc20)) - .unwrap(); - wl_storage - .write_bytes(&bridge_contract_key, encode(bridge)) - .unwrap(); - wl_storage - .write_bytes(ð_start_height_key, encode(eth_start_height)) + .write(ð_start_height_key, eth_start_height) .unwrap(); for Erc20WhitelistEntry { token_address: addr, @@ -225,21 +221,21 @@ impl EthereumBridgeParams { suffix: whitelist::KeyType::Whitelisted, } .into(); - wl_storage.write_bytes(&key, encode(&true)).unwrap(); + wl_storage.write(&key, true).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Cap, } .into(); - wl_storage.write_bytes(&key, encode(&cap)).unwrap(); + wl_storage.write(&key, cap).unwrap(); let key = 
whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Denomination, } .into(); - wl_storage.write_bytes(&key, encode(&denom)).unwrap(); + wl_storage.write(&key, denom).unwrap(); } // Initialize the storage for the Ethereum Bridge VP. vp::ethereum_bridge::init_storage(wl_storage); @@ -368,7 +364,6 @@ where #[cfg(test)] mod tests { use eyre::Result; - use namada_core::borsh::BorshSerializeExt; use namada_core::types::ethereum_events::EthAddress; use namada_state::testing::TestWlStorage; @@ -462,16 +457,16 @@ mod tests { fn test_ethereum_bridge_config_storage_partially_configured() { let mut wl_storage = TestWlStorage::default(); wl_storage - .write_bytes( + .write( &bridge_storage::active_key(), - encode(&EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis)), + EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); // Write a valid min_confirmations value wl_storage - .write_bytes( + .write( &bridge_storage::min_confirmations_key(), - MinimumConfirmations::default().serialize_to_vec(), + MinimumConfirmations::default(), ) .unwrap(); diff --git a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs index 4f84b855ab..644555548b 100644 --- a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs +++ b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs @@ -1,4 +1,3 @@ -use namada_core::borsh::BorshSerializeExt; use namada_core::ledger::eth_bridge::ADDRESS; use namada_core::types::hash::StorageHasher; use namada_state::{DBIter, WlStorage, DB}; @@ -16,10 +15,8 @@ where H: StorageHasher, { let escrow_key = balance_key(&wl_storage.storage.native_token, &ADDRESS); - wl_storage - .write_bytes(&escrow_key, Amount::default().serialize_to_vec()) - .expect( - "Initializing the escrow balance of the Ethereum Bridge VP \ - shouldn't fail.", - ); + wl_storage.write(&escrow_key, Amount::default()).expect( + "Initializing the escrow balance of the Ethereum Bridge VP shouldn't \ + fail.", + ); } 
diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 82fa60eaa7..a67c99402e 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -1623,16 +1623,13 @@ mod test_bridge_pool_vp { let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); wl_storage - .write_bytes(&eb_account_key, Amount::default().serialize_to_vec()) + .write(&eb_account_key, Amount::default()) .expect("Test failed"); // initialize the gas payers account let gas_payer_balance_key = balance_key(&nam(), &established_address_1()); wl_storage - .write_bytes( - &gas_payer_balance_key, - Amount::from(BERTHA_WEALTH).serialize_to_vec(), - ) + .write(&gas_payer_balance_key, Amount::from(BERTHA_WEALTH)) .expect("Test failed"); wl_storage.write_log.commit_tx(); let tx = Tx::from_type(TxType::Raw); diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs index 34b204e2e4..b72fe7d13f 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -219,10 +219,9 @@ mod tests { &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); wl_storage - .write_bytes( + .write( &balance_key, - Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE) - .serialize_to_vec(), + Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE), ) .expect("Test failed"); diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index 9caaec48a6..d64f445c28 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -286,8 +286,6 @@ where S: StorageRead + StorageWrite, { let key = storage::get_max_signatures_per_transaction_key(); - // Using `fn write_bytes` here, because implicit_vp doesn't need to be - // encoded, it's bytes 
already. storage.write(&key, value) } diff --git a/crates/sdk/src/queries/shell/eth_bridge.rs b/crates/sdk/src/queries/shell/eth_bridge.rs index 334ddbb97f..92ee5f3ca8 100644 --- a/crates/sdk/src/queries/shell/eth_bridge.rs +++ b/crates/sdk/src/queries/shell/eth_bridge.rs @@ -1079,10 +1079,7 @@ mod test_ethbridge_router { client.wl_storage.storage.block.height = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height @@ -1122,10 +1119,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height @@ -1141,10 +1135,7 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // commit the changes and increase block height @@ -1187,10 +1178,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1209,18 +1197,15 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( + .write( &get_signed_root_key(), - (signed_root.clone(), written_height).serialize_to_vec(), + 
(signed_root.clone(), written_height), ) .expect("Test failed"); @@ -1303,10 +1288,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1328,19 +1310,13 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, BlockHeight::from(0))) .expect("Test failed"); // commit the changes and increase block height @@ -1398,10 +1374,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1420,19 +1393,13 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, written_height).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height @@ -1473,10 +1440,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - 
&get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); let event_transfer: namada_core::types::ethereum_events::TransferToEthereum @@ -1490,17 +1454,16 @@ mod test_ethbridge_router { let voting_power = FractionalVotingPower::HALF; client .wl_storage - .write_bytes(ð_msg_key.body(), eth_event.serialize_to_vec()) + .write(ð_msg_key.body(), eth_event) .expect("Test failed"); client .wl_storage - .write_bytes( + .write( ð_msg_key.voting_power(), EpochedVotingPower::from([( 0.into(), voting_power * dummy_validator_stake, - )]) - .serialize_to_vec(), + )]), ) .expect("Test failed"); client @@ -1520,10 +1483,7 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // commit the changes and increase block height @@ -1572,10 +1532,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1594,19 +1551,13 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, written_height).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height @@ -1739,10 +1690,7 @@ mod test_ethbridge_router { }; client .wl_storage - .write_bytes( - 
&get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), transfer.clone()) .expect("Test failed"); // write transfers into the event log @@ -1781,10 +1729,7 @@ mod test_ethbridge_router { let written_height = client.wl_storage.storage.block.height; client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, written_height).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); client .wl_storage diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs index f4826b3d41..47f401f7d9 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -60,7 +60,7 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { log_string("Escrow succeeded"); // add transfer into the pool let pending_key = get_pending_key(&transfer); - ctx.write_bytes(&pending_key, transfer.serialize_to_vec()) + ctx.write(&pending_key, transfer) .wrap_err("Could not write transfer to bridge pool")?; Ok(()) } From 1e609599073a053325017e135f67d8fd7cdf6e58 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 24 Jan 2024 17:21:46 -0500 Subject: [PATCH 07/12] fix ibc storage init --- crates/namada/src/ledger/ibc/mod.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/crates/namada/src/ledger/ibc/mod.rs b/crates/namada/src/ledger/ibc/mod.rs index 4f0a9970be..45f987e7a8 100644 --- a/crates/namada/src/ledger/ibc/mod.rs +++ b/crates/namada/src/ledger/ibc/mod.rs @@ -15,24 +15,23 @@ where // In ibc-go, u64 like a counter is encoded with big-endian: // https://github.com/cosmos/ibc-go/blob/89ffaafb5956a5ea606e1f1bf249c880bea802ed/modules/core/04-channel/keeper/keeper.go#L115 + let init_value = 0_u64; + // the client counter let key = client_counter_key(); - let value = 0_u64.to_be_bytes().to_vec(); storage - .write_bytes(&key, value) + .write(&key, init_value) .expect("Unable to write the initial 
client counter"); // the connection counter let key = connection_counter_key(); - let value = 0_u64.to_be_bytes().to_vec(); storage - .write_bytes(&key, value) + .write(&key, init_value) .expect("Unable to write the initial connection counter"); // the channel counter let key = channel_counter_key(); - let value = 0_u64.to_be_bytes().to_vec(); storage - .write_bytes(&key, value) + .write(&key, init_value) .expect("Unable to write the initial channel counter"); } From 2ac37f0148ec015f42ad29d4c4180f9724b471ea Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 24 Jan 2024 17:22:22 -0500 Subject: [PATCH 08/12] fix imports --- crates/apps/src/lib/node/ledger/storage/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index 04ca1020de..fa375fc743 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -60,7 +60,9 @@ mod tests { use namada::ledger::ibc::storage::ibc_key; use namada::ledger::parameters::{EpochDuration, Parameters}; use namada::state::write_log::WriteLog; - use namada::state::{self, StorageWrite, StoreType, WlStorage}; + use namada::state::{ + self, StorageRead, StorageWrite, StoreType, WlStorage, DB, + }; use namada::token::conversion::update_allowed_conversions; use namada::types::chain::ChainId; use namada::types::ethereum_events::Uint; @@ -70,7 +72,6 @@ mod tests { use namada::types::time::DurationSecs; use namada::types::{address, storage}; use namada::{parameters, token, types}; - use namada_sdk::storage::{StorageRead, DB}; use proptest::collection::vec; use proptest::prelude::*; use proptest::test_runner::Config; From 343f27ff0203e294c92adc041d191b3d30c1c6d1 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 17 Jan 2024 19:50:23 -0500 Subject: [PATCH 09/12] handle non-merklized values in `State` methods --- crates/state/src/lib.rs | 6 +----- 1 file changed, 1 
insertion(+), 5 deletions(-) diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 5770439fb7..769a651e67 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -346,7 +346,7 @@ where /// gas cost. pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { Ok(( - self.block.tree.has_key(key)?, + self.db.read_subspace_val(key)?.is_some(), key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, )) } @@ -354,10 +354,6 @@ where /// Returns a value from the specified subspace and the gas cost pub fn read(&self, key: &Key) -> Result<(Option>, u64)> { tracing::debug!("storage read key {}", key); - let (present, gas) = self.has_key(key)?; - if !present { - return Ok((None, gas)); - } match self.db.read_subspace_val(key)? { Some(v) => { From 790acb740974b359f042db07f0872cd0b3cfe628 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 24 Jan 2024 17:03:16 -0500 Subject: [PATCH 10/12] edit comments --- crates/state/src/lib.rs | 6 +++--- crates/state/src/write_log.rs | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 769a651e67..5d3a3a1aae 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -365,8 +365,8 @@ where } } - /// Returns a value from the specified subspace at the given height or the - /// last committed height when 0 and the gas cost. + /// Returns a value from the specified subspace at the given height (or the + /// last committed height when 0) and the gas cost. 
pub fn read_with_height( &self, key: &Key, @@ -654,7 +654,7 @@ where let key = Key::parse(old.0.clone()) .expect("the key should be parsable"); - if !(self.merkle_tree_key_filter)(&key) { + if (self.merkle_tree_key_filter)(&key) { tree.delete(&key)?; } diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 28ca56cd2d..4250985c55 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -525,6 +525,7 @@ impl WriteLog { } } + // Replay protections specifically for (hash, entry) in self.replay_protection.iter() { match entry { ReProtStorageModification::Write => storage From 050b5e8e19d2e4f79a41192ded89ead624d35c12 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 24 Jan 2024 17:30:23 -0500 Subject: [PATCH 11/12] fix spelling of `merkelized` --- .../lib/node/ledger/shell/finalize_block.rs | 2 +- crates/apps/src/lib/node/ledger/shell/mod.rs | 8 +++----- .../apps/src/lib/node/ledger/storage/mod.rs | 20 +++++++++---------- .../ethereum_bridge/bridge_pool_vp.rs | 2 +- crates/state/src/lib.rs | 4 ++-- 5 files changed, 17 insertions(+), 19 deletions(-) diff --git a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs index 9707ee7811..cbd2b3d3f2 100644 --- a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2850,7 +2850,7 @@ mod test_finalize_block { /// Test that replay protection keys are not added to the merkle tree #[test] - fn test_replay_keys_not_merkelized() { + fn test_replay_keys_not_merklized() { let (mut shell, _, _, _) = setup(); let (wrapper_tx, processed_tx) = diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index c6547f6eb4..7888d86e4b 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -364,10 +364,8 @@ where } /// Merkle tree storage key filter. 
Return `false` for keys that shouldn't be -/// merkelized. -pub fn is_merkelized_storage_key( - key: &namada_sdk::types::storage::Key, -) -> bool { +/// merklized. +pub fn is_merklized_storage_key(key: &namada_sdk::types::storage::Key) -> bool { !token::storage_key::is_masp_key(key) && !namada::ibc::storage::is_ibc_counter_key(key) } @@ -440,7 +438,7 @@ where native_token, db_cache, config.shell.storage_read_past_height_limit, - is_merkelized_storage_key, + is_merklized_storage_key, ); storage .load_last_state() diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index fa375fc743..ae4822a072 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -78,7 +78,7 @@ mod tests { use tempfile::TempDir; use super::*; - use crate::node::ledger::shell::is_merkelized_storage_key; + use crate::node::ledger::shell::is_merklized_storage_key; #[test] fn test_crud_value() { @@ -90,7 +90,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, ); let key = Key::parse("key").expect("cannot parse the key string"); let value: u64 = 1; @@ -143,7 +143,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -245,7 +245,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, ); storage .load_last_state() @@ -270,7 +270,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -317,7 +317,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -385,7 +385,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, 
); // 1. For each `blocks_write_value`, write the current block height if @@ -479,7 +479,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, ); let num_keys = 5; @@ -598,7 +598,7 @@ mod tests { address::nam(), None, Some(5), - is_merkelized_storage_key, + is_merklized_storage_key, ); let new_epoch_start = BlockHeight(1); let signed_root_key = bridge_pool::get_signed_root_key(); @@ -704,7 +704,7 @@ mod tests { address::nam(), None, None, - is_merkelized_storage_key, + is_merklized_storage_key, ); let mut storage = WlStorage { storage, diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index a67c99402e..19e99a7475 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -920,7 +920,7 @@ mod test_bridge_pool_vp { address::nam(), None, None, - namada_sdk::state::merkelize_all_keys, + namada_sdk::state::merklize_all_keys, ), write_log: Default::default(), }; diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 5d3a3a1aae..a8289c260a 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -144,7 +144,7 @@ pub struct BlockStorage { pub pred_epochs: Epochs, } -pub fn merkelize_all_keys(_key: &storage::Key) -> bool { +pub fn merklize_all_keys(_key: &storage::Key) -> bool { true } @@ -1151,7 +1151,7 @@ pub mod testing { ethereum_height: None, eth_events_queue: EthEventsQueue::default(), storage_read_past_height_limit: Some(1000), - merkle_tree_key_filter: merkelize_all_keys, + merkle_tree_key_filter: merklize_all_keys, } } } From f56e9f01ac913be9c6acfa914b66604dff5c83e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 25 Jan 2024 12:26:55 +0000 Subject: [PATCH 12/12] changelog: add #2438 --- .changelog/unreleased/improvements/2438-new-storage-write.md | 2 ++ 
1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/2438-new-storage-write.md diff --git a/.changelog/unreleased/improvements/2438-new-storage-write.md b/.changelog/unreleased/improvements/2438-new-storage-write.md new file mode 100644 index 0000000000..c3e02a44b9 --- /dev/null +++ b/.changelog/unreleased/improvements/2438-new-storage-write.md @@ -0,0 +1,2 @@ +- Skip writing some MASP and IBC storage keys to merkle tree and DB diffs. + ([\#2438](https://github.com/anoma/namada/pull/2438)) \ No newline at end of file