From eb824087fdd0763f00e695e303d7ad6c56f96538 Mon Sep 17 00:00:00 2001
From: robin-near <111538878+robin-near@users.noreply.github.com>
Date: Mon, 13 Nov 2023 23:16:34 -0800
Subject: [PATCH] [Memtrie] (5/5) Integrate memtries into the protocol. (#10087)

This is a large PR, but it does a few independent things. They could in theory be split, but their tests are somewhat interdependent, so splitting is tricky.

* Incorporates MemTries into Trie, so that lookups, writes, and recording go through the memtrie whenever one is available. This is done by adding a `lookup_from_memory` function, which `get_optimized_ref` invokes as its first choice (see the sketch below).
* This logic is tested in `trie_recording.rs`. Previously, this test performed T (access via on-disk trie), F (access via flat storage), RT (access via recorded state proof, with trie-like gas accounting), and RF (access via recorded state proof, with flat-storage-like gas accounting), and asserted that T = RT and F = RF. Now we additionally perform in-memory versions of these setups (call them M), thereby asserting that T = MT = RT and F = MF = RF.
* Memtrie updates are produced via `Trie::update`, which uses the memtrie to compute the update whenever one is available.
* Incorporates a map into ShardTries, so that a collection of memtries can be maintained.
* Memtrie updates are applied via `WrappedTrieChanges::apply_memtrie_changes`; the block height is additionally included in `WrappedTrieChanges` to facilitate memtrie GC.
* Initial loading is done by calling the runtime's `load_mem_tries_on_startup`. It is called from the chain after ensuring that flat storage is available (which is critical when populating genesis for the first time).
* Three configuration parameters are added to configure the memtrie loading behavior. These configs should be immediately usable.
* GC is called from the chain at the same time as flat storage GC.
* An integration test has been added in `integration-tests/.../in_memory_tries.rs`, which brings up two nodes with different memtrie loading configs and asserts that, under a workload of various cross-shard transactions, the two nodes remain consistent even with frequent forks. It also exercises both the initial loading behavior and the behavior after restarts, and sanity-checks the GC logic.
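Before the diff, a distilled view of the new read path may help review. This is a simplified sketch of the `get_optimized_ref` dispatch added in `core/store/src/trie/mod.rs` below (recorder and error handling omitted):

```rust
// Order of preference for a key lookup after this PR:
// 1. the in-memory trie, if one is loaded for the shard;
// 2. flat storage, if the caller asked for KeyLookupMode::FlatStorage;
// 3. walking the on-disk trie (the State column) otherwise.
// Gas for trie node accesses is charged exactly as before, so the memtrie
// path stays consistent with both trie-like and flat-storage-like accounting.
let charge_gas_for_trie_node_access =
    mode == KeyLookupMode::Trie || self.charge_gas_for_trie_node_access;
if self.memtries.is_some() {
    self.lookup_from_memory(key, charge_gas_for_trie_node_access)
} else if mode == KeyLookupMode::FlatStorage && self.flat_storage_chunk_view.is_some() {
    self.lookup_from_flat_storage(key)
} else {
    Ok(self
        .lookup_from_state_column(NibbleSlice::new(key), charge_gas_for_trie_node_access)?
        .map(OptimizedValueRef::Ref))
}
```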
--- chain/chain/src/chain.rs | 31 ++ chain/chain/src/store.rs | 1 + chain/chain/src/test_utils/kv_runtime.rs | 46 +-- chain/chain/src/tests/gc.rs | 1 + chain/chain/src/types.rs | 7 + .../client/src/test_utils/test_env_builder.rs | 9 +- core/store/src/config.rs | 10 + core/store/src/test_utils.rs | 22 +- core/store/src/trie/config.rs | 8 +- core/store/src/trie/mem/mod.rs | 2 +- core/store/src/trie/mod.rs | 121 +++++- core/store/src/trie/shard_tries.rs | 109 ++++- core/store/src/trie/trie_recording.rs | 363 ++++++++++++---- .../src/tests/client/features.rs | 1 + .../tests/client/features/in_memory_tries.rs | 390 ++++++++++++++++++ .../src/tests/client/state_snapshot.rs | 2 + nearcore/src/runtime/mod.rs | 34 ++ nearcore/src/test_utils.rs | 51 ++- tools/state-viewer/src/state_changes.rs | 1 + 19 files changed, 1070 insertions(+), 139 deletions(-) create mode 100644 integration-tests/src/tests/client/features/in_memory_tries.rs diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index ba5f08a7355..b74aa4bd8e2 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -699,6 +699,16 @@ impl Chain { }; store_update.commit()?; + // We must load in-memory tries here, and not inside runtime, because + // if we were initializing from genesis, the runtime would be + // initialized when no blocks or flat storage were initialized. We + // require flat storage in order to load in-memory tries. + // TODO(#9511): The calculation of shard UIDs is not precise in the case + // of resharding. We need to revisit this. + let tip = store.head()?; + let shard_uids = epoch_manager.get_shard_layout(&tip.epoch_id)?.get_shard_uids(); + runtime_adapter.load_mem_tries_on_startup(&shard_uids)?; + info!(target: "chain", "Init: header head @ #{} {}; block head @ #{} {}", header_head.height, header_head.last_block_hash, block_head.height, block_head.last_block_hash); @@ -2401,6 +2411,7 @@ impl Chain { let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?; let flat_storage_manager = self.runtime_adapter.get_flat_storage_manager(); flat_storage_manager.update_flat_storage_for_shard(shard_uid, &block)?; + self.garbage_collect_memtrie_roots(&block, shard_uid); } } @@ -2447,6 +2458,17 @@ impl Chain { Ok(AcceptedBlock { hash: *block.hash(), status: block_status, provenance }) } + fn garbage_collect_memtrie_roots(&self, block: &Block, shard_uid: ShardUId) { + let tries = self.runtime_adapter.get_tries(); + let last_final_block = block.header().last_final_block(); + if last_final_block != &CryptoHash::default() { + let header = self.store.get_block_header(last_final_block).unwrap(); + if let Some(prev_height) = header.prev_height() { + tries.delete_memtrie_roots_up_to_height(shard_uid, prev_height); + } + } + } + /// Preprocess a block before applying chunks, verify that we have the necessary information /// to process the block and that the block is valid. /// Note that this function does NOT introduce any changes to chain state.
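A worked example of the GC window implied by `garbage_collect_memtrie_roots` above, matching the `num_memtrie_roots` assertions in the integration test at the end of this patch (heights are illustrative):

```rust
// Suppose the chain head is at height N. The last final block is then
// typically at N - 2, and its prev_height is N - 3, so the chain calls:
tries.delete_memtrie_roots_up_to_height(shard_uid, prev_height /* = N - 3 */);
// Per the integration test's comments, in-memory roots are maintained until
// (and including) the prev block of the last final block, so the memtrie
// retains roots for heights N - 3, N - 2 (final), N - 1 and N: exactly the
// four roots per loaded shard that num_memtrie_roots(...) == Some(4) asserts.
```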
@@ -3564,6 +3586,7 @@ impl Chain { let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?; let flat_storage_manager = self.runtime_adapter.get_flat_storage_manager(); flat_storage_manager.update_flat_storage_for_shard(shard_uid, &block)?; + self.garbage_collect_memtrie_roots(&block, shard_uid); } } @@ -4084,6 +4107,7 @@ impl Chain { )?; let block_hash = *block.hash(); + let block_height = block.header().height(); let challenges_result = block.header().challenges_result().clone(); let block_timestamp = block.header().raw_timestamp(); let next_gas_price = prev_block.header().next_gas_price(); @@ -4129,6 +4153,7 @@ impl Chain { epoch_manager.as_ref(), runtime.as_ref(), &block_hash, + block_height, &prev_block_hash, &apply_result, split_state_roots, @@ -4165,6 +4190,7 @@ impl Chain { let new_extra = self.get_chunk_extra(&prev_block_hash, &shard_uid)?; let block_hash = *block.hash(); + let block_height = block.header().height(); let challenges_result = block.header().challenges_result().clone(); let block_timestamp = block.header().raw_timestamp(); let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(&prev_block_hash)?; @@ -4216,6 +4242,7 @@ impl Chain { epoch_manager.as_ref(), runtime.as_ref(), &block_hash, + block_height, &prev_block_hash, &apply_result, split_state_roots, @@ -4249,6 +4276,7 @@ impl Chain { let state_changes = self.store().get_state_changes_for_split_states(block.hash(), shard_id)?; let block_hash = *block.hash(); + let block_height = block.header().height(); Ok(Some(Box::new(move |parent_span| -> Result { let _span = tracing::debug_span!( target: "chain", @@ -4259,6 +4287,7 @@ impl Chain { .entered(); let results = runtime.apply_update_to_split_states( &block_hash, + block_height, split_state_roots, &next_epoch_shard_layout, state_changes, @@ -5154,6 +5183,7 @@ impl<'a> ChainUpdate<'a> { epoch_manager: &dyn EpochManagerAdapter, runtime_adapter: &dyn RuntimeAdapter, block_hash: &CryptoHash, + block_height: BlockHeight, prev_block_hash: &CryptoHash, apply_result: &ApplyTransactionResult, split_state_roots: Option>, @@ -5170,6 +5200,7 @@ impl<'a> ChainUpdate<'a> { if let Some(state_roots) = split_state_roots { let split_state_results = runtime_adapter.apply_update_to_split_states( block_hash, + block_height, state_roots, &next_epoch_shard_layout, state_changes, diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs index 9906643df44..bef2be9082a 100644 --- a/chain/chain/src/store.rs +++ b/chain/chain/src/store.rs @@ -3174,6 +3174,7 @@ impl<'a> ChainStoreUpdate<'a> { // from the store. let mut deletions_store_update = self.store().store_update(); for mut wrapped_trie_changes in self.trie_changes.drain(..) 
{ + wrapped_trie_changes.apply_mem_changes(); wrapped_trie_changes.insertions_into(&mut store_update); wrapped_trie_changes.deletions_into(&mut deletions_store_update); wrapped_trie_changes.state_changes_into(&mut store_update); diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index 076e2f1d40c..f6e2cc44355 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -1,18 +1,14 @@ -use std::cmp::Ordering; -use std::collections::{BTreeMap, HashMap, HashSet}; -use std::sync::{Arc, RwLock}; - +use super::ValidatorSchedule; +use crate::types::{ + ApplySplitStateResult, ApplyTransactionResult, RuntimeAdapter, RuntimeStorageConfig, +}; +use crate::BlockHeader; use borsh::{BorshDeserialize, BorshSerialize}; - -use near_epoch_manager::types::BlockHeaderInfo; -use near_epoch_manager::{EpochManagerAdapter, RngSeed}; -use near_primitives::state_part::PartId; -use near_store::test_utils::TestTriesBuilder; -use num_rational::Ratio; - use near_chain_configs::{ProtocolConfig, DEFAULT_GC_NUM_EPOCHS_TO_KEEP}; use near_chain_primitives::Error; use near_crypto::{KeyType, PublicKey, SecretKey, Signature}; +use near_epoch_manager::types::BlockHeaderInfo; +use near_epoch_manager::{EpochManagerAdapter, RngSeed}; use near_pool::types::PoolIterator; use near_primitives::account::{AccessKey, Account}; use near_primitives::block_header::{Approval, ApprovalInner}; @@ -20,6 +16,7 @@ use near_primitives::challenge::ChallengesResult; use near_primitives::epoch_manager::block_info::BlockInfo; use near_primitives::epoch_manager::epoch_info::EpochInfo; use near_primitives::epoch_manager::EpochConfig; +use near_primitives::epoch_manager::ShardConfig; use near_primitives::epoch_manager::ValidatorSelectionConfig; use near_primitives::errors::{EpochError, InvalidTxError}; use near_primitives::hash::{hash, CryptoHash}; @@ -27,6 +24,7 @@ use near_primitives::receipt::{ActionReceipt, Receipt, ReceiptEnum}; use near_primitives::shard_layout; use near_primitives::shard_layout::{ShardLayout, ShardUId}; use near_primitives::sharding::ChunkHash; +use near_primitives::state_part::PartId; use near_primitives::transaction::{ Action, ExecutionMetadata, ExecutionOutcome, ExecutionOutcomeWithId, ExecutionStatus, SignedTransaction, TransferAction, @@ -41,19 +39,15 @@ use near_primitives::views::{ AccessKeyInfoView, AccessKeyList, CallResult, ContractCodeView, EpochValidatorInfo, QueryRequest, QueryResponse, QueryResponseKind, ViewStateResult, }; +use near_store::test_utils::TestTriesBuilder; use near_store::{ - set_genesis_hash, set_genesis_state_roots, DBCol, ShardTries, Store, StoreUpdate, Trie, - TrieChanges, WrappedTrieChanges, -}; - -use crate::types::{ - ApplySplitStateResult, ApplyTransactionResult, RuntimeAdapter, RuntimeStorageConfig, + set_genesis_hash, set_genesis_state_roots, DBCol, ShardTries, StorageError, Store, StoreUpdate, + Trie, TrieChanges, WrappedTrieChanges, }; -use crate::BlockHeader; - -use near_primitives::epoch_manager::ShardConfig; - -use super::ValidatorSchedule; +use num_rational::Ratio; +use std::cmp::Ordering; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::sync::{Arc, RwLock}; /// Simple key value runtime for tests. 
/// @@ -1038,7 +1032,7 @@ impl RuntimeAdapter for KeyValueRuntime { &self, shard_id: ShardId, storage_config: RuntimeStorageConfig, - _height: BlockHeight, + height: BlockHeight, _block_timestamp: u64, _prev_block_hash: &CryptoHash, block_hash: &CryptoHash, @@ -1191,6 +1185,7 @@ impl RuntimeAdapter for KeyValueRuntime { TrieChanges::empty(state_root), Default::default(), *block_hash, + height, ), new_root: state_root, outcomes: tx_results, @@ -1382,10 +1377,15 @@ impl RuntimeAdapter for KeyValueRuntime { fn apply_update_to_split_states( &self, _block_hash: &CryptoHash, + _block_height: BlockHeight, _state_roots: HashMap, _next_shard_layout: &ShardLayout, _state_changes: StateChangesForSplitStates, ) -> Result, Error> { Ok(vec![]) } + + fn load_mem_tries_on_startup(&self, _shard_uids: &[ShardUId]) -> Result<(), StorageError> { + Ok(()) + } } diff --git a/chain/chain/src/tests/gc.rs b/chain/chain/src/tests/gc.rs index d120f5a8566..898894d8396 100644 --- a/chain/chain/src/tests/gc.rs +++ b/chain/chain/src/tests/gc.rs @@ -139,6 +139,7 @@ fn do_fork( trie_changes, Default::default(), *block.hash(), + block.header().height(), ); store_update.save_trie_changes(wrapped_trie_changes); diff --git a/chain/chain/src/types.rs b/chain/chain/src/types.rs index 8abb046a387..b62ca4af7a4 100644 --- a/chain/chain/src/types.rs +++ b/chain/chain/src/types.rs @@ -6,6 +6,7 @@ use chrono::Utc; use near_chain_configs::StateSplitConfig; use near_primitives::sandbox::state_patch::SandboxStatePatch; use near_store::flat::FlatStorageManager; +use near_store::StorageError; use num_rational::Rational32; use near_chain_configs::{Genesis, ProtocolConfig}; @@ -392,6 +393,7 @@ pub trait RuntimeAdapter: Send + Sync { fn apply_update_to_split_states( &self, block_hash: &CryptoHash, + block_height: BlockHeight, state_roots: HashMap, next_shard_layout: &ShardLayout, state_changes: StateChangesForSplitStates, @@ -426,6 +428,11 @@ pub trait RuntimeAdapter: Send + Sync { ) -> bool; fn get_protocol_config(&self, epoch_id: &EpochId) -> Result; + + /// Loads in-memory tries upon startup. The given shard_uids are possible candidates to load, + /// but which exact shards to load depends on configuration. This may only be called when flat + /// storage is ready. + fn load_mem_tries_on_startup(&self, shard_uids: &[ShardUId]) -> Result<(), StorageError>; } /// The last known / checked height and time when we have processed it. 
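The concrete `load_mem_tries_on_startup` implementation lives in `nearcore/src/runtime/mod.rs` (part of this patch, but not shown in this excerpt). As a minimal sketch, assuming the runtime holds a `ShardTries` handle in a `tries` field, the trait method can simply forward to `ShardTries::load_mem_tries_for_enabled_shards` (added in `shard_tries.rs` below), which applies the `TrieConfig` filters:

```rust
fn load_mem_tries_on_startup(&self, shard_uids: &[ShardUId]) -> Result<(), StorageError> {
    // The shard_uids are only candidates; ShardTries filters them against
    // load_mem_tries_for_shards / load_mem_tries_for_all_shards from TrieConfig.
    self.tries.load_mem_tries_for_enabled_shards(shard_uids)
}
```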
diff --git a/chain/client/src/test_utils/test_env_builder.rs b/chain/client/src/test_utils/test_env_builder.rs index c526f85359b..02dbdeee8ce 100644 --- a/chain/client/src/test_utils/test_env_builder.rs +++ b/chain/client/src/test_utils/test_env_builder.rs @@ -19,7 +19,7 @@ use near_network::test_utils::MockPeerManagerAdapter; use near_primitives::epoch_manager::{AllEpochConfigTestOverrides, RngSeed}; use near_primitives::types::{AccountId, NumShards}; use near_store::test_utils::create_test_store; -use near_store::{NodeStorage, ShardUId, Store, StoreConfig}; +use near_store::{NodeStorage, ShardUId, Store, StoreConfig, TrieConfig}; use super::setup::{setup_client_with_runtime, setup_synchronous_shards_manager}; use super::test_env::TestEnv; @@ -299,11 +299,13 @@ impl TestEnvBuilder { pub fn internal_initialize_nightshade_runtimes( self, runtime_configs: Vec, + trie_configs: Vec, nightshade_runtime_creator: impl Fn( PathBuf, Store, Arc, RuntimeConfigStore, + TrieConfig, ) -> Arc, ) -> Self { let builder = self.ensure_home_dirs().ensure_epoch_managers().ensure_stores(); @@ -312,15 +314,16 @@ impl TestEnvBuilder { builder.stores.clone().unwrap(), builder.epoch_managers.clone().unwrap(), runtime_configs, + trie_configs, )) - .map(|(home_dir, store, epoch_manager, runtime_config)| { + .map(|(home_dir, store, epoch_manager, runtime_config, trie_config)| { let epoch_manager = match epoch_manager { EpochManagerKind::Mock(_) => { panic!("NightshadeRuntime can only be instantiated with EpochManagerHandle") } EpochManagerKind::Handle(handle) => handle, }; - nightshade_runtime_creator(home_dir, store, epoch_manager, runtime_config) + nightshade_runtime_creator(home_dir, store, epoch_manager, runtime_config, trie_config) }) .collect(); builder.runtimes(runtimes) diff --git a/core/store/src/config.rs b/core/store/src/config.rs index 61b9a2149ee..2e31c560822 100644 --- a/core/store/src/config.rs +++ b/core/store/src/config.rs @@ -66,6 +66,13 @@ pub struct StoreConfig { /// This config option is temporary and will be removed once flat storage is implemented. pub sweat_prefetch_senders: Vec, + /// List of shard UIDs for which we should load the tries in memory. + /// TODO(#9511): This does not automatically survive resharding. We may need to figure out a + /// strategy for that. + pub load_mem_tries_for_shards: Vec, + /// If true, load mem tries for all shards; this has priority over `load_mem_tries_for_shards`. + pub load_mem_tries_for_all_shards: bool, + /// Path where to create RocksDB checkpoints during database migrations or /// `false` to disable that feature. 
/// @@ -262,6 +269,9 @@ impl Default for StoreConfig { "sweat_the_oracle.testnet".to_owned(), ], + load_mem_tries_for_shards: vec![ShardUId { shard_id: 3, version: 1 }], + load_mem_tries_for_all_shards: false, + migration_snapshot: Default::default(), // We checked that this number of threads doesn't impact diff --git a/core/store/src/test_utils.rs b/core/store/src/test_utils.rs index a73cc506554..774a34276e6 100644 --- a/core/store/src/test_utils.rs +++ b/core/store/src/test_utils.rs @@ -69,11 +69,18 @@ pub struct TestTriesBuilder { shard_version: ShardVersion, num_shards: NumShards, enable_flat_storage: bool, + enable_in_memory_tries: bool, } impl TestTriesBuilder { pub fn new() -> Self { - Self { store: None, shard_version: 0, num_shards: 1, enable_flat_storage: false } + Self { + store: None, + shard_version: 0, + num_shards: 1, + enable_flat_storage: false, + enable_in_memory_tries: false, + } } pub fn with_store(mut self, store: Store) -> Self { @@ -92,6 +99,11 @@ impl TestTriesBuilder { self } + pub fn with_in_memory_tries(mut self) -> Self { + self.enable_in_memory_tries = true; + self + } + pub fn build(self) -> ShardTries { let store = self.store.unwrap_or_else(create_test_store); let shard_uids = (0..self.num_shards) @@ -100,7 +112,10 @@ impl TestTriesBuilder { let flat_storage_manager = FlatStorageManager::new(store.clone()); let tries = ShardTries::new( store, - TrieConfig::default(), + TrieConfig { + load_mem_tries_for_all_shards: self.enable_in_memory_tries, + ..Default::default() + }, &shard_uids, flat_storage_manager, StateSnapshotConfig::default(), @@ -131,6 +146,9 @@ impl TestTriesBuilder { flat_storage_manager.create_flat_storage_for_shard(shard_uid).unwrap(); } } + if self.enable_in_memory_tries { + tries.load_mem_tries_for_enabled_shards(&shard_uids).unwrap(); + } tries } } diff --git a/core/store/src/trie/config.rs b/core/store/src/trie/config.rs index 59669b41305..49630a74196 100644 --- a/core/store/src/trie/config.rs +++ b/core/store/src/trie/config.rs @@ -1,5 +1,6 @@ use crate::config::TrieCacheConfig; use crate::StoreConfig; +use near_primitives::shard_layout::ShardUId; use near_primitives::types::AccountId; use std::str::FromStr; use tracing::error; @@ -20,7 +21,7 @@ pub(crate) const DEFAULT_SHARD_CACHE_DELETIONS_QUEUE_CAPACITY: usize = const TRIE_LIMIT_CACHED_VALUE_SIZE: usize = 1000; /// Stores necessary configuration for the creation of tries. -#[derive(Default)] +#[derive(Clone, Default)] pub struct TrieConfig { pub shard_cache_config: TrieCacheConfig, pub view_shard_cache_config: TrieCacheConfig, @@ -30,6 +31,9 @@ pub struct TrieConfig { pub sweat_prefetch_receivers: Vec, /// List of allowed predecessor accounts for SWEAT prefetching. pub sweat_prefetch_senders: Vec, + /// List of shards we will load into memory. + pub load_mem_tries_for_shards: Vec, + pub load_mem_tries_for_all_shards: bool, } impl TrieConfig { @@ -53,6 +57,8 @@ impl TrieConfig { Err(e) => error!(target: "config", "invalid account id {account}: {e}"), } } + this.load_mem_tries_for_shards = config.load_mem_tries_for_shards.clone(); + this.load_mem_tries_for_all_shards = config.load_mem_tries_for_all_shards; this } diff --git a/core/store/src/trie/mem/mod.rs b/core/store/src/trie/mem/mod.rs index ef6d720ef54..884f4351025 100644 --- a/core/store/src/trie/mem/mod.rs +++ b/core/store/src/trie/mem/mod.rs @@ -135,7 +135,7 @@ impl MemTries { .set(self.roots.len() as i64); } - #[cfg(test)] + /// Used for unit testing and integration testing. 
pub fn num_roots(&self) -> usize { self.heights.iter().map(|(_, v)| v.len()).sum() } diff --git a/core/store/src/trie/mod.rs b/core/store/src/trie/mod.rs index 1a420ce99c9..f08deeac356 100644 --- a/core/store/src/trie/mod.rs +++ b/core/store/src/trie/mod.rs @@ -1,5 +1,7 @@ use self::accounting_cache::TrieAccountingCache; +use self::mem::lookup::memtrie_lookup; use self::mem::updating::{UpdatedMemTrieNode, UpdatedMemTrieNodeId}; +use self::mem::MemTries; use self::trie_recording::TrieRecorder; use self::trie_storage::TrieMemoryPartialStorage; use crate::flat::{FlatStateChanges, FlatStorageChunkView}; @@ -34,7 +36,7 @@ use std::fmt::Write; use std::hash::Hash; use std::rc::Rc; use std::str; -use std::sync::Arc; +use std::sync::{Arc, RwLock}; pub mod accounting_cache; mod config; @@ -330,6 +332,7 @@ impl std::fmt::Debug for TrieNode { pub struct Trie { storage: Rc, + memtries: Option>>, root: StateRoot, /// If present, flat storage is used to look up keys (if asked for). /// Otherwise, we would crawl through the trie. @@ -618,6 +621,15 @@ impl Trie { storage: Rc, root: StateRoot, flat_storage_chunk_view: Option, + ) -> Self { + Self::new_with_memtries(storage, None, root, flat_storage_chunk_view) + } + + pub fn new_with_memtries( + storage: Rc, + memtries: Option>>, + root: StateRoot, + flat_storage_chunk_view: Option, ) -> Self { let accounting_cache = match storage.as_caching_storage() { Some(caching_storage) => RefCell::new(TrieAccountingCache::new(Some(( @@ -628,6 +640,7 @@ impl Trie { }; Trie { storage, + memtries, root, charge_gas_for_trie_node_access: flat_storage_chunk_view.is_none(), flat_storage_chunk_view, @@ -639,8 +652,12 @@ impl Trie { /// Makes a new trie that has everything the same except that access /// through that trie accumulates a state proof for all nodes accessed. pub fn recording_reads(&self) -> Self { - let mut trie = - Self::new(self.storage.clone(), self.root, self.flat_storage_chunk_view.clone()); + let mut trie = Self::new_with_memtries( + self.storage.clone(), + self.memtries.clone(), + self.root, + self.flat_storage_chunk_view.clone(), + ); trie.recorder = Some(RefCell::new(TrieRecorder::new())); trie } @@ -1222,6 +1239,48 @@ impl Trie { } } + /// Retrieves an `OptimizedValueRef` (a hash of or inlined value) for the given + /// key from the in-memory trie. In general, in-memory tries may inline a value + /// if the value is short, but otherwise it would defer the storage of the value + /// to the state column. This method will return whichever the in-memory trie has. + /// Refer to `get_optimized_ref` for the semantics of using the returned type. + /// + /// `charge_gas_for_trie_node_access` is used to control whether Trie node + /// accesses incur any gas. Note that access to values is never charged here; + /// it is only charged when the returned ref is dereferenced. 
+ fn lookup_from_memory( + &self, + key: &[u8], + charge_gas_for_trie_node_access: bool, + ) -> Result<Option<OptimizedValueRef>, StorageError> { + if self.root == Self::EMPTY_ROOT { + return Ok(None); + } + let lock = self.memtries.as_ref().unwrap().read().unwrap(); + let root = lock.get_root(&self.root).ok_or_else(|| { + StorageError::StorageInconsistentState(format!( + "Failed to find root node {} in memtrie", + self.root + )) + })?; + + let mut accessed_nodes = Vec::new(); + let flat_value = memtrie_lookup(root, key, Some(&mut accessed_nodes)); + if charge_gas_for_trie_node_access { + for (node_hash, serialized_node) in &accessed_nodes { + self.accounting_cache + .borrow_mut() + .retroactively_account(*node_hash, serialized_node.clone()); + } + } + if let Some(recorder) = &self.recorder { + for (node_hash, serialized_node) in accessed_nodes { + recorder.borrow_mut().record(&node_hash, serialized_node); + } + } + Ok(flat_value.map(OptimizedValueRef::from_flat_value)) + } + /// For debugging only. Returns the raw node at the given path starting from the root. /// The format of the nibbles parameter is that each element represents 4 bits of the /// path. (Even though we use a u8 for each element, we only use the lower 4 bits.) @@ -1312,14 +1371,15 @@ impl Trie { key: &[u8], mode: KeyLookupMode, ) -> Result<Option<OptimizedValueRef>, StorageError> { - if mode == KeyLookupMode::FlatStorage && self.flat_storage_chunk_view.is_some() { + let charge_gas_for_trie_node_access = + mode == KeyLookupMode::Trie || self.charge_gas_for_trie_node_access; + if self.memtries.is_some() { + self.lookup_from_memory(key, charge_gas_for_trie_node_access) + } else if mode == KeyLookupMode::FlatStorage && self.flat_storage_chunk_view.is_some() { self.lookup_from_flat_storage(key) } else { Ok(self - .lookup_from_state_column( - NibbleSlice::new(key), - mode == KeyLookupMode::Trie || self.charge_gas_for_trie_node_access, - )? + .lookup_from_state_column(NibbleSlice::new(key), charge_gas_for_trie_node_access)? + .map(OptimizedValueRef::Ref)) } } @@ -1360,21 +1420,40 @@ impl Trie { where I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>, { - let mut memory = NodesStorage::new(); - let mut root_node = self.move_node_to_mutable(&mut memory, &self.root)?; - for (key, value) in changes { - let key = NibbleSlice::new(&key); - root_node = match value { - Some(arr) => self.insert(&mut memory, root_node, key, arr), - None => self.delete(&mut memory, root_node, key), - }?; - } + match &self.memtries { + Some(memtries) => { + // If we have in-memory tries, use them to construct the changes entirely (for + // both in-memory and on-disk updates) because it's much faster.
+ let guard = memtries.read().unwrap(); + let mut trie_update = guard.update(self.root, true)?; + for (key, value) in changes { + match value { + Some(arr) => { + trie_update.insert(&key, arr); + } + None => trie_update.delete(&key), + } + } + Ok(trie_update.to_trie_changes()) + } + None => { + let mut memory = NodesStorage::new(); + let mut root_node = self.move_node_to_mutable(&mut memory, &self.root)?; + for (key, value) in changes { + let key = NibbleSlice::new(&key); + root_node = match value { + Some(arr) => self.insert(&mut memory, root_node, key, arr), + None => self.delete(&mut memory, root_node, key), + }?; + } - #[cfg(test)] - { - self.memory_usage_verify(&memory, NodeHandle::InMemory(root_node)); + #[cfg(test)] + { + self.memory_usage_verify(&memory, NodeHandle::InMemory(root_node)); + } + Trie::flatten_nodes(&self.root, memory, root_node) + } } - Trie::flatten_nodes(&self.root, memory, root_node) } pub fn iter<'a>(&'a self) -> Result, StorageError> { diff --git a/core/store/src/trie/shard_tries.rs b/core/store/src/trie/shard_tries.rs index 993fb952556..79d208a30d0 100644 --- a/core/store/src/trie/shard_tries.rs +++ b/core/store/src/trie/shard_tries.rs @@ -1,8 +1,11 @@ +use super::mem::MemTries; use super::state_snapshot::{StateSnapshot, StateSnapshotConfig}; use super::TrieRefcountSubtraction; use crate::flat::store_helper::remove_all_state_values; use crate::flat::{FlatStorageManager, FlatStorageStatus}; use crate::trie::config::TrieConfig; +use crate::trie::mem::loading::load_trie_from_flat_state_and_delta; +use crate::trie::mem::updating::apply_memtrie_changes; use crate::trie::prefetching_trie_storage::PrefetchingThreadsHandle; use crate::trie::trie_storage::{TrieCache, TrieCachingStorage}; use crate::trie::{TrieRefcountAddition, POISONED_LOCK_ERR}; @@ -13,15 +16,18 @@ use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::{self, ShardUId}; use near_primitives::trie_key::TrieKey; use near_primitives::types::{ - RawStateChange, RawStateChangesWithTrieKey, StateChangeCause, StateRoot, + BlockHeight, RawStateChange, RawStateChangesWithTrieKey, StateChangeCause, StateRoot, }; +use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; use std::collections::HashMap; use std::rc::Rc; use std::sync::{Arc, RwLock}; +use tracing::info; struct ShardTriesInner { store: Store, trie_config: TrieConfig, + mem_tries: RwLock>>>, /// Cache reserved for client actor to use caches: RwLock>, /// Cache for readers. 
@@ -55,6 +61,7 @@ impl ShardTries { ShardTries(Arc::new(ShardTriesInner { store, trie_config, + mem_tries: RwLock::new(HashMap::new()), caches: RwLock::new(caches), view_caches: RwLock::new(view_caches), flat_storage_manager, @@ -135,8 +142,12 @@ impl ShardTries { )); let flat_storage_chunk_view = block_hash .and_then(|block_hash| self.0.flat_storage_manager.chunk_view(shard_uid, block_hash)); - - Trie::new(storage, state_root, flat_storage_chunk_view) + Trie::new_with_memtries( + storage, + self.get_mem_tries(shard_uid), + state_root, + flat_storage_chunk_view, + ) } pub fn get_trie_for_shard(&self, shard_uid: ShardUId, state_root: StateRoot) -> Trie { @@ -324,6 +335,29 @@ impl ShardTries { self.apply_all_inner(trie_changes, shard_uid, true, store_update) } + pub fn apply_memtrie_changes( + &self, + trie_changes: &TrieChanges, + shard_uid: ShardUId, + block_height: BlockHeight, + ) { + if let Some(memtries) = self.get_mem_tries(shard_uid) { + apply_memtrie_changes( + &mut memtries.write().unwrap(), + trie_changes + .mem_trie_changes + .as_ref() + .expect("Memtrie changes must be present if memtrie is loaded"), + block_height, + ); + } else { + assert!( + trie_changes.mem_trie_changes.is_none(), + "Memtrie changes must not be present if memtrie is not loaded" + ); + } + } + /// Returns the status of the given shard of flat storage in the state snapshot. /// `sync_prev_prev_hash` needs to match the block hash that identifies that snapshot. pub fn get_snapshot_flat_storage_status( @@ -344,6 +378,55 @@ impl ShardTries { self.0.view_caches.write().expect(POISONED_LOCK_ERR).remove(&shard_uid); remove_all_state_values(store_update, shard_uid); } + + /// Should be called upon startup to load in-memory tries for enabled shards. + pub fn load_mem_tries_for_enabled_shards( + &self, + shard_uids: &[ShardUId], + ) -> Result<(), StorageError> { + let trie_config = &self.0.trie_config; + let shard_uids_to_load = shard_uids + .iter() + .copied() + .filter(|shard_uid| { + trie_config.load_mem_tries_for_all_shards + || trie_config.load_mem_tries_for_shards.contains(shard_uid) + }) + .collect::>(); + let store = self.0.store.clone(); + info!(target: "memtrie", "Loading tries to memory for shards {:?}...", shard_uids_to_load); + shard_uids_to_load + .par_iter() + .map(|shard_uid| -> Result<(), StorageError> { + let mem_tries = load_trie_from_flat_state_and_delta(&store, *shard_uid)?; + self.0 + .mem_tries + .write() + .unwrap() + .insert(*shard_uid, Arc::new(RwLock::new(mem_tries))); + Ok(()) + }) + .collect::>>() + .into_iter() + .collect::>()?; + + info!(target: "memtrie", "Memtries loading complete for shards {:?}", shard_uids_to_load); + Ok(()) + } + + /// Retrieves the in-memory tries for the shard. + pub fn get_mem_tries(&self, shard_uid: ShardUId) -> Option>> { + let guard = self.0.mem_tries.write().unwrap(); + guard.get(&shard_uid).cloned() + } + + /// Garbage collects the in-memory tries for the shard up to (and including) the given + /// height. + pub fn delete_memtrie_roots_up_to_height(&self, shard_uid: ShardUId, height: BlockHeight) { + if let Some(memtries) = self.get_mem_tries(shard_uid) { + memtries.write().unwrap().delete_until_height(height); + } + } } pub struct WrappedTrieChanges { @@ -352,6 +435,7 @@ pub struct WrappedTrieChanges { trie_changes: TrieChanges, state_changes: Vec, block_hash: CryptoHash, + block_height: BlockHeight, } // Partial implementation. 
Skips `tries` due to its complexity and @@ -364,6 +448,7 @@ impl std::fmt::Debug for WrappedTrieChanges { .field("trie_changes", &"") .field("state_changes", &"") .field("block_hash", &self.block_hash) + .field("block_height", &self.block_height) .finish() } } @@ -375,14 +460,26 @@ impl WrappedTrieChanges { trie_changes: TrieChanges, state_changes: Vec, block_hash: CryptoHash, + block_height: BlockHeight, ) -> Self { - WrappedTrieChanges { tries, shard_uid, trie_changes, state_changes, block_hash } + WrappedTrieChanges { + tries, + shard_uid, + trie_changes, + state_changes, + block_hash, + block_height, + } } pub fn state_changes(&self) -> &[RawStateChangesWithTrieKey] { &self.state_changes } + pub fn apply_mem_changes(&self) { + self.tries.apply_memtrie_changes(&self.trie_changes, self.shard_uid, self.block_height); + } + /// Save insertions of trie nodes into Store. pub fn insertions_into(&self, store_update: &mut StoreUpdate) { self.tries.apply_insertions(&self.trie_changes, self.shard_uid, store_update) @@ -604,6 +701,8 @@ mod test { enable_receipt_prefetching: false, sweat_prefetch_receivers: Vec::new(), sweat_prefetch_senders: Vec::new(), + load_mem_tries_for_shards: Vec::new(), + load_mem_tries_for_all_shards: false, }; let shard_uids = Vec::from([ShardUId::single_shard()]); ShardTries::new( @@ -722,6 +821,8 @@ mod test { enable_receipt_prefetching: false, sweat_prefetch_receivers: Vec::new(), sweat_prefetch_senders: Vec::new(), + load_mem_tries_for_shards: Vec::new(), + load_mem_tries_for_all_shards: false, }; let shard_uids = Vec::from([ShardUId { shard_id: 0, version: 0 }]); let shard_uid = *shard_uids.first().unwrap(); diff --git a/core/store/src/trie/trie_recording.rs b/core/store/src/trie/trie_recording.rs index 1989136ffd2..05ec526e058 100644 --- a/core/store/src/trie/trie_recording.rs +++ b/core/store/src/trie/trie_recording.rs @@ -27,56 +27,174 @@ impl TrieRecorder { #[cfg(test)] mod trie_recording_tests { + use crate::db::refcount::decode_value_with_rc; use crate::test_utils::{ gen_larger_changes, simplify_changes, test_populate_flat_storage, test_populate_trie, TestTriesBuilder, }; - use crate::Trie; - use near_primitives::hash::CryptoHash; - use near_primitives::shard_layout::ShardUId; + use crate::trie::mem::metrics::MEM_TRIE_NUM_LOOKUPS; + use crate::{DBCol, Store, Trie}; + use near_primitives::hash::{hash, CryptoHash}; + use near_primitives::shard_layout::{get_block_shard_uid, get_block_shard_uid_rev, ShardUId}; + use near_primitives::state::ValueRef; + use near_primitives::types::chunk_extra::ChunkExtra; + use near_primitives::types::StateRoot; use near_vm_runner::logic::TrieNodesCount; - use std::collections::HashMap; + use rand::{thread_rng, Rng}; + use std::collections::{HashMap, HashSet}; + use std::num::NonZeroU32; const NUM_ITERATIONS_PER_TEST: usize = 100; + /// Prepared on-disk trie and flat storage for testing. + struct PreparedTrie { + store: Store, + shard_uid: ShardUId, + /// All the data we've put into the trie. + data_in_trie: HashMap, Vec>, + /// The keys that we should be using to call get() on the trie with. + keys_to_get: Vec>, + /// The keys that we should be using to call get_optimized_ref() on the + /// trie with. + keys_to_get_ref: Vec>, + state_root: StateRoot, + } + + /// Prepare a trie for testing; this will prepare both a trie and a flat + /// storage with some dummy block info. If `use_missing_keys` is true, + /// the keys to test with will also include some keys that are not in the + /// trie. 
+ fn prepare_trie(use_missing_keys: bool) -> PreparedTrie { + let tries_for_building = TestTriesBuilder::new().with_flat_storage().build(); + let shard_uid = ShardUId::single_shard(); + let trie_changes = gen_larger_changes(&mut thread_rng(), 50); + let trie_changes = simplify_changes(&trie_changes); + if trie_changes.is_empty() { + // try again + return prepare_trie(use_missing_keys); + } + let state_root = test_populate_trie( + &tries_for_building, + &Trie::EMPTY_ROOT, + shard_uid, + trie_changes.clone(), + ); + test_populate_flat_storage( + &tries_for_building, + shard_uid, + &CryptoHash::default(), + &CryptoHash::default(), + &trie_changes, + ); + + // ChunkExtra is needed for in-memory trie loading code to query state roots. + let chunk_extra = ChunkExtra::new(&state_root, CryptoHash::default(), Vec::new(), 0, 0, 0); + let mut update_for_chunk_extra = tries_for_building.store_update(); + update_for_chunk_extra + .set_ser( + DBCol::ChunkExtra, + &get_block_shard_uid(&CryptoHash::default(), &shard_uid), + &chunk_extra, + ) + .unwrap(); + update_for_chunk_extra.commit().unwrap(); + + let data_in_trie = trie_changes + .iter() + .map(|(key, value)| (key.clone(), value.clone().unwrap())) + .collect::>(); + let (keys_to_get, keys_to_get_ref) = trie_changes + .iter() + .map(|(key, _)| { + let mut key = key.clone(); + if use_missing_keys { + key.push(100); + } + key + }) + .partition::, _>(|_| thread_rng().gen()); + PreparedTrie { + store: tries_for_building.get_store(), + shard_uid, + data_in_trie, + keys_to_get, + keys_to_get_ref, + state_root, + } + } + + /// Delete state that we should not be relying on if in-memory tries are + /// loaded, to help make sure that in-memory tries are used. + /// + /// The only thing we don't delete are the values, which may not be + /// inlined. + fn destructively_delete_in_memory_state_from_disk( + store: &Store, + data_in_trie: &HashMap, Vec>, + ) { + let key_hashes_to_keep = data_in_trie.iter().map(|(_, v)| hash(&v)).collect::>(); + let mut update = store.store_update(); + for result in store.iter_raw_bytes(DBCol::State) { + let (key, value) = result.unwrap(); + let (_, refcount) = decode_value_with_rc(&value); + let (key_hash, _) = get_block_shard_uid_rev(&key).unwrap(); + if !key_hashes_to_keep.contains(&key_hash) { + update.decrement_refcount_by( + DBCol::State, + &key, + NonZeroU32::new(refcount as u32).unwrap(), + ); + } + } + update.delete_all(DBCol::FlatState); + update.commit().unwrap(); + } + /// Verifies that when operating on a trie, the results are completely consistent /// regardless of whether we're operating on the real storage (with or without chunk /// cache), while recording reads, or when operating on recorded partial storage. 
- fn test_trie_recording_consistency(enable_accounting_cache: bool, use_missing_keys: bool) { - let mut rng = rand::thread_rng(); + fn test_trie_recording_consistency( + enable_accounting_cache: bool, + use_missing_keys: bool, + use_in_memory_tries: bool, + ) { for _ in 0..NUM_ITERATIONS_PER_TEST { - let tries = TestTriesBuilder::new().with_shard_layout(1, 2).with_flat_storage().build(); - - let shard_uid = ShardUId { version: 1, shard_id: 0 }; - let trie_changes = gen_larger_changes(&mut rng, 50); - let trie_changes = simplify_changes(&trie_changes); - if trie_changes.is_empty() { - continue; - } - let state_root = - test_populate_trie(&tries, &Trie::EMPTY_ROOT, shard_uid, trie_changes.clone()); - let data_in_trie = trie_changes - .iter() - .map(|(key, value)| (key.clone(), value.clone().unwrap())) - .collect::>(); - let keys_to_test_with = trie_changes - .iter() - .map(|(key, _)| { - let mut key = key.clone(); - if use_missing_keys { - key.push(100); - } - key - }) - .collect::>(); + let PreparedTrie { + store, + shard_uid, + data_in_trie, + keys_to_get, + keys_to_get_ref, + state_root, + } = prepare_trie(use_missing_keys); + let tries = if use_in_memory_tries { + TestTriesBuilder::new().with_store(store.clone()).with_in_memory_tries().build() + } else { + TestTriesBuilder::new().with_store(store.clone()).build() + }; + let mem_trie_lookup_counts_before = MEM_TRIE_NUM_LOOKUPS.get(); + + if use_in_memory_tries { + // Delete the on-disk state so that we really know we're using + // in-memory tries. + destructively_delete_in_memory_state_from_disk(&store, &data_in_trie); + } // Let's capture the baseline node counts - this is what will happen // in production. let trie = tries.get_trie_for_shard(shard_uid, state_root); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); - for key in &keys_to_test_with { + for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } + for key in &keys_to_get_ref { + assert_eq!( + trie.get_optimized_ref(key, crate::KeyLookupMode::Trie) + .unwrap() + .map(|value| value.into_value_ref()), + data_in_trie.get(key).map(|value| ValueRef::new(&value)) + ); + } let baseline_trie_nodes_count = trie.get_trie_nodes_count(); println!("Baseline trie nodes count: {:?}", baseline_trie_nodes_count); @@ -84,9 +202,17 @@ mod trie_recording_tests { // we get are exactly the same. 
let trie = tries.get_trie_for_shard(shard_uid, state_root).recording_reads(); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); - for key in &keys_to_test_with { + for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } + for key in &keys_to_get_ref { + assert_eq!( + trie.get_optimized_ref(key, crate::KeyLookupMode::Trie) + .unwrap() + .map(|value| value.into_value_ref()), + data_in_trie.get(key).map(|value| ValueRef::new(&value)) + ); + } assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); // Now, let's check that when doing the same lookups with the captured partial storage, @@ -95,35 +221,68 @@ mod trie_recording_tests { println!( "Partial storage has {} nodes from {} entries", partial_storage.nodes.len(), - trie_changes.len() + data_in_trie.len() ); let trie = Trie::from_recorded_storage(partial_storage, state_root, false); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); - for key in &keys_to_test_with { + for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } + for key in &keys_to_get_ref { + assert_eq!( + trie.get_optimized_ref(key, crate::KeyLookupMode::Trie) + .unwrap() + .map(|value| value.into_value_ref()), + data_in_trie.get(key).map(|value| ValueRef::new(&value)) + ); + } assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); + + if use_in_memory_tries { + // sanity check that we did indeed use in-memory tries. + assert!(MEM_TRIE_NUM_LOOKUPS.get() > mem_trie_lookup_counts_before); + } } } #[test] fn test_trie_recording_consistency_no_accounting_cache() { - test_trie_recording_consistency(false, false); + test_trie_recording_consistency(false, false, false); } #[test] fn test_trie_recording_consistency_with_accounting_cache() { - test_trie_recording_consistency(true, false); + test_trie_recording_consistency(true, false, false); } #[test] fn test_trie_recording_consistency_no_accounting_cache_with_missing_keys() { - test_trie_recording_consistency(false, true); + test_trie_recording_consistency(false, true, false); } #[test] fn test_trie_recording_consistency_with_accounting_cache_and_missing_keys() { - test_trie_recording_consistency(true, true); + test_trie_recording_consistency(true, true, false); + } + + #[test] + fn test_trie_recording_consistency_memtrie_no_accounting_cache() { + test_trie_recording_consistency(false, false, true); + } + + #[test] + fn test_trie_recording_consistency_memtrie_with_accounting_cache() { + test_trie_recording_consistency(true, false, true); + } + + #[test] + fn test_trie_recording_consistency_memtrie_no_accounting_cache_with_missing_keys() { + test_trie_recording_consistency(false, true, true); + } + + #[test] + fn test_trie_recording_consistency_memtrie_with_accounting_cache_and_missing_keys() { + test_trie_recording_consistency(true, true, true); } /// Verifies that when operating on a trie, the results are completely consistent @@ -133,43 +292,34 @@ mod trie_recording_tests { fn test_trie_recording_consistency_with_flat_storage( enable_accounting_cache: bool, use_missing_keys: bool, + use_in_memory_tries: bool, ) { - let mut rng = rand::thread_rng(); for _ in 0..NUM_ITERATIONS_PER_TEST { - let tries = TestTriesBuilder::new().with_shard_layout(1, 2).with_flat_storage().build(); - - let shard_uid = ShardUId { version: 1, shard_id: 0 }; - let trie_changes = gen_larger_changes(&mut rng, 50); - let trie_changes = simplify_changes(&trie_changes); - if trie_changes.is_empty() { - continue; - 
} - let state_root = - test_populate_trie(&tries, &Trie::EMPTY_ROOT, shard_uid, trie_changes.clone()); - test_populate_flat_storage( - &tries, + let PreparedTrie { + store, shard_uid, - &CryptoHash::default(), - &CryptoHash::default(), - &trie_changes, - ); + data_in_trie, + keys_to_get, + keys_to_get_ref, + state_root, + } = prepare_trie(use_missing_keys); + let tries = if use_in_memory_tries { + TestTriesBuilder::new() + .with_store(store.clone()) + .with_flat_storage() + .with_in_memory_tries() + .build() + } else { + TestTriesBuilder::new().with_store(store.clone()).with_flat_storage().build() + }; + let mem_trie_lookup_counts_before = MEM_TRIE_NUM_LOOKUPS.get(); - let data_in_trie = trie_changes - .iter() - .map(|(key, value)| (key.clone(), value.clone().unwrap())) - .collect::>(); - let keys_to_test_with = trie_changes - .iter() - .map(|(key, _)| { - let mut key = key.clone(); - if use_missing_keys { - key.push(100); - } - key - }) - .collect::>(); - - // First, check that the trie is using flat storage, so that counters are all zero. + if use_in_memory_tries { + // Delete the on-disk state so that we really know we're using + // in-memory tries. + destructively_delete_in_memory_state_from_disk(&store, &data_in_trie); + } + // Check that the trie is using flat storage, so that counters are all zero. // Only use get_optimized_ref(), because get() will actually dereference values which can // cause trie reads. let trie = tries.get_trie_with_block_hash_for_shard( @@ -178,8 +328,8 @@ mod trie_recording_tests { &CryptoHash::default(), false, ); - for key in &keys_to_test_with { - trie.get_optimized_ref(&key, crate::KeyLookupMode::FlatStorage).unwrap(); + for key in data_in_trie.keys() { + trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage).unwrap(); } assert_eq!(trie.get_trie_nodes_count(), TrieNodesCount { db_reads: 0, mem_reads: 0 }); @@ -192,9 +342,17 @@ mod trie_recording_tests { false, ); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); - for key in &keys_to_test_with { + for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } + for key in &keys_to_get_ref { + assert_eq!( + trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage) + .unwrap() + .map(|value| value.into_value_ref()), + data_in_trie.get(key).map(|value| ValueRef::new(&value)) + ); + } let baseline_trie_nodes_count = trie.get_trie_nodes_count(); println!("Baseline trie nodes count: {:?}", baseline_trie_nodes_count); @@ -209,9 +367,17 @@ mod trie_recording_tests { ) .recording_reads(); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); - for key in &keys_to_test_with { + for key in &keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } + for key in &keys_to_get_ref { + assert_eq!( + trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage) + .unwrap() + .map(|value| value.into_value_ref()), + data_in_trie.get(key).map(|value| ValueRef::new(&value)) + ); + } assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); // Now, let's check that when doing the same lookups with the captured partial storage, @@ -220,34 +386,69 @@ mod trie_recording_tests { println!( "Partial storage has {} nodes from {} entries", partial_storage.nodes.len(), - trie_changes.len() + data_in_trie.len() ); let trie = Trie::from_recorded_storage(partial_storage, state_root, true); trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache); - for key in &keys_to_test_with { + for key in 
&keys_to_get { assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned()); } + for key in &keys_to_get_ref { + assert_eq!( + trie.get_optimized_ref(key, crate::KeyLookupMode::FlatStorage) + .unwrap() + .map(|value| value.into_value_ref()), + data_in_trie.get(key).map(|value| ValueRef::new(&value)) + ); + } assert_eq!(trie.get_trie_nodes_count(), baseline_trie_nodes_count); + + if use_in_memory_tries { + // sanity check that we did indeed use in-memory tries. + assert!(MEM_TRIE_NUM_LOOKUPS.get() > mem_trie_lookup_counts_before); + } } } #[test] fn test_trie_recording_consistency_with_flat_storage_no_accounting_cache() { - test_trie_recording_consistency_with_flat_storage(false, false); + test_trie_recording_consistency_with_flat_storage(false, false, false); } #[test] fn test_trie_recording_consistency_with_flat_storage_with_accounting_cache() { - test_trie_recording_consistency_with_flat_storage(true, false); + test_trie_recording_consistency_with_flat_storage(true, false, false); } #[test] fn test_trie_recording_consistency_with_flat_storage_no_accounting_cache_with_missing_keys() { - test_trie_recording_consistency_with_flat_storage(false, true); + test_trie_recording_consistency_with_flat_storage(false, true, false); } #[test] fn test_trie_recording_consistency_with_flat_storage_with_accounting_cache_and_missing_keys() { - test_trie_recording_consistency_with_flat_storage(true, true); + test_trie_recording_consistency_with_flat_storage(true, true, false); + } + + #[test] + fn test_trie_recording_consistency_with_flat_storage_memtrie_no_accounting_cache() { + test_trie_recording_consistency_with_flat_storage(false, false, true); + } + + #[test] + fn test_trie_recording_consistency_with_flat_storage_memtrie_with_accounting_cache() { + test_trie_recording_consistency_with_flat_storage(true, false, true); + } + + #[test] + fn test_trie_recording_consistency_with_flat_storage_memtrie_no_accounting_cache_with_missing_keys( + ) { + test_trie_recording_consistency_with_flat_storage(false, true, true); + } + + #[test] + fn test_trie_recording_consistency_with_flat_storage_memtrie_with_accounting_cache_and_missing_keys( + ) { + test_trie_recording_consistency_with_flat_storage(true, true, true); } } diff --git a/integration-tests/src/tests/client/features.rs b/integration-tests/src/tests/client/features.rs index 758b2064a1c..81da1a41ec9 100644 --- a/integration-tests/src/tests/client/features.rs +++ b/integration-tests/src/tests/client/features.rs @@ -10,6 +10,7 @@ mod delegate_action; mod fix_contract_loading_cost; mod fix_storage_usage; mod flat_storage; +mod in_memory_tries; mod increase_deployment_cost; mod increase_storage_compute_cost; mod limit_contract_functions_number; diff --git a/integration-tests/src/tests/client/features/in_memory_tries.rs b/integration-tests/src/tests/client/features/in_memory_tries.rs new file mode 100644 index 00000000000..fe258758ae6 --- /dev/null +++ b/integration-tests/src/tests/client/features/in_memory_tries.rs @@ -0,0 +1,390 @@ +use std::collections::{HashMap, HashSet}; + +use near_chain::{ChainGenesis, Provenance}; +use near_chain_configs::{Genesis, GenesisConfig, GenesisRecords}; +use near_client::test_utils::TestEnv; +use near_client::ProcessTxResponse; +use near_o11y::testonly::init_test_logger; +use near_primitives::block::Tip; +use near_primitives::shard_layout::ShardLayout; +use near_primitives::state_record::StateRecord; +use near_primitives::test_utils::{create_test_signer, create_user_test_signer}; +use 
near_primitives::transaction::SignedTransaction; +use near_primitives::types::{AccountInfo, EpochId}; +use near_primitives_core::account::{AccessKey, Account}; +use near_primitives_core::hash::CryptoHash; +use near_primitives_core::types::AccountId; +use near_primitives_core::version::PROTOCOL_VERSION; +use near_store::test_utils::create_test_store; +use near_store::{ShardUId, TrieConfig}; +use nearcore::test_utils::TestEnvNightshadeSetupExt; +use rand::seq::IteratorRandom; +use rand::{thread_rng, Rng}; + +const ONE_NEAR: u128 = 1_000_000_000_000_000_000_000_000; + +#[test] +fn test_in_memory_trie_node_consistency() { + // Recommended to run with RUST_LOG=memtrie=debug,chunks=error,info + init_test_logger(); + let validator_stake = 1000000 * ONE_NEAR; + let initial_balance = 10000 * ONE_NEAR; + let accounts = + (0..100).map(|i| format!("account{}", i).parse().unwrap()).collect::<Vec<AccountId>>(); + let mut genesis_config = GenesisConfig { + // Use the latest protocol version. Otherwise, the version may be so + // old that e.g. blocks don't even store previous heights. + protocol_version: PROTOCOL_VERSION, + // Some arbitrary starting height. Doesn't matter. + genesis_height: 10000, + // We'll test with 4 shards. This can be any number, but we want to test + // the case when some shards are loaded into memory and others are not. + // We pick the boundaries so that each shard would get some transactions. + shard_layout: ShardLayout::v1( + vec!["account3", "account5", "account7"] + .into_iter() + .map(|a| a.parse().unwrap()) + .collect(), + None, + 1, + ), + // We're going to send NEAR between accounts and then assert at the end + // that these transactions have been processed correctly, so here we set + // the gas price to 0 so that we don't have to calculate gas cost. + min_gas_price: 0, + max_gas_price: 0, + // Set the block gas limit high enough so we don't have to worry about + // transactions being throttled. + gas_limit: 100000000000000, + // Set the validity period high enough so even if a transaction gets + // included a few blocks later it won't be rejected. + transaction_validity_period: 1000, + // Make two validators. In this test we don't care about validators but + // the TestEnv framework works best if all clients are validators. So + // since we are using two clients, make two validators. + validators: vec![ + AccountInfo { + account_id: accounts[0].clone(), + amount: validator_stake, + public_key: create_test_signer(accounts[0].as_str()).public_key(), + }, + AccountInfo { + account_id: accounts[1].clone(), + amount: validator_stake, + public_key: create_test_signer(accounts[1].as_str()).public_key(), + }, + ], + // We don't care about epoch transitions in this test, and epoch + // transitions mean validator selection, which can kick out validators + // (due to our test purposefully skipping blocks to create forks), and + // that's annoying to deal with. So set this to a high value to stay + // within a single epoch. + epoch_length: 10000, + // The genesis requires this, so set it to something arbitrary. + protocol_treasury_account: accounts[2].clone(), + // Simply make all validators block producers. + num_block_producer_seats: 2, + // Make all validators produce chunks for all shards. + minimum_validators_per_shard: 2, + // Even though not used for the most recent protocol version, + // this must still have the same length as the number of shards, + // or else the genesis fails validation.
+ num_block_producer_seats_per_shard: vec![2, 2, 2, 2], + ..Default::default() + }; + + // We'll now create the initial records. We'll set up 100 accounts, each + // with some initial balance. We'll add an access key to each account so + // we can send transactions from them. + let mut records = Vec::new(); + for (i, account) in accounts.iter().enumerate() { + // The staked amount must be consistent with validators from genesis. + let staked = if i < 2 { validator_stake } else { 0 }; + records.push(StateRecord::Account { + account_id: account.clone(), + account: Account::new(initial_balance, staked, CryptoHash::default(), 0), + }); + records.push(StateRecord::AccessKey { + account_id: account.clone(), + public_key: create_user_test_signer(&account).public_key, + access_key: AccessKey::full_access(), + }); + // The total supply must be correct to pass validation. + genesis_config.total_supply += initial_balance + staked; + } + let genesis = Genesis::new(genesis_config, GenesisRecords(records)).unwrap(); + let chain_genesis = ChainGenesis::new(&genesis); + + // Create two stores, one for each node. We'll be reusing the stores later + // to emulate node restarts. + let stores = vec![create_test_store(), create_test_store()]; + let mut env = TestEnv::builder(chain_genesis.clone()) + .clients(vec!["account0".parse().unwrap(), "account1".parse().unwrap()]) + .stores(stores.clone()) + .real_epoch_managers(&genesis.config) + .track_all_shards() + .nightshade_runtimes_with_trie_config( + &genesis, + vec![ + TrieConfig::default(), // client 0 does not load in-memory tries + TrieConfig { + // client 1 loads two of four shards into in-memory tries + load_mem_tries_for_shards: vec![ + ShardUId { version: 1, shard_id: 0 }, + ShardUId { version: 1, shard_id: 2 }, + ], + ..Default::default() + }, + ], + ) + .build(); + + // Sanity check that we should have two block producers. + assert_eq!( + env.clients[0] + .epoch_manager + .get_epoch_block_producers_ordered( + &EpochId::default(), + &env.clients[0].chain.head().unwrap().last_block_hash + ) + .unwrap() + .len(), + 2 + ); + + // First, start up the nodes from genesis. This ensures that in-memory + // tries works correctly when starting up an empty node for the first time. + let mut nonces = + accounts.iter().map(|account| (account.clone(), 0)).collect::>(); + let mut balances = accounts + .iter() + .map(|account| (account.clone(), initial_balance)) + .collect::>(); + + run_chain_for_some_blocks_while_sending_money_around(&mut env, &mut nonces, &mut balances, 100); + // Sanity check that in-memory tries are loaded, and garbage collected properly. + // We should have 4 roots for each loaded shard, because we maintain in-memory + // roots until (and including) the prev block of the last final block. So if the + // head is N, then we have roots for N, N - 1, N - 2 (final), and N - 3. + assert_eq!(num_memtrie_roots(&env, 0, "s0.v1".parse().unwrap()), None); + assert_eq!(num_memtrie_roots(&env, 0, "s1.v1".parse().unwrap()), None); + assert_eq!(num_memtrie_roots(&env, 0, "s2.v1".parse().unwrap()), None); + assert_eq!(num_memtrie_roots(&env, 0, "s3.v1".parse().unwrap()), None); + assert_eq!(num_memtrie_roots(&env, 1, "s0.v1".parse().unwrap()), Some(4)); + assert_eq!(num_memtrie_roots(&env, 1, "s1.v1".parse().unwrap()), None); + assert_eq!(num_memtrie_roots(&env, 1, "s2.v1".parse().unwrap()), Some(4)); + assert_eq!(num_memtrie_roots(&env, 1, "s3.v1".parse().unwrap()), None); + + // Restart nodes, and change some configs. 
+    drop(env);
+    let mut env = TestEnv::builder(chain_genesis.clone())
+        .clients(vec!["account0".parse().unwrap(), "account1".parse().unwrap()])
+        .stores(stores.clone())
+        .real_epoch_managers(&genesis.config)
+        .track_all_shards()
+        .nightshade_runtimes_with_trie_config(
+            &genesis,
+            vec![
+                TrieConfig::default(),
+                TrieConfig {
+                    load_mem_tries_for_shards: vec![
+                        ShardUId { version: 1, shard_id: 0 },
+                        ShardUId { version: 1, shard_id: 1 }, // shard 2 changed to shard 1.
+                    ],
+                    ..Default::default()
+                },
+            ],
+        )
+        .build();
+    run_chain_for_some_blocks_while_sending_money_around(&mut env, &mut nonces, &mut balances, 100);
+    assert_eq!(num_memtrie_roots(&env, 0, "s0.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 0, "s1.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 0, "s2.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 0, "s3.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 1, "s0.v1".parse().unwrap()), Some(4));
+    assert_eq!(num_memtrie_roots(&env, 1, "s1.v1".parse().unwrap()), Some(4));
+    assert_eq!(num_memtrie_roots(&env, 1, "s2.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 1, "s3.v1".parse().unwrap()), None);
+
+    // Restart again, but this time flip the nodes.
+    drop(env);
+    let mut env = TestEnv::builder(chain_genesis)
+        .clients(vec!["account0".parse().unwrap(), "account1".parse().unwrap()])
+        .stores(stores)
+        .real_epoch_managers(&genesis.config)
+        .track_all_shards()
+        .nightshade_runtimes_with_trie_config(
+            &genesis,
+            vec![
+                // client 0 now loads in-memory tries
+                TrieConfig {
+                    load_mem_tries_for_shards: vec![
+                        ShardUId { version: 1, shard_id: 1 },
+                        ShardUId { version: 1, shard_id: 3 },
+                    ],
+                    ..Default::default()
+                },
+                // client 1 no longer loads in-memory tries
+                TrieConfig::default(),
+            ],
+        )
+        .build();
+    run_chain_for_some_blocks_while_sending_money_around(&mut env, &mut nonces, &mut balances, 100);
+    assert_eq!(num_memtrie_roots(&env, 0, "s0.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 0, "s1.v1".parse().unwrap()), Some(4));
+    assert_eq!(num_memtrie_roots(&env, 0, "s2.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 0, "s3.v1".parse().unwrap()), Some(4));
+    assert_eq!(num_memtrie_roots(&env, 1, "s0.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 1, "s1.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 1, "s2.v1".parse().unwrap()), None);
+    assert_eq!(num_memtrie_roots(&env, 1, "s3.v1".parse().unwrap()), None);
+}
+
+/// Returns the block producer for the height of head + height_offset.
+fn get_block_producer(env: &TestEnv, head: &Tip, height_offset: u64) -> AccountId {
+    let client = &env.clients[0];
+    let epoch_manager = &client.epoch_manager;
+    let parent_hash = &head.last_block_hash;
+    let epoch_id = epoch_manager.get_epoch_id_from_prev_block(parent_hash).unwrap();
+    let height = head.height + height_offset;
+    epoch_manager.get_block_producer(&epoch_id, height).unwrap()
+}
+
+/// Runs the chain for some number of blocks, sending money around randomly
+/// between the test accounts and updating the corresponding nonces and
+/// balances. At the end, checks that the balances are correct, i.e. that the
+/// transactions have been executed correctly. If this runs successfully, it
+/// also means that the two nodes being tested are consistent with each other.
+/// If, for example, there is a state root mismatch issue, the two nodes would
+/// not be able to apply each others' blocks because the block hashes would be
+/// different.
+fn run_chain_for_some_blocks_while_sending_money_around(
+    env: &mut TestEnv,
+    nonces: &mut HashMap<AccountId, u64>,
+    balances: &mut HashMap<AccountId, u128>,
+    num_rounds: usize,
+) {
+    // Run the chain for some extra blocks, to ensure that all transactions
+    // are included in the chain and are executed completely.
+    for round in 0..(num_rounds + 10) {
+        let heads = env
+            .clients
+            .iter()
+            .map(|client| client.chain.head().unwrap().last_block_hash)
+            .collect::<HashSet<_>>();
+        assert_eq!(heads.len(), 1, "All clients should have the same head");
+        let tip = env.clients[0].chain.head().unwrap();
+
+        if round < num_rounds {
+            // Make 50 random transactions that send money between random
+            // accounts.
+            for _ in 0..50 {
+                let sender = nonces.keys().choose(&mut thread_rng()).unwrap().clone();
+                let receiver = nonces.keys().choose(&mut thread_rng()).unwrap().clone();
+                let nonce = nonces.get_mut(&sender).unwrap();
+                *nonce += 1;
+
+                let txn = SignedTransaction::send_money(
+                    *nonce,
+                    sender.clone(),
+                    receiver.clone(),
+                    &create_user_test_signer(&sender),
+                    ONE_NEAR,
+                    tip.last_block_hash,
+                );
+                match env.clients[0].process_tx(txn, false, false) {
+                    ProcessTxResponse::NoResponse => panic!("No response"),
+                    ProcessTxResponse::InvalidTx(err) => panic!("Invalid tx: {}", err),
+                    _ => {}
+                }
+                *balances.get_mut(&sender).unwrap() -= ONE_NEAR;
+                *balances.get_mut(&receiver).unwrap() += ONE_NEAR;
+            }
+        }
+
+        let cur_block_producer = get_block_producer(&env, &tip, 1);
+        let next_block_producer = get_block_producer(&env, &tip, 2);
+        println!("Producing block at height {} by {}", tip.height + 1, cur_block_producer);
+        let block = env.client(&cur_block_producer).produce_block(tip.height + 1).unwrap().unwrap();
+
+        // Produce some skip blocks too, so that we test that in-memory tries
+        // are able to deal with forks. At the end, finish with a bunch of
+        // non-skip blocks, so that we can test that in-memory trie garbage
+        // collection works properly (if the head is N, the final block is
+        // N - 2 and we keep roots down to N - 3, so we should end up with
+        // exactly 4 roots).
+        let mut skip_block = None;
+        if cur_block_producer != next_block_producer
+            && round < num_rounds
+            && thread_rng().gen_bool(0.5)
+        {
+            println!(
+                "Producing skip block at height {} by {}",
+                tip.height + 2,
+                next_block_producer
+            );
+            skip_block = Some(
+                env.client(&next_block_producer).produce_block(tip.height + 2).unwrap().unwrap(),
+            );
+        }
+
+        // Apply the height + 1 block.
+        for i in 0..env.clients.len() {
+            println!(
+                "  Applying block at height {} at {}",
+                block.header().height(),
+                env.get_client_id(i)
+            );
+            let blocks_processed =
+                env.clients[i].process_block_test(block.clone().into(), Provenance::NONE).unwrap();
+            assert_eq!(blocks_processed, vec![*block.hash()]);
+        }
+        // Apply the skip block if one was produced.
+        if let Some(skip_block) = skip_block {
+            for i in 0..env.clients.len() {
+                println!(
+                    "  Applying skip block at height {} at {}",
+                    skip_block.header().height(),
+                    env.get_client_id(i)
+                );
+                let blocks_processed = env.clients[i]
+                    .process_block_test(skip_block.clone().into(), Provenance::NONE)
+                    .unwrap();
+                assert_eq!(blocks_processed, vec![*skip_block.hash()]);
+            }
+        }
+
+        // Send partial encoded chunks around so that the newly produced
+        // chunks can be included and processed in the next block.
+        // Having to do this is unfortunate, because this test has nothing to
+        // do with partial encoded chunks, but it is the reality of using
+        // TestEnv with multiple nodes.
+        env.process_partial_encoded_chunks();
+        for j in 0..env.clients.len() {
+            env.process_shards_manager_responses_and_finish_processing_blocks(j);
+        }
+    }
+
+    for (account, balance) in balances {
+        assert_eq!(
+            env.query_balance(account.clone()),
+            *balance,
+            "Balance mismatch for {}",
+            account,
+        );
+    }
+}
+
+/// Returns the number of memtrie roots for the given client and shard, or
+/// None if that shard does not load memtries.
+fn num_memtrie_roots(env: &TestEnv, client_id: usize, shard: ShardUId) -> Option<usize> {
+    Some(
+        env.clients[client_id]
+            .runtime_adapter
+            .get_tries()
+            .get_mem_tries(shard)?
+            .read()
+            .unwrap()
+            .num_roots(),
+    )
+}
diff --git a/integration-tests/src/tests/client/state_snapshot.rs b/integration-tests/src/tests/client/state_snapshot.rs
index 398ead29b78..750eae0e45a 100644
--- a/integration-tests/src/tests/client/state_snapshot.rs
+++ b/integration-tests/src/tests/client/state_snapshot.rs
@@ -45,6 +45,8 @@ impl StateSnaptshotTestEnv {
             enable_receipt_prefetching: false,
             sweat_prefetch_receivers: Vec::new(),
             sweat_prefetch_senders: Vec::new(),
+            load_mem_tries_for_shards: Vec::new(),
+            load_mem_tries_for_all_shards: false,
         };
         let flat_storage_manager = FlatStorageManager::new(store.clone());
         let shard_uids = [ShardUId::single_shard()];
diff --git a/nearcore/src/runtime/mod.rs b/nearcore/src/runtime/mod.rs
index 4b8aac8c356..b06b7ad57d1 100644
--- a/nearcore/src/runtime/mod.rs
+++ b/nearcore/src/runtime/mod.rs
@@ -192,6 +192,33 @@ impl NightshadeRuntime {
         )
     }
 
+    pub fn test_with_trie_config(
+        home_dir: &Path,
+        store: Store,
+        genesis_config: &GenesisConfig,
+        epoch_manager: Arc<EpochManagerHandle>,
+        trie_config: TrieConfig,
+        state_snapshot_type: StateSnapshotType,
+    ) -> Arc<Self> {
+        Self::new(
+            store,
+            genesis_config,
+            epoch_manager,
+            None,
+            None,
+            None,
+            DEFAULT_GC_NUM_EPOCHS_TO_KEEP,
+            trie_config,
+            StateSnapshotConfig {
+                state_snapshot_type,
+                home_dir: home_dir.to_path_buf(),
+                hot_store_path: PathBuf::from("data"),
+                state_snapshot_subdir: PathBuf::from("state_snapshot"),
+                compaction_enabled: false,
+            },
+        )
+    }
+
     pub fn test(
         home_dir: &Path,
         store: Store,
@@ -432,6 +459,7 @@ impl NightshadeRuntime {
                 apply_result.trie_changes,
                 apply_result.state_changes,
                 *block_hash,
+                apply_state.block_height,
             ),
             new_root: apply_result.state_root,
             outcomes: apply_result.outcomes,
@@ -1054,6 +1082,7 @@ impl RuntimeAdapter for NightshadeRuntime {
     fn apply_update_to_split_states(
         &self,
         block_hash: &CryptoHash,
+        block_height: BlockHeight,
         state_roots: HashMap<ShardUId, StateRoot>,
         next_epoch_shard_layout: &ShardLayout,
         state_changes_for_split_states: StateChangesForSplitStates,
@@ -1081,6 +1110,7 @@ impl RuntimeAdapter for NightshadeRuntime {
                 trie_changes,
                 state_changes,
                 *block_hash,
+                block_height,
             );
             applied_split_state_results.push(ApplySplitStateResult {
                 shard_uid,
@@ -1192,6 +1222,10 @@ impl RuntimeAdapter for NightshadeRuntime {
         let epoch_manager = self.epoch_manager.read();
         Ok(epoch_manager.will_shard_layout_change(parent_hash)?)
     }
+
+    fn load_mem_tries_on_startup(&self, shard_uids: &[ShardUId]) -> Result<(), StorageError> {
+        self.tries.load_mem_tries_for_enabled_shards(shard_uids)
+    }
 }
 
 impl node_runtime::adapter::ViewRuntimeAdapter for NightshadeRuntime {
diff --git a/nearcore/src/test_utils.rs b/nearcore/src/test_utils.rs
index 6850f0cc36c..ec748389b89 100644
--- a/nearcore/src/test_utils.rs
+++ b/nearcore/src/test_utils.rs
@@ -4,7 +4,7 @@ use near_client::test_utils::TestEnvBuilder;
 use near_epoch_manager::EpochManagerHandle;
 use near_primitives::runtime::config_store::RuntimeConfigStore;
 use near_store::genesis::initialize_genesis_state;
-use near_store::Store;
+use near_store::{Store, TrieConfig};
 use std::path::PathBuf;
 use std::sync::Arc;
 
@@ -17,6 +17,11 @@ pub trait TestEnvNightshadeSetupExt {
         genesis: &Genesis,
         runtime_configs: Vec<RuntimeConfigStore>,
     ) -> Self;
+    fn nightshade_runtimes_with_trie_config(
+        self,
+        genesis: &Genesis,
+        trie_configs: Vec<TrieConfig>,
+    ) -> Self;
 }
 
 impl TestEnvNightshadeSetupExt for TestEnvBuilder {
@@ -34,7 +39,8 @@ impl TestEnvNightshadeSetupExt for TestEnvBuilder {
         let nightshade_runtime_creator = |home_dir: PathBuf,
                                           store: Store,
                                           epoch_manager: Arc<EpochManagerHandle>,
-                                          runtime_config: RuntimeConfigStore|
+                                          runtime_config: RuntimeConfigStore,
+                                          _|
             -> Arc<dyn RuntimeAdapter> {
             // TODO: It's not ideal to initialize genesis state with the nightshade runtime here for tests
             // Tests that don't use nightshade runtime have genesis initialized in kv_runtime.
@@ -50,6 +56,45 @@ impl TestEnvNightshadeSetupExt for TestEnvBuilder {
                 state_snapshot_type.clone(),
             )
         };
-        self.internal_initialize_nightshade_runtimes(runtime_configs, nightshade_runtime_creator)
+        let dummy_trie_configs = vec![TrieConfig::default(); self.num_clients()];
+        self.internal_initialize_nightshade_runtimes(
+            runtime_configs,
+            dummy_trie_configs,
+            nightshade_runtime_creator,
+        )
+    }
+
+    fn nightshade_runtimes_with_trie_config(
+        self,
+        genesis: &Genesis,
+        trie_configs: Vec<TrieConfig>,
+    ) -> Self {
+        let state_snapshot_type = self.state_snapshot_type();
+        let nightshade_runtime_creator = |home_dir: PathBuf,
+                                          store: Store,
+                                          epoch_manager: Arc<EpochManagerHandle>,
+                                          _,
+                                          trie_config: TrieConfig|
+            -> Arc<dyn RuntimeAdapter> {
+            // TODO: It's not ideal to initialize genesis state with the nightshade runtime here for tests
+            // Tests that don't use nightshade runtime have genesis initialized in kv_runtime.
+            // We should instead try to do this while configuring store.
+            let home_dir = home_dir.as_path();
+            initialize_genesis_state(store.clone(), genesis, Some(home_dir));
+            NightshadeRuntime::test_with_trie_config(
+                home_dir,
+                store,
+                &genesis.config,
+                epoch_manager,
+                trie_config,
+                state_snapshot_type.clone(),
+            )
+        };
+        let dummy_runtime_configs = vec![RuntimeConfigStore::test(); self.num_clients()];
+        self.internal_initialize_nightshade_runtimes(
+            dummy_runtime_configs,
+            trie_configs,
+            nightshade_runtime_creator,
+        )
+    }
 }
diff --git a/tools/state-viewer/src/state_changes.rs b/tools/state-viewer/src/state_changes.rs
index 9dee25a6c7a..4ec39df8877 100644
--- a/tools/state-viewer/src/state_changes.rs
+++ b/tools/state-viewer/src/state_changes.rs
@@ -189,6 +189,7 @@ fn apply_state_changes(
         trie_update,
         state_changes,
         *block_hash,
+        block_height,
     );
     let mut store_update = chain_store.store_update();
     store_update.save_trie_changes(wrapped_trie_changes);
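
A note on the two new `TrieConfig` fields added in the state_snapshot.rs hunk
above (`load_mem_tries_for_shards` and `load_mem_tries_for_all_shards`): the
sketch below shows how they are presumably combined when deciding whether a
shard's memtrie should be loaded. Only the field names come from this patch;
the helper `should_load_memtrie` is hypothetical, and the actual selection
logic in near-store may differ.

    use near_store::{ShardUId, TrieConfig};

    fn should_load_memtrie(config: &TrieConfig, shard_uid: &ShardUId) -> bool {
        // Assumption: the blanket flag takes precedence over the per-shard list.
        config.load_mem_tries_for_all_shards
            || config.load_mem_tries_for_shards.contains(shard_uid)
    }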