[cleanup] Rename mem_trie to memtrie (#12725)
No functional change, only renaming of mem_trie/mem_tries identifiers to memtrie/memtries
shreyan-gupta authored Jan 13, 2025
1 parent 71f8ed3 commit 88d1a9d
Showing 30 changed files with 162 additions and 162 deletions.
4 changes: 2 additions & 2 deletions chain/chain/src/chain.rs
@@ -540,7 +540,7 @@ impl Chain {
let head_protocol_version = epoch_manager.get_epoch_protocol_version(&tip.epoch_id)?;
let shard_uids_pending_resharding = epoch_manager
.get_shard_uids_pending_resharding(head_protocol_version, PROTOCOL_VERSION)?;
-runtime_adapter.get_tries().load_mem_tries_for_enabled_shards(
+runtime_adapter.get_tries().load_memtries_for_enabled_shards(
&tracked_shards,
&shard_uids_pending_resharding,
true,
@@ -2043,7 +2043,7 @@ impl Chain {

if self.epoch_manager.is_next_block_epoch_start(block.header().prev_hash())? {
// Keep in memory only these tries that we care about this or next epoch.
-self.runtime_adapter.get_tries().retain_mem_tries(&shards_cares_this_or_next_epoch);
+self.runtime_adapter.get_tries().retain_memtries(&shards_cares_this_or_next_epoch);
}

if let Err(err) = self.garbage_collect_state_transition_data(&block) {
14 changes: 7 additions & 7 deletions chain/chain/src/resharding/manager.rs
@@ -20,7 +20,7 @@ use near_primitives::types::chunk_extra::ChunkExtra;
use near_store::adapter::trie_store::get_shard_uid_mapping;
use near_store::adapter::{StoreAdapter, StoreUpdateAdapter};
use near_store::flat::BlockInfo;
-use near_store::trie::mem::mem_trie_update::TrackingMode;
+use near_store::trie::mem::memtrie_update::TrackingMode;
use near_store::trie::ops::resharding::RetainMode;
use near_store::trie::outgoing_metadata::ReceiptGroupsQueue;
use near_store::trie::TrieRecorder;
@@ -197,7 +197,7 @@ impl ReshardingManager {
(split_shard_event.left_child_shard, RetainMode::Left),
(split_shard_event.right_child_shard, RetainMode::Right),
] {
-let Some(mem_tries) = tries.get_mem_tries(new_shard_uid) else {
+let Some(memtries) = tries.get_memtries(new_shard_uid) else {
tracing::error!(
"Memtrie not loaded. Cannot process memtrie resharding storage
update for block {:?}, shard {:?}",
@@ -211,15 +211,15 @@
target: "resharding", ?new_shard_uid, ?retain_mode,
"Creating child memtrie by retaining nodes in parent memtrie..."
);
-let mut mem_tries = mem_tries.write().unwrap();
+let mut memtries = memtries.write().unwrap();
let mut trie_recorder = TrieRecorder::new(None);
let mode = TrackingMode::RefcountsAndAccesses(&mut trie_recorder);
-let mem_trie_update = mem_tries.update(*parent_chunk_extra.state_root(), mode)?;
+let memtrie_update = memtries.update(*parent_chunk_extra.state_root(), mode)?;

-let trie_changes = mem_trie_update.retain_split_shard(&boundary_account, retain_mode);
+let trie_changes = memtrie_update.retain_split_shard(&boundary_account, retain_mode);
let memtrie_changes = trie_changes.memtrie_changes.as_ref().unwrap();
-let new_state_root = mem_tries.apply_memtrie_changes(block_height, memtrie_changes);
-drop(mem_tries);
+let new_state_root = memtries.apply_memtrie_changes(block_height, memtrie_changes);
+drop(memtries);

// Get the congestion info for the child.
let parent_epoch_id = block.header().epoch_id();
4 changes: 2 additions & 2 deletions chain/client/src/sync/state/shard.rs
@@ -127,7 +127,7 @@ pub(super) async fn run_state_sync_for_shard(

return_if_cancelled!(cancel);
*status.lock().unwrap() = ShardSyncStatus::StateApplyInProgress;
-runtime.get_tries().unload_mem_trie(&shard_uid);
+runtime.get_tries().unload_memtrie(&shard_uid);
let mut store_update = store.store_update();
runtime
.get_flat_storage_manager()
@@ -182,7 +182,7 @@ pub(super) async fn run_state_sync_for_shard(
let shard_uids_pending_resharding = epoch_manager
.get_shard_uids_pending_resharding(head_protocol_version, PROTOCOL_VERSION)?;
handle.set_status("Loading memtrie");
-runtime.get_tries().load_mem_trie_on_catchup(
+runtime.get_tries().load_memtrie_on_catchup(
&shard_uid,
&state_root,
&shard_uids_pending_resharding,
12 changes: 7 additions & 5 deletions core/store/src/config.rs
@@ -83,9 +83,11 @@ pub struct StoreConfig {
/// List of shard UIDs for which we should load the tries in memory.
/// TODO(#9511): This does not automatically survive resharding. We may need to figure out a
/// strategy for that.
-pub load_mem_tries_for_shards: Vec<ShardUId>,
-/// If true, load mem trie for each shard being tracked; this has priority over `load_mem_tries_for_shards`.
-pub load_mem_tries_for_tracked_shards: bool,
+#[serde(rename = "load_mem_tries_for_shards")]
+pub load_memtries_for_shards: Vec<ShardUId>,
+/// If true, load mem trie for each shard being tracked; this has priority over `load_memtries_for_shards`.
+#[serde(rename = "load_mem_tries_for_tracked_shards")]
+pub load_memtries_for_tracked_shards: bool,

/// Path where to create RocksDB checkpoints during database migrations or
/// `false` to disable that feature.
@@ -289,8 +291,8 @@ impl Default for StoreConfig {
// Doesn't work for resharding.
// It will speed up processing of shards where it is enabled, but
// requires more RAM and takes several minutes on startup.
-load_mem_tries_for_shards: Default::default(),
-load_mem_tries_for_tracked_shards: false,
+load_memtries_for_shards: Default::default(),
+load_memtries_for_tracked_shards: false,

migration_snapshot: Default::default(),

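The StoreConfig fields above are renamed only on the Rust side: the #[serde(rename = ...)] attributes keep the old JSON keys, so existing config files that still say load_mem_tries_for_shards or load_mem_tries_for_tracked_shards continue to parse (and serialize back out) unchanged. A minimal standalone sketch of that pattern, assuming the serde and serde_json crates; this is illustrative, not nearcore code, and uses Vec<String> in place of Vec<ShardUId>:

use serde::Deserialize;

#[derive(Deserialize)]
struct StoreConfigSketch {
    // Rust-side name is the new one; the rename attribute still reads the old JSON key.
    #[serde(rename = "load_mem_tries_for_shards", default)]
    load_memtries_for_shards: Vec<String>,
    #[serde(rename = "load_mem_tries_for_tracked_shards", default)]
    load_memtries_for_tracked_shards: bool,
}

fn main() {
    // An operator's existing config still uses the old key and keeps working.
    let json = r#"{ "load_mem_tries_for_tracked_shards": true }"#;
    let cfg: StoreConfigSketch = serde_json::from_str(json).unwrap();
    assert!(cfg.load_memtries_for_tracked_shards);
    assert!(cfg.load_memtries_for_shards.is_empty());
}

Because serde's rename applies to both deserialization and serialization, the on-disk key names stay stable while the code uses the new identifiers.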
4 changes: 2 additions & 2 deletions core/store/src/test_utils.rs
@@ -136,7 +136,7 @@ impl TestTriesBuilder {
let tries = ShardTries::new(
store.trie_store(),
TrieConfig {
-load_mem_tries_for_tracked_shards: self.enable_in_memory_tries,
+load_memtries_for_tracked_shards: self.enable_in_memory_tries,
..Default::default()
},
&shard_uids,
@@ -187,7 +187,7 @@ impl TestTriesBuilder {
}
update_for_chunk_extra.commit().unwrap();

-tries.load_mem_tries_for_enabled_shards(&shard_uids, &[].into(), false).unwrap();
+tries.load_memtries_for_enabled_shards(&shard_uids, &[].into(), false).unwrap();
}
tries
}
8 changes: 4 additions & 4 deletions core/store/src/trie/config.rs
@@ -34,9 +34,9 @@ pub struct TrieConfig {
pub kaiching_prefetch_config: Vec<PrefetchConfig>,

/// List of shards we will load into memory.
-pub load_mem_tries_for_shards: Vec<ShardUId>,
+pub load_memtries_for_shards: Vec<ShardUId>,
/// Whether mem-trie should be loaded for each tracked shard.
-pub load_mem_tries_for_tracked_shards: bool,
+pub load_memtries_for_tracked_shards: bool,
}

impl TrieConfig {
@@ -62,8 +62,8 @@ impl TrieConfig {
}
this.claim_sweat_prefetch_config.clone_from(&config.claim_sweat_prefetch_config);
this.kaiching_prefetch_config.clone_from(&config.kaiching_prefetch_config);
-this.load_mem_tries_for_shards.clone_from(&config.load_mem_tries_for_shards);
-this.load_mem_tries_for_tracked_shards = config.load_mem_tries_for_tracked_shards;
+this.load_memtries_for_shards.clone_from(&config.load_memtries_for_shards);
+this.load_memtries_for_tracked_shards = config.load_memtries_for_tracked_shards;

this
}
10 changes: 5 additions & 5 deletions core/store/src/trie/mem/arena/alloc.rs
@@ -1,8 +1,8 @@
-use super::metrics::MEM_TRIE_ARENA_ACTIVE_ALLOCS_COUNT;
+use super::metrics::MEMTRIE_ARENA_ACTIVE_ALLOCS_COUNT;
use super::single_thread::STArenaMemory;
use super::{ArenaMemory, ArenaPos, ArenaSliceMut};
use crate::trie::mem::arena::metrics::{
-MEM_TRIE_ARENA_ACTIVE_ALLOCS_BYTES, MEM_TRIE_ARENA_MEMORY_USAGE_BYTES,
+MEMTRIE_ARENA_ACTIVE_ALLOCS_BYTES, MEMTRIE_ARENA_MEMORY_USAGE_BYTES,
};
use crate::trie::mem::arena::ArenaMemoryMut;
use crate::trie::mem::flexible_data::encoding::BorshFixedSize;
@@ -82,11 +82,11 @@ impl Allocator {
next_alloc_pos: ArenaPos::invalid(),
active_allocs_bytes: 0,
active_allocs_count: 0,
-active_allocs_bytes_gauge: MEM_TRIE_ARENA_ACTIVE_ALLOCS_BYTES
+active_allocs_bytes_gauge: MEMTRIE_ARENA_ACTIVE_ALLOCS_BYTES
.with_label_values(&[&name]),
-active_allocs_count_gauge: MEM_TRIE_ARENA_ACTIVE_ALLOCS_COUNT
+active_allocs_count_gauge: MEMTRIE_ARENA_ACTIVE_ALLOCS_COUNT
.with_label_values(&[&name]),
-memory_usage_gauge: MEM_TRIE_ARENA_MEMORY_USAGE_BYTES.with_label_values(&[&name]),
+memory_usage_gauge: MEMTRIE_ARENA_MEMORY_USAGE_BYTES.with_label_values(&[&name]),
}
}

4 changes: 2 additions & 2 deletions core/store/src/trie/mem/arena/concurrent.rs
@@ -207,7 +207,7 @@ impl ArenaMut for ConcurrentArenaForThread {
mod tests {
use super::ConcurrentArena;
use crate::trie::mem::arena::alloc::CHUNK_SIZE;
-use crate::trie::mem::arena::metrics::MEM_TRIE_ARENA_MEMORY_USAGE_BYTES;
+use crate::trie::mem::arena::metrics::MEMTRIE_ARENA_MEMORY_USAGE_BYTES;
use crate::trie::mem::arena::{Arena, ArenaMemory, ArenaMut, ArenaWithDealloc};

#[test]
@@ -233,7 +233,7 @@ mod tests {
assert_eq!(starena.num_active_allocs(), 3);
assert_eq!(starena.active_allocs_bytes(), 24 + 32 + 40);
assert_eq!(
-MEM_TRIE_ARENA_MEMORY_USAGE_BYTES.get_metric_with_label_values(&[&name]).unwrap().get(),
+MEMTRIE_ARENA_MEMORY_USAGE_BYTES.get_metric_with_label_values(&[&name]).unwrap().get(),
3 * CHUNK_SIZE as i64
);

12 changes: 6 additions & 6 deletions core/store/src/trie/mem/arena/metrics.rs
@@ -1,27 +1,27 @@
use near_o11y::metrics::{try_create_int_gauge_vec, IntGaugeVec};
use std::sync::LazyLock;

-pub static MEM_TRIE_ARENA_ACTIVE_ALLOCS_BYTES: LazyLock<IntGaugeVec> = LazyLock::new(|| {
+pub static MEMTRIE_ARENA_ACTIVE_ALLOCS_BYTES: LazyLock<IntGaugeVec> = LazyLock::new(|| {
try_create_int_gauge_vec(
-"near_mem_trie_arena_active_allocs_bytes",
+"near_memtrie_arena_active_allocs_bytes",
"Total size of active allocations on the in-memory trie arena",
&["shard_uid"],
)
.unwrap()
});

-pub static MEM_TRIE_ARENA_ACTIVE_ALLOCS_COUNT: LazyLock<IntGaugeVec> = LazyLock::new(|| {
+pub static MEMTRIE_ARENA_ACTIVE_ALLOCS_COUNT: LazyLock<IntGaugeVec> = LazyLock::new(|| {
try_create_int_gauge_vec(
-"near_mem_trie_arena_active_allocs_count",
+"near_memtrie_arena_active_allocs_count",
"Total number of active allocations on the in-memory trie arena",
&["shard_uid"],
)
.unwrap()
});

-pub static MEM_TRIE_ARENA_MEMORY_USAGE_BYTES: LazyLock<IntGaugeVec> = LazyLock::new(|| {
+pub static MEMTRIE_ARENA_MEMORY_USAGE_BYTES: LazyLock<IntGaugeVec> = LazyLock::new(|| {
try_create_int_gauge_vec(
-"near_mem_trie_arena_memory_usage_bytes",
+"near_memtrie_arena_memory_usage_bytes",
"Memory usage of the in-memory trie arena",
&["shard_uid"],
)
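Note that the metrics file renames both the Rust statics and the exported metric name strings: what Prometheus actually scrapes changes from near_mem_trie_arena_* to near_memtrie_arena_*, so anything that queries the old names will now find the series under the new ones. A minimal sketch of the underlying pattern using the prometheus crate directly (near_o11y's try_create_int_gauge_vec appears to wrap the same types; the shard_uid label value below is illustrative):

use prometheus::{IntGaugeVec, Opts};

fn main() {
    // The string passed here is the externally visible metric name;
    // the Rust identifier holding the gauge is purely internal.
    let memtrie_arena_memory_usage_bytes = IntGaugeVec::new(
        Opts::new(
            "near_memtrie_arena_memory_usage_bytes",
            "Memory usage of the in-memory trie arena",
        ),
        &["shard_uid"],
    )
    .unwrap();

    // One time series per shard, keyed by the shard_uid label.
    memtrie_arena_memory_usage_bytes.with_label_values(&["s0.v3"]).set(64 * 1024 * 1024);
}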
26 changes: 13 additions & 13 deletions core/store/src/trie/mem/loading.rs
@@ -1,11 +1,11 @@
use super::arena::single_thread::STArena;
-use super::mem_tries::MemTries;
+use super::memtries::MemTries;
use super::node::MemTrieNodeId;
use crate::adapter::StoreAdapter;
use crate::flat::FlatStorageStatus;
use crate::trie::mem::arena::Arena;
use crate::trie::mem::construction::TrieConstructor;
-use crate::trie::mem::mem_trie_update::TrackingMode;
+use crate::trie::mem::memtrie_update::TrackingMode;
use crate::trie::mem::parallel_loader::load_memtrie_in_parallel;
use crate::trie::ops::insert_delete::GenericTrieUpdateInsertDelete;
use crate::{DBCol, NibbleSlice, Store};
@@ -137,7 +137,7 @@ pub fn load_trie_from_flat_state_and_delta(
None => get_state_root(store, flat_head.hash, shard_uid)?,
};

-let mut mem_tries =
+let mut memtries =
load_trie_from_flat_state(&store, shard_uid, state_root, flat_head.height, parallelize)
.unwrap();

@@ -156,7 +156,7 @@
let old_state_root = get_state_root(store, prev_hash, shard_uid)?;
let new_state_root = get_state_root(store, hash, shard_uid)?;

-let mut trie_update = mem_tries.update(old_state_root, TrackingMode::None)?;
+let mut trie_update = memtries.update(old_state_root, TrackingMode::None)?;
for (key, value) in changes.0 {
match value {
Some(value) => {
Expand All @@ -166,15 +166,15 @@ pub fn load_trie_from_flat_state_and_delta(
};
}

-let mem_trie_changes = trie_update.to_mem_trie_changes_only();
-let new_root_after_apply = mem_tries.apply_memtrie_changes(height, &mem_trie_changes);
+let memtrie_changes = trie_update.to_memtrie_changes_only();
+let new_root_after_apply = memtries.apply_memtrie_changes(height, &memtrie_changes);
assert_eq!(new_root_after_apply, new_state_root);
}
debug!(target: "memtrie", %shard_uid, "Applied memtrie changes for height {}", height);
}

debug!(target: "memtrie", %shard_uid, "Done loading memtries for shard");
-Ok(mem_tries)
+Ok(memtries)
}

#[cfg(test)]
@@ -446,30 +446,30 @@ mod tests {
// Load into memory. It should load the base flat state (block 0), plus all
// four deltas. We'll check against the state roots at each block; they should
// all exist in the loaded memtrie.
-let mem_tries = load_trie_from_flat_state_and_delta(&store, shard_uid, None, true).unwrap();
+let memtries = load_trie_from_flat_state_and_delta(&store, shard_uid, None, true).unwrap();

assert_eq!(
-memtrie_lookup(mem_tries.get_root(&state_root_0).unwrap(), &test_key.to_vec(), None)
+memtrie_lookup(memtries.get_root(&state_root_0).unwrap(), &test_key.to_vec(), None)
.map(|v| v.to_flat_value()),
Some(FlatStateValue::inlined(&test_val0))
);
assert_eq!(
-memtrie_lookup(mem_tries.get_root(&state_root_1).unwrap(), &test_key.to_vec(), None)
+memtrie_lookup(memtries.get_root(&state_root_1).unwrap(), &test_key.to_vec(), None)
.map(|v| v.to_flat_value()),
Some(FlatStateValue::inlined(&test_val1))
);
assert_eq!(
-memtrie_lookup(mem_tries.get_root(&state_root_2).unwrap(), &test_key.to_vec(), None)
+memtrie_lookup(memtries.get_root(&state_root_2).unwrap(), &test_key.to_vec(), None)
.map(|v| v.to_flat_value()),
Some(FlatStateValue::inlined(&test_val2))
);
assert_eq!(
-memtrie_lookup(mem_tries.get_root(&state_root_3).unwrap(), &test_key.to_vec(), None)
+memtrie_lookup(memtries.get_root(&state_root_3).unwrap(), &test_key.to_vec(), None)
.map(|v| v.to_flat_value()),
Some(FlatStateValue::inlined(&test_val3))
);
assert_eq!(
-memtrie_lookup(mem_tries.get_root(&state_root_4).unwrap(), &test_key.to_vec(), None)
+memtrie_lookup(memtries.get_root(&state_root_4).unwrap(), &test_key.to_vec(), None)
.map(|v| v.to_flat_value()),
Some(FlatStateValue::inlined(&test_val4))
);
4 changes: 2 additions & 2 deletions core/store/src/trie/mem/lookup.rs
@@ -1,6 +1,6 @@
use super::arena::ArenaMemory;
use super::flexible_data::value::ValueView;
-use super::metrics::MEM_TRIE_NUM_LOOKUPS;
+use super::metrics::MEMTRIE_NUM_LOOKUPS;
use super::node::{MemTrieNodePtr, MemTrieNodeView};
use crate::NibbleSlice;
use near_primitives::hash::CryptoHash;
@@ -15,7 +15,7 @@ pub fn memtrie_lookup<'a, M: ArenaMemory>(
key: &[u8],
mut nodes_accessed: Option<&mut Vec<(CryptoHash, Arc<[u8]>)>>,
) -> Option<ValueView<'a>> {
-MEM_TRIE_NUM_LOOKUPS.inc();
+MEMTRIE_NUM_LOOKUPS.inc();
let mut nibbles = NibbleSlice::new(key);
let mut node = root;

(Diffs for the remaining changed files are not shown here.)
