Skip to content

Commit

Permalink
monitor proof size
Browse files Browse the repository at this point in the history
  • Loading branch information
pugachAG committed Aug 29, 2024
1 parent 117ee0e commit e46392c
Show file tree
Hide file tree
Showing 5 changed files with 57 additions and 8 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 2 additions & 1 deletion chain/chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3542,7 +3542,8 @@ impl Chain {
}
let Some(account_id) = me.as_ref() else { return Ok(false) };
Ok(self.epoch_manager.is_chunk_producer_for_epoch(epoch_id, account_id)?
|| self.epoch_manager.is_chunk_producer_for_epoch(&next_epoch_id, account_id)?)
|| self.epoch_manager.is_chunk_producer_for_epoch(&next_epoch_id, account_id)?
|| true)
}

/// Creates jobs which will update shards for the given block and incoming
Expand Down
1 change: 1 addition & 0 deletions runtime/runtime/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ serde_json.workspace = true
sha2.workspace = true
thiserror.workspace = true
tracing.workspace = true
zstd.workspace = true

near-crypto.workspace = true
near-o11y.workspace = true
Expand Down
26 changes: 23 additions & 3 deletions runtime/runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1941,14 +1941,34 @@ impl Runtime {

state_update.commit(StateChangeCause::UpdatedDelayedReceipts);
self.apply_state_patch(&mut state_update, state_patch);

let apply_reason_label = apply_state.apply_reason.as_ref().unwrap().to_string();
let shard_id_str = apply_state.shard_id.to_string();

let chunk_recorded_size_upper_bound =
state_update.trie.recorded_storage_size_upper_bound() as f64;
let shard_id_str = apply_state.shard_id.to_string();
metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND
.with_label_values(&[shard_id_str.as_str()])
.with_label_values(&[shard_id_str.as_str(), &apply_reason_label])
.observe(chunk_recorded_size_upper_bound);
let (trie, trie_changes, state_changes) = state_update.finalize()?;

if let Some(partial_storage) = trie.recorded_storage() {
let bytes = borsh::to_vec(&partial_storage.nodes).unwrap();
metrics::CHUNK_STATE_WITNESS_STORAGE_PROOF_SIZE
.with_label_values(&[
&shard_id_str,
&apply_reason_label,
])
.observe(bytes.len() as f64);
let compressed = zstd::encode_all(bytes.as_slice(), 3).unwrap();
metrics::CHUNK_STATE_WITNESS_COMPRESSED_STORAGE_PROOF_SIZE
.with_label_values(&[
&shard_id_str,
&apply_reason_label,
])
.observe(compressed.len() as f64);
}

if let Some(prefetcher) = &processing_state.prefetcher {
// Only clear the prefetcher queue after finalize is done because as part of receipt
// processing we also prefetch account data and access keys that are accessed in
Expand Down Expand Up @@ -1979,7 +1999,7 @@ impl Runtime {
let state_root = trie_changes.new_root;
let chunk_recorded_size = trie.recorded_storage_size() as f64;
metrics::CHUNK_RECORDED_SIZE
.with_label_values(&[shard_id_str.as_str()])
.with_label_values(&[shard_id_str.as_str(), &apply_reason_label])
.observe(chunk_recorded_size);
metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND_RATIO
.with_label_values(&[shard_id_str.as_str()])
Expand Down
34 changes: 30 additions & 4 deletions runtime/runtime/src/metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -333,17 +333,17 @@ pub static CHUNK_RECORDED_SIZE: LazyLock<HistogramVec> = LazyLock::new(|| {
try_create_histogram_vec(
"near_chunk_recorded_size",
"Total size of storage proof (recorded trie nodes for state witness, post-finalization) for a single chunk",
&["shard_id"],
Some(buckets_for_chunk_storage_proof_size()),
&["shard_id", "apply_reason"],
Some(buckets_for_witness_field_size()),
)
.unwrap()
});
/// Pre-finalization upper bound on the storage proof size (recorded trie
/// nodes plus estimated charges) for a single chunk, labelled by shard and
/// by the reason the chunk was applied.
pub static CHUNK_RECORDED_SIZE_UPPER_BOUND: LazyLock<HistogramVec> = LazyLock::new(|| {
    let histogram = try_create_histogram_vec(
        "near_chunk_recorded_size_upper_bound",
        "Upper bound of storage proof size (recorded trie nodes size + estimated charges, pre-finalization) for a single chunk",
        &["shard_id", "apply_reason"],
        Some(buckets_for_witness_field_size()),
    );
    histogram.unwrap()
});
Expand Down Expand Up @@ -761,3 +761,29 @@ pub fn report_recorded_column_sizes(trie: &Trie, apply_state: &ApplyState) {
.with_label_values(&[shard_id_str.as_str(), "values"])
.observe(total_size.values_size as f64);
}

/// Size of the zstd-compressed, borsh-serialized storage proof for a single
/// chunk, labelled by shard and by the reason the chunk was applied.
pub(crate) static CHUNK_STATE_WITNESS_COMPRESSED_STORAGE_PROOF_SIZE: LazyLock<HistogramVec> =
    LazyLock::new(|| {
        let histogram = try_create_histogram_vec(
            "near_chunk_state_witness_compressed_storage_proof_size",
            "compressed storage proof size",
            &["shard_id", "apply_reason"],
            Some(buckets_for_witness_field_size()),
        );
        histogram.unwrap()
    });

/// Size of the uncompressed, borsh-serialized storage proof for a single
/// chunk, labelled by shard and by the reason the chunk was applied.
///
/// NOTE: the help string previously read "compressed storage proof size" —
/// a copy-paste from `CHUNK_STATE_WITNESS_COMPRESSED_STORAGE_PROOF_SIZE`.
/// This metric observes the size of the proof BEFORE zstd compression, so
/// the help text is corrected to say so.
pub(crate) static CHUNK_STATE_WITNESS_STORAGE_PROOF_SIZE: LazyLock<HistogramVec> =
    LazyLock::new(|| {
        try_create_histogram_vec(
            "near_chunk_state_witness_storage_proof_size",
            "uncompressed storage proof size",
            &["shard_id", "apply_reason"],
            Some(buckets_for_witness_field_size()),
        )
        .unwrap()
    });

/// Histogram buckets for state-witness field sizes: 200 linear buckets of
/// 50 KB width starting at 100 KB (i.e. covering sizes up to ~10 MB).
fn buckets_for_witness_field_size() -> Vec<f64> {
    const START: f64 = 100_000.0;
    const WIDTH: f64 = 50_000.0;
    const COUNT: usize = 200;
    // `linear_buckets` only fails for a zero count, so the unwrap cannot fire.
    linear_buckets(START, WIDTH, COUNT).unwrap()
}

0 comments on commit e46392c

Please sign in to comment.