Merge branch 'master' into contract-dist-nightly
tayfunelmas authored Oct 24, 2024
2 parents fb639c7 + cd319ac commit 8d5c4d7
Showing 65 changed files with 336 additions and 430 deletions.
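
Most of the diff below follows one pattern: call sites that iterated a block's chunk headers via block.chunks().iter() now call iter_deprecated() instead. As a minimal sketch of that mechanic only, with placeholder types rather than nearcore's actual chunk-header collection API:

// Sketch only: placeholder types, not nearcore's real Chunks API.
struct ChunkHeader {
    height_included: u64,
}

struct Chunks(Vec<ChunkHeader>);

impl Chunks {
    // The existing iterator kept available under a name that flags it for migration.
    #[deprecated(note = "use a newer, shard-layout-aware accessor instead")]
    fn iter_deprecated(&self) -> std::slice::Iter<'_, ChunkHeader> {
        self.0.iter()
    }
}

// Call sites switch to the renamed method and opt in explicitly.
#[allow(deprecated)]
fn min_height_included(chunks: &Chunks) -> Option<u64> {
    chunks.iter_deprecated().map(|chunk| chunk.height_included).min()
}

fn main() {
    let chunks = Chunks(vec![
        ChunkHeader { height_included: 5 },
        ChunkHeader { height_included: 3 },
    ]);
    assert_eq!(min_height_included(&chunks), Some(3));
}

Keeping the old behavior under a #[deprecated] name makes any remaining old-style call sites surface as compiler warnings while they are migrated.
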
2 changes: 1 addition & 1 deletion chain/chain/src/blocks_delay_tracker.rs
@@ -150,7 +150,7 @@ impl BlocksDelayTracker {
let height = block.header().height();
let chunks = block
.chunks()
.iter()
.iter_deprecated()
.map(|chunk| {
if chunk.height_included() == height {
let chunk_hash = chunk.chunk_hash();
73 changes: 51 additions & 22 deletions chain/chain/src/chain.rs
@@ -664,7 +664,8 @@ impl Chain {
epoch_manager: &dyn EpochManagerAdapter,
store_update: &mut ChainStoreUpdate,
) -> Result<(), Error> {
for (chunk_header, state_root) in genesis.chunks().iter().zip(state_roots.iter()) {
for (chunk_header, state_root) in genesis.chunks().iter_deprecated().zip(state_roots.iter())
{
let congestion_info =
if ProtocolFeature::CongestionControl.enabled(chain_genesis.protocol_version) {
genesis
@@ -835,7 +836,7 @@ impl Chain {
let epoch_id = block.header().epoch_id();
let shard_layout = epoch_manager.get_shard_layout(&epoch_id)?;

for (shard_index, chunk_header) in block.chunks().iter().enumerate() {
for (shard_index, chunk_header) in block.chunks().iter_deprecated().enumerate() {
let shard_id = shard_layout.get_shard_id(shard_index);
if chunk_header.height_created() == genesis_block.header().height() {
// Special case: genesis chunks can be in non-genesis blocks and don't have a signature
@@ -1238,7 +1239,7 @@ impl Chain {
let prev_chunk_headers =
Chain::get_prev_chunk_headers(self.epoch_manager.as_ref(), prev_block)?;
for (chunk_header, prev_chunk_header) in
block.chunks().iter().zip(prev_chunk_headers.iter())
block.chunks().iter_deprecated().zip(prev_chunk_headers.iter())
{
if chunk_header.height_included() == block.header().height() {
// new chunk
@@ -1266,7 +1267,7 @@ impl Chain {
let block_height = block.header().height();
for pair in block
.chunks()
.iter()
.iter_deprecated()
.filter(|chunk| chunk.is_new_chunk(block_height))
.flat_map(|chunk| chunk.prev_validator_proposals())
.zip_longest(block.header().prev_validator_proposals())
@@ -1329,13 +1330,14 @@ impl Chain {
let epoch_id = block.header().epoch_id();
let shard_layout = self.epoch_manager.get_shard_layout(&epoch_id)?;

for (shard_index, chunk_header) in block.chunks().iter().enumerate() {
for (shard_index, chunk_header) in block.chunks().iter_deprecated().enumerate() {
let shard_id = shard_layout.get_shard_id(shard_index);
// Check if any chunks are invalid in this block.
if let Some(encoded_chunk) =
self.chain_store.is_invalid_chunk(&chunk_header.chunk_hash())?
{
let merkle_paths = Block::compute_chunk_headers_root(block.chunks().iter()).1;
let merkle_paths =
Block::compute_chunk_headers_root(block.chunks().iter_deprecated()).1;
let merkle_proof =
merkle_paths.get(shard_index).ok_or_else(|| Error::InvalidShardId(shard_id))?;
let chunk_proof = ChunkProofs {
@@ -1419,7 +1421,7 @@ impl Chain {
let block_height = block.header().height();
let mut receipt_proofs_by_shard_id = HashMap::new();

for chunk_header in block.chunks().iter() {
for chunk_header in block.chunks().iter_deprecated() {
if !chunk_header.is_new_chunk(block_height) {
continue;
}
@@ -1698,17 +1700,25 @@ impl Chain {
// sync hash block. The logic below adjusts the new_tail so that every
// shard is guaranteed to have at least one new chunk in the blocks
// leading to the sync hash block.
let min_height_included =
prev_block.chunks().iter().map(|chunk| chunk.height_included()).min().unwrap();
let min_height_included = prev_block
.chunks()
.iter_deprecated()
.map(|chunk| chunk.height_included())
.min()
.unwrap();

tracing::debug!(target: "sync", ?min_height_included, ?new_tail, "adjusting tail for missing chunks");
new_tail = std::cmp::min(new_tail, min_height_included.saturating_sub(1));

// In order to find the right new_chunk_tail we need to find the minimum
// of chunk height_created for chunks in the new tail block.
let new_tail_block = self.get_block_by_height(new_tail)?;
let new_chunk_tail =
new_tail_block.chunks().iter().map(|chunk| chunk.height_created()).min().unwrap();
let new_chunk_tail = new_tail_block
.chunks()
.iter_deprecated()
.map(|chunk| chunk.height_created())
.min()
.unwrap();

let tip = Tip::from_header(prev_block.header());
let final_head = Tip::from_header(self.genesis.header());
@@ -2070,13 +2080,29 @@ impl Chain {

self.check_orphans(me, *block.hash(), block_processing_artifacts, apply_chunks_done_sender);

self.check_if_upgrade_needed(&block_hash);

// Determine the block status of this block (whether it is a side fork and updates the chain head)
// Block status is needed in Client::on_block_accepted_with_optional_chunk_produce to
// decide to how to update the tx pool.
let block_status = self.determine_status(new_head, prev_head);
Ok(AcceptedBlock { hash: *block.hash(), status: block_status, provenance })
}

fn check_if_upgrade_needed(&self, block_hash: &CryptoHash) {
if let Ok(next_epoch_protocol_version) =
self.epoch_manager.get_next_epoch_protocol_version(block_hash)
{
if PROTOCOL_VERSION < next_epoch_protocol_version {
error!(
"The protocol version is about to be superseded, please upgrade nearcore as soon as possible. Client protocol version {}, new protocol version {}",
PROTOCOL_VERSION,
next_epoch_protocol_version,
);
}
}
}

/// Gets new flat storage head candidate for given `shard_id` and newly
/// processed `block`.
/// It will be `block.last_final_block().chunk(shard_id).prev_block_hash()`
@@ -2107,7 +2133,7 @@ impl Chain {

let last_final_block_chunks = last_final_block.chunks();
let chunk_header = last_final_block_chunks
.iter()
.iter_deprecated()
.find(|chunk| chunk.shard_id() == shard_id)
.ok_or_else(|| Error::InvalidShardId(shard_id))?;
let new_flat_head = *chunk_header.prev_block_hash();
@@ -2148,7 +2174,7 @@ impl Chain {
}

/// Preprocess a block before applying chunks, verify that we have the necessary information
/// to process the block an the block is valid.
/// to process the block and the block is valid.
/// Note that this function does NOT introduce any changes to chain state.
fn preprocess_block(
&self,
@@ -2182,7 +2208,8 @@ impl Chain {

// Delay hitting the db for current chain head until we know this block is not already known.
let head = self.head()?;
let is_next = header.prev_hash() == &head.last_block_hash;
let prev_hash = header.prev_hash();
let is_next = prev_hash == &head.last_block_hash;

// Sandbox allows fast-forwarding, so only enable when not within sandbox
if !cfg!(feature = "sandbox") {
}

// Block is an orphan if we do not know about the previous full block.
if !is_next && !self.block_exists(header.prev_hash())? {
if !is_next && !self.block_exists(prev_hash)? {
// Before we add the block to the orphan pool, do some checks:
// 1. Block header is signed by the block producer for height.
// 2. Chunk headers in block body match block header.
@@ -2521,7 +2548,7 @@ impl Chain {
let (chunk_headers_root, chunk_proofs) = merklize(
&sync_prev_block
.chunks()
.iter()
.iter_deprecated()
.map(|shard_chunk| {
ChunkHashHeight(shard_chunk.chunk_hash(), shard_chunk.height_included())
})
@@ -2550,7 +2577,7 @@ impl Chain {
let (prev_chunk_headers_root, prev_chunk_proofs) = merklize(
&prev_block
.chunks()
.iter()
.iter_deprecated()
.map(|shard_chunk| {
ChunkHashHeight(shard_chunk.chunk_hash(), shard_chunk.height_included())
})
@@ -2597,7 +2624,7 @@ impl Chain {
let (block_receipts_root, block_receipts_proofs) = merklize(
&block
.chunks()
.iter()
.iter_deprecated()
.map(|chunk| chunk.prev_outgoing_receipts_root())
.collect::<Vec<CryptoHash>>(),
);
@@ -3140,7 +3167,8 @@ impl Chain {
chunk: &ShardChunk,
) -> Result<(), Error> {
if !validate_transactions_order(chunk.transactions()) {
let merkle_paths = Block::compute_chunk_headers_root(block.chunks().iter()).1;
let merkle_paths =
Block::compute_chunk_headers_root(block.chunks().iter_deprecated()).1;
let epoch_id = block.header().epoch_id();
let shard_layout = self.epoch_manager.get_shard_layout(&epoch_id)?;
let shard_id = chunk.shard_id();
@@ -3468,8 +3496,9 @@ impl Chain {
let shard_layout = self.epoch_manager.get_shard_layout(&epoch_id)?;
let shard_id = chunk_header.shard_id();
let shard_index = shard_layout.get_shard_index(shard_id);
let prev_merkle_proofs = Block::compute_chunk_headers_root(prev_block.chunks().iter()).1;
let merkle_proofs = Block::compute_chunk_headers_root(block.chunks().iter()).1;
let prev_merkle_proofs =
Block::compute_chunk_headers_root(prev_block.chunks().iter_deprecated()).1;
let merkle_proofs = Block::compute_chunk_headers_root(block.chunks().iter_deprecated()).1;
let prev_chunk =
self.get_chunk_clone_from_header(&prev_block.chunks()[shard_index].clone()).unwrap();

@@ -3606,7 +3635,7 @@ impl Chain {

let mut maybe_jobs = vec![];
for (shard_index, (chunk_header, prev_chunk_header)) in
block.chunks().iter().zip(prev_chunk_headers.iter()).enumerate()
block.chunks().iter_deprecated().zip(prev_chunk_headers.iter()).enumerate()
{
// XXX: This is a bit questionable -- sandbox state patching works
// only for a single shard. This so far has been enough.
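
Among the chain.rs changes above, check_if_upgrade_needed is new: once a block is accepted, the client looks up the next epoch's protocol version and logs an error if the locally compiled PROTOCOL_VERSION is already behind it. A standalone sketch of just that comparison, with a plain integer standing in for nearcore's protocol-version type and println! standing in for the error! macro:

// Sketch only: mirrors the shape of check_if_upgrade_needed with plain types.
const PROTOCOL_VERSION: u32 = 71; // hypothetical client version

fn check_if_upgrade_needed(next_epoch_protocol_version: u32) {
    // Warn when the next epoch will run a newer protocol version than this binary supports.
    if PROTOCOL_VERSION < next_epoch_protocol_version {
        println!(
            "The protocol version is about to be superseded, please upgrade nearcore as soon as possible. Client protocol version {}, new protocol version {}",
            PROTOCOL_VERSION, next_epoch_protocol_version
        );
    }
}

fn main() {
    check_if_upgrade_needed(72); // one version behind: prints the upgrade warning
    check_if_upgrade_needed(71); // up to date: no output
}
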
10 changes: 6 additions & 4 deletions chain/chain/src/garbage_collection.rs
@@ -331,7 +331,7 @@ impl ChainStore {
let prev_block = self.get_block(&prev_hash);
if let Ok(prev_block) = prev_block {
let min_height_included =
prev_block.chunks().iter().map(|chunk| chunk.height_included()).min();
prev_block.chunks().iter_deprecated().map(|chunk| chunk.height_included()).min();
if let Some(min_height_included) = min_height_included {
tracing::debug!(target: "sync", ?min_height_included, ?gc_height, "adjusting gc_height for missing chunks");
gc_height = std::cmp::min(gc_height, min_height_included - 1);
@@ -650,7 +650,7 @@ impl<'a> ChainStoreUpdate<'a> {
// 6. Canonical Chain only clearing
// Delete chunks, chunk-indexed data and block headers
let mut min_chunk_height = self.tail()?;
for chunk_header in block.chunks().iter() {
for chunk_header in block.chunks().iter_deprecated() {
if min_chunk_height > chunk_header.height_created() {
min_chunk_height = chunk_header.height_created();
}
@@ -832,8 +832,10 @@ impl<'a> ChainStoreUpdate<'a> {
fn gc_outcomes(&mut self, block: &Block) -> Result<(), Error> {
let block_hash = block.hash();
let store_update = self.store().store_update();
for chunk_header in
block.chunks().iter().filter(|h| h.height_included() == block.header().height())
for chunk_header in block
.chunks()
.iter_deprecated()
.filter(|h| h.height_included() == block.header().height())
{
// It is ok to use the shard id from the header because it is a new
// chunk. An old chunk may have the shard id from the parent shard.
4 changes: 4 additions & 0 deletions chain/chain/src/runtime/mod.rs
@@ -1342,6 +1342,10 @@ impl RuntimeAdapter for NightshadeRuntime {
let epoch_manager = self.epoch_manager.read();
Ok(epoch_manager.will_shard_layout_change(parent_hash)?)
}

fn compiled_contract_cache(&self) -> &dyn ContractRuntimeCache {
self.compiled_contract_cache.as_ref()
}
}

/// Get the limit on the number of new receipts imposed by the local congestion control.
4 changes: 3 additions & 1 deletion chain/chain/src/stateless_validation/chunk_endorsement.rs
@@ -44,7 +44,9 @@ pub fn validate_chunk_endorsements_in_block(

let epoch_id = epoch_manager.get_epoch_id_from_prev_block(block.header().prev_hash())?;
let shard_layout = epoch_manager.get_shard_layout(&epoch_id)?;
for (chunk_header, signatures) in block.chunks().iter().zip(block.chunk_endorsements()) {
for (chunk_header, signatures) in
block.chunks().iter_deprecated().zip(block.chunk_endorsements())
{
// For old chunks, we optimize the block by not including the chunk endorsements.
if chunk_header.height_included() != block.header().height() {
if !signatures.is_empty() {
2 changes: 1 addition & 1 deletion chain/chain/src/stateless_validation/chunk_validation.rs
@@ -372,7 +372,7 @@ fn validate_source_receipt_proofs(
// Collect all receipts coming from this block.
let mut block_receipt_proofs = Vec::new();

for chunk in block.chunks().iter() {
for chunk in block.chunks().iter_deprecated() {
if !chunk.is_new_chunk(block.header().height()) {
continue;
}
@@ -25,7 +25,7 @@ impl Chain {
let final_block = chain_store.get_block(&final_block_hash)?;
let final_block_chunk_created_heights = final_block
.chunks()
.iter()
.iter_deprecated()
.map(|chunk| (chunk.shard_id(), chunk.height_created()))
.collect::<Vec<_>>();
clear_before_last_final_block(chain_store, &final_block_chunk_created_heights)?;
4 changes: 2 additions & 2 deletions chain/chain/src/store/mod.rs
@@ -770,7 +770,7 @@ impl ChainStore {
block_hash: &CryptoHash,
) -> Result<HashMap<ShardId, Vec<ExecutionOutcomeWithIdAndProof>>, Error> {
let block = self.get_block(block_hash)?;
let chunk_headers = block.chunks().iter().cloned().collect::<Vec<_>>();
let chunk_headers = block.chunks().iter_deprecated().cloned().collect::<Vec<_>>();

let mut res = HashMap::new();
for chunk_header in chunk_headers {
@@ -2183,7 +2183,7 @@ impl<'a> ChainStoreUpdate<'a> {
source_store.get_chunk_extra(block_hash, &shard_uid)?.clone(),
);
}
for (shard_index, chunk_header) in block.chunks().iter().enumerate() {
for (shard_index, chunk_header) in block.chunks().iter_deprecated().enumerate() {
let shard_id = shard_layout.get_shard_id(shard_index);
let chunk_hash = chunk_header.chunk_hash();
chain_store_update
6 changes: 3 additions & 3 deletions chain/chain/src/store_validator/validate.rs
@@ -367,7 +367,7 @@ pub(crate) fn block_chunks_exist(
// for single-shard, no-missing-chunks state sync or epoch sync tests.
return Ok(());
}
for chunk_header in block.chunks().iter() {
for chunk_header in block.chunks().iter_deprecated() {
if chunk_header.height_included() == block.header().height() {
if let Some(me) = &sv.me {
let cares_about_shard = sv.shard_tracker.care_about_shard(
@@ -419,7 +419,7 @@ pub(crate) fn block_chunks_height_validity(
_block_hash: &CryptoHash,
block: &Block,
) -> Result<(), StoreValidatorError> {
for chunk_header in block.chunks().iter() {
for chunk_header in block.chunks().iter_deprecated() {
if chunk_header.height_created() > block.header().height() {
err!(
"Invalid ShardChunk included, chunk_header = {:?}, block = {:?}",
@@ -704,7 +704,7 @@ pub(crate) fn outcome_indexed_by_block_hash(
"Can't get Block {} from DB",
block_hash
);
for chunk_header in block.chunks().iter() {
for chunk_header in block.chunks().iter_deprecated() {
if chunk_header.height_included() == block.header().height() {
let shard_uid = sv
.epoch_manager
2 changes: 1 addition & 1 deletion chain/chain/src/test_utils.rs
@@ -229,7 +229,7 @@ pub fn display_chain(me: &Option<AccountId>, chain: &mut Chain, tail: bool) {
}
);
if let Some(block) = maybe_block {
for chunk_header in block.chunks().iter() {
for chunk_header in block.chunks().iter_deprecated() {
let chunk_producer = epoch_manager
.get_chunk_producer(
&epoch_id,
7 changes: 7 additions & 0 deletions chain/chain/src/test_utils/kv_runtime.rs
@@ -55,6 +55,7 @@ use near_store::{
set_genesis_hash, set_genesis_state_roots, DBCol, ShardTries, Store, StoreUpdate, Trie,
TrieChanges, WrappedTrieChanges,
};
use near_vm_runner::{ContractRuntimeCache, NoContractRuntimeCache};
use num_rational::Ratio;
use rand::Rng;
use std::cmp::Ordering;
@@ -87,6 +88,7 @@ pub struct KeyValueRuntime {
state: RwLock<HashMap<StateRoot, KVState>>,
state_size: RwLock<HashMap<StateRoot, u64>>,
headers_cache: RwLock<HashMap<CryptoHash, BlockHeader>>,
contract_cache: NoContractRuntimeCache,
}

/// DEPRECATED. DO NOT USE for new tests. Use the real EpochManager, familiarize
@@ -373,6 +375,7 @@ impl KeyValueRuntime {
headers_cache: RwLock::new(HashMap::new()),
state: RwLock::new(state),
state_size: RwLock::new(state_size),
contract_cache: NoContractRuntimeCache,
})
}

@@ -1570,4 +1573,8 @@ impl RuntimeAdapter for KeyValueRuntime {
) -> Result<bool, Error> {
Ok(false)
}

fn compiled_contract_cache(&self) -> &dyn ContractRuntimeCache {
&self.contract_cache
}
}
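
The two RuntimeAdapter additions above (in runtime/mod.rs and the test-only kv_runtime.rs) expose the runtime's compiled-contract cache through a new compiled_contract_cache() method that returns a borrowed &dyn ContractRuntimeCache, with the test runtime handing back a NoContractRuntimeCache. A minimal sketch of that accessor pattern, using placeholder trait names rather than near_vm_runner's real types:

// Sketch only: placeholder traits standing in for ContractRuntimeCache,
// NoContractRuntimeCache, and the RuntimeAdapter trait.
trait ContractCache {
    fn contains(&self, code_hash: &str) -> bool;
}

// No-op cache for tests that never compile contracts.
struct NoCache;

impl ContractCache for NoCache {
    fn contains(&self, _code_hash: &str) -> bool {
        false
    }
}

trait RuntimeLike {
    // Borrow the runtime's cache rather than cloning it, mirroring the
    // borrowed trait-object return type added in this commit.
    fn compiled_contract_cache(&self) -> &dyn ContractCache;
}

struct TestRuntime {
    contract_cache: NoCache,
}

impl RuntimeLike for TestRuntime {
    fn compiled_contract_cache(&self) -> &dyn ContractCache {
        &self.contract_cache
    }
}

fn main() {
    let runtime = TestRuntime { contract_cache: NoCache };
    assert!(!runtime.compiled_contract_cache().contains("some-code-hash"));
}
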