feat: integration test and restructured code
tomg10 committed Nov 8, 2023
1 parent 84c3d2e commit d4618d5
Showing 33 changed files with 717 additions and 290 deletions.
10 changes: 9 additions & 1 deletion .dockerignore
@@ -3,14 +3,14 @@
!docker/zk/entrypoint.sh
!docker/local-node/entrypoint.sh
!docker/external-node/entrypoint.sh
!docker/integration-test-node/generic_entrypoint.sh
!docker/contract-verifier/install-all-solc.sh
!etc/test_config
!etc/env/dev.env.example
!etc/env/docker.env
!etc/env/base
!etc/tokens
!etc/ERC20
!artifacts
!keys
keys/setup
!bin/
@@ -24,6 +24,14 @@ keys/setup
!Cargo.toml
!contracts/
!setup_2\^26.key

# files needed for integration-test-node
!target/release/
target/release/.fingerprint/
target/release/build/
target/release/deps/
target/release/*.d

# It's required to remove .git from contracts,
# otherwise yarn tries to use .git parent directory that
# doesn't exist.
2 changes: 0 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

34 changes: 30 additions & 4 deletions core/bin/external_node/src/main.rs
@@ -6,6 +6,7 @@ use std::{sync::Arc, time::Duration};

use prometheus_exporter::PrometheusExporterConfig;
use zksync_basic_types::{Address, L2ChainId};
use zksync_core::sync_layer::snapshots::StateKeeperConfig;
use zksync_core::{
api_server::{
execution_sandbox::VmConcurrencyLimiter,
@@ -183,14 +184,18 @@ async fn init_tasks(
.build()
.await
.context("failed to build a prover_tree_pool")?;
let tree_handle =
task::spawn(metadata_calculator.run(tree_pool, prover_tree_pool, tree_stop_receiver));
let tree_handle = task::spawn(metadata_calculator.run(
tree_pool,
prover_tree_pool,
tree_stop_receiver,
false,
));

let consistency_checker_handle = None;

let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone()));
let sk_handle = task::spawn(state_keeper.run());
let fetcher_handle = tokio::spawn(fetcher.run());
let sk_handle = task::spawn(state_keeper.run(false));
let fetcher_handle = tokio::spawn(fetcher.run(false));
let gas_adjuster_handle = tokio::spawn(gas_adjuster.clone().run(stop_receiver.clone()));

let (tx_sender, vm_barrier, cache_update_handle) = {
@@ -390,10 +395,31 @@ async fn main() -> anyhow::Result<()> {
let main_node_client = <dyn MainNodeClient>::json_rpc(&main_node_url)
.context("Failed creating JSON-RPC client for main node")?;

let state_keeper_params = StateKeeperConfig {
chain_id: config.remote.l2_chain_id,
connection_pool: connection_pool.clone(),
enum_index_migration_chunk_size: config.optional.enum_index_migration_chunk_size,
l2_erc20_bridge_addr: config.remote.l2_erc20_bridge_addr,
main_node_url: main_node_url.clone(),
state_keeper_db_path: config.required.state_cache_path.clone(),
};

let metadata_calculator_config = MetadataCalculatorConfig {
db_path: &config.required.merkle_tree_path,
mode: MetadataCalculatorModeConfig::Lightweight,
delay_interval: config.optional.metadata_calculator_delay(),
max_l1_batches_per_iter: 1,
multi_get_chunk_size: config.optional.merkle_tree_multi_get_chunk_size,
block_cache_capacity: config.optional.merkle_tree_block_cache_size(),
memtable_capacity: config.optional.merkle_tree_memtable_capacity(),
};

load_from_snapshot_if_needed(
&mut connection_pool.access_storage().await.unwrap(),
&main_node_client,
&config.required.merkle_tree_path,
state_keeper_params,
metadata_calculator_config,
)
.await
.context("Loading newest snapshot failed")?;
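The body of `load_from_snapshot_if_needed` is outside this hunk. Below is a rough sketch of the guard it presumably performs, using only calls visible elsewhere in this commit (`access_storage`, `is_genesis_needed`); the flow is an assumption, not the actual implementation:

```rust
use zksync_dal::StorageProcessor;

// Hypothetical shape of the recovery guard; the real function also receives
// the main-node client plus the state-keeper and metadata-calculator configs.
async fn load_from_snapshot_if_needed_sketch(
    storage: &mut StorageProcessor<'_>,
) -> anyhow::Result<()> {
    // If the local DB already contains L1 batches, there is nothing to restore.
    if !storage.blocks_dal().is_genesis_needed().await? {
        return Ok(());
    }
    // Otherwise: fetch the newest SnapshotHeader from the main node, apply its
    // chunks, and record progress in applied_snapshot_status.
    Ok(())
}
```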
2 changes: 0 additions & 2 deletions core/bin/snapshot_creator/Cargo.toml
@@ -20,8 +20,6 @@ zksync_object_store = { path = "../../lib/object_store" }
vlog = { path = "../../lib/vlog" }

anyhow = "1.0"
clap = { version = "4.2.4", features = ["derive"] }
tokio = { version = "1", features = ["full"] }
tracing = "0.1"
futures = "0.3"
serde = { version = "1.0.175", features = ["derive"] }
4 changes: 3 additions & 1 deletion core/bin/snapshot_creator/src/main.rs
@@ -14,7 +14,9 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) {
.blocks_dal()
.get_sealed_l1_batch_number()
.await
.unwrap();
.unwrap()
- 1; // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch

let miniblock_number = conn
.storage_logs_snapshots_dal()
.get_last_miniblock_number(l1_batch_number)
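A quick worked example of the off-by-one (values hypothetical):

```rust
use zksync_basic_types::L1BatchNumber;

fn main() {
    let sealed = L1BatchNumber(100); // newest sealed batch on the main node
    let snapshot_batch = L1BatchNumber(sealed.0 - 1);
    // Batch #100 is left for the restored external node to fetch.
    assert_eq!(snapshot_batch, L1BatchNumber(99));
}
```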
@@ -1,3 +1,10 @@
-- Add down migration script here

ALTER TABLE factory_deps ADD CONSTRAINT factory_deps_miniblock_number_fkey
FOREIGN KEY (miniblock_number) REFERENCES miniblocks (number);
ALTER TABLE initial_writes ADD CONSTRAINT initial_writes_l1_batch_number_fkey
FOREIGN KEY (l1_batch_number) REFERENCES l1_batches (number);
ALTER TABLE storage_logs ADD CONSTRAINT storage_logs_miniblock_number_fkey
FOREIGN KEY (miniblock_number) REFERENCES miniblocks (number);

DROP TABLE IF EXISTS applied_snapshot_status;
@@ -1,5 +1,9 @@
-- Add up migration script here

ALTER TABLE factory_deps DROP CONSTRAINT factory_deps_miniblock_number_fkey;
ALTER TABLE initial_writes DROP CONSTRAINT initial_writes_l1_batch_number_fkey;
ALTER TABLE storage_logs DROP CONSTRAINT storage_logs_miniblock_number_fkey;

CREATE TABLE applied_snapshot_status
(
l1_batch_number BIGINT NOT NULL PRIMARY KEY,
24 changes: 24 additions & 0 deletions core/lib/dal/sqlx-data.json
@@ -8620,6 +8620,18 @@
},
"query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id"
},
"b6f719d6e80dd29fffe7d1e79a6248cb4a516b0abf5ca8ecb3b14b46e251a1d2": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int8"
]
}
},
"query": "DELETE FROM l1_batches WHERE number = $1"
},
"b6f9874059c57e5e59f3021936437e9ff71a68065dfc19c295d806d7a9aafc93": {
"describe": {
"columns": [],
@@ -9888,6 +9900,18 @@
},
"query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n WHERE status <> 'skipped' and status <> 'successful' \n GROUP BY circuit_type, status\n "
},
"d007e0fd35d52da98e9ccca4cbdcfd09dac6bc3b03e6cf58a434d1b266026a9b": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int8"
]
}
},
"query": "DELETE FROM miniblocks WHERE number = $1"
},
"d0ff67e7c59684a0e4409726544cf850dbdbb36d038ebbc6a1c5bf0e76b0358c": {
"describe": {
"columns": [
19 changes: 19 additions & 0 deletions core/lib/dal/src/blocks_dal.rs
@@ -28,6 +28,25 @@ pub struct BlocksDal<'a, 'c> {
}

impl BlocksDal<'_, '_> {
pub async fn clear_dummy_snapshot_headers(
&mut self,
l1_batch_number: L1BatchNumber,
miniblock_number: MiniblockNumber,
) -> sqlx::Result<()> {
sqlx::query!(
"DELETE FROM l1_batches WHERE number = $1",
l1_batch_number.0 as i64
)
.execute(self.storage.conn())
.await?;
sqlx::query!(
"DELETE FROM miniblocks WHERE number = $1",
miniblock_number.0 as i64
)
.execute(self.storage.conn())
.await?;
Ok(())
}
pub async fn is_genesis_needed(&mut self) -> sqlx::Result<bool> {
let count = sqlx::query!("SELECT COUNT(*) as \"count!\" FROM l1_batches")
.fetch_one(self.storage.conn())
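A minimal usage sketch for the new DAL method; the batch/miniblock numbers and the surrounding recovery step are hypothetical:

```rust
use zksync_dal::ConnectionPool;
use zksync_types::{L1BatchNumber, MiniblockNumber};

// Once real block data has been fetched, drop the placeholder ("dummy")
// snapshot headers so they do not shadow the genuine ones.
async fn drop_placeholders(pool: &ConnectionPool) -> anyhow::Result<()> {
    let mut storage = pool.access_storage().await?;
    storage
        .blocks_dal()
        .clear_dummy_snapshot_headers(L1BatchNumber(42), MiniblockNumber(100))
        .await?;
    Ok(())
}
```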
4 changes: 2 additions & 2 deletions core/lib/dal/src/storage_logs_snapshots_dal.rs
@@ -88,8 +88,8 @@ impl SnapshotChunksDal<'_, '_> {
H256::from_slice(&row.key),
),
value: H256::from_slice(&row.value),
miniblock_number: MiniblockNumber(row.miniblock_number as u32),
l1_batch_number: L1BatchNumber(row.l1_batch_number as u32),
miniblock_number_of_initial_write: MiniblockNumber(row.miniblock_number as u32),
l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number as u32),
enumeration_index: row.index as u64,
})
.collect();
1 change: 0 additions & 1 deletion core/lib/merkle_tree/src/storage/mod.rs
@@ -185,7 +185,6 @@ impl TreeUpdater {
TraverseOutcome::MissingChild(nibbles) if nibbles.nibble_count() == 0 => {
// The root is currently empty; we replace it with a leaf.
let leaf_index = leaf_index_fn();
debug_assert_eq!(leaf_index, 1);
let root_leaf = LeafNode::new(key, value_hash, leaf_index);
self.set_root_node(root_leaf.into());
let leaf_data = NewLeafData::new(Nibbles::EMPTY, root_leaf);
9 changes: 5 additions & 4 deletions core/lib/types/src/snapshots.rs
@@ -19,11 +19,12 @@ pub struct SnapshotMetadata {

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Snapshot {
pub metadata: SnapshotMetadata,
pub struct SnapshotHeader {
pub l1_batch_number: L1BatchNumber,
pub miniblock_number: MiniblockNumber,
pub chunks: Vec<SnapshotChunkMetadata>,
pub last_l1_batch_with_metadata: L1BatchWithMetadata,
pub generated_at: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -52,8 +53,8 @@ pub struct SnapshotChunk {
pub struct SnapshotStorageLog {
pub key: StorageKey,
pub value: StorageValue,
pub miniblock_number: MiniblockNumber,
pub l1_batch_number: L1BatchNumber,
pub miniblock_number_of_initial_write: MiniblockNumber,
pub l1_batch_number_of_initial_write: L1BatchNumber,
pub enumeration_index: u64,
}

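Because `SnapshotHeader` derives `Serialize` with `rename_all = "camelCase"`, RPC clients see camelCase keys. A small sketch (serde_json assumed available as a dependency):

```rust
use zksync_types::snapshots::SnapshotHeader;

// Prints keys such as "l1BatchNumber", "miniblockNumber", "chunks",
// "lastL1BatchWithMetadata" and "generatedAt".
fn print_header(header: &SnapshotHeader) {
    println!("{}", serde_json::to_string_pretty(header).unwrap());
}
```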
4 changes: 2 additions & 2 deletions core/lib/web3_decl/src/namespaces/snapshots.rs
@@ -1,5 +1,5 @@
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use zksync_types::snapshots::{AllSnapshots, Snapshot};
use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
use zksync_types::L1BatchNumber;

#[cfg_attr(
@@ -22,5 +22,5 @@ pub trait SnapshotsNamespace {
async fn get_snapshot_by_l1_batch_number(
&self,
l1_batch_number: L1BatchNumber,
) -> RpcResult<Option<Snapshot>>;
) -> RpcResult<Option<SnapshotHeader>>;
}
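On the client side, the `#[rpc]` proc macro conventionally generates a matching client trait as well. A hedged sketch of calling this endpoint — the `SnapshotsNamespaceClient` trait name, URL, and batch number are assumptions:

```rust
use jsonrpsee::http_client::HttpClientBuilder;
use zksync_types::L1BatchNumber;
use zksync_web3_decl::namespaces::SnapshotsNamespaceClient;

async fn fetch_snapshot_header() -> anyhow::Result<()> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:3050")?;
    // `None` means no snapshot exists yet for that batch.
    if let Some(header) = client
        .get_snapshot_by_l1_batch_number(L1BatchNumber(42))
        .await?
    {
        println!("{header:?}");
    }
    Ok(())
}
```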
@@ -7,7 +7,7 @@ use jsonrpc_derive::rpc;
// Workspace uses
use crate::api_server::web3::backend_jsonrpc::error::into_jsrpc_error;
use crate::l1_gas_price::L1GasPriceProvider;
use zksync_types::snapshots::{AllSnapshots, Snapshot};
use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
use zksync_types::L1BatchNumber;

// Local uses
@@ -22,7 +22,7 @@ pub trait SnapshotsNamespaceT {
fn get_snapshot_by_l1_batch_number(
&self,
l1_batch_number: L1BatchNumber,
) -> BoxFuture<Result<Option<Snapshot>>>;
) -> BoxFuture<Result<Option<SnapshotHeader>>>;
}

impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceT for SnapshotsNamespace<G> {
@@ -39,7 +39,7 @@ impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceT for SnapshotsNamespace<G> {
fn get_snapshot_by_l1_batch_number(
&self,
l1_batch_number: L1BatchNumber,
) -> BoxFuture<Result<Option<Snapshot>>> {
) -> BoxFuture<Result<Option<SnapshotHeader>>> {
let self_ = self.clone();
Box::pin(async move {
self_
@@ -2,7 +2,7 @@ use crate::api_server::web3::backend_jsonrpsee::into_jsrpc_error;
use crate::api_server::web3::namespaces::SnapshotsNamespace;
use crate::l1_gas_price::L1GasPriceProvider;
use async_trait::async_trait;
use zksync_types::snapshots::{AllSnapshots, Snapshot};
use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
use zksync_types::L1BatchNumber;
use zksync_web3_decl::jsonrpsee::core::RpcResult;
use zksync_web3_decl::namespaces::SnapshotsNamespaceServer;
@@ -20,7 +20,7 @@ impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceServer
async fn get_snapshot_by_l1_batch_number(
&self,
l1_batch_number: L1BatchNumber,
) -> RpcResult<Option<Snapshot>> {
) -> RpcResult<Option<SnapshotHeader>> {
self.get_snapshot_by_l1_batch_number_impl(l1_batch_number)
.await
.map_err(into_jsrpc_error)
11 changes: 7 additions & 4 deletions core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -1,6 +1,8 @@
use crate::api_server::web3::state::RpcState;
use crate::l1_gas_price::L1GasPriceProvider;
use zksync_types::snapshots::{AllSnapshots, Snapshot, SnapshotChunkMetadata, SnapshotStorageKey};
use zksync_types::snapshots::{
AllSnapshots, SnapshotChunkMetadata, SnapshotHeader, SnapshotStorageKey,
};
use zksync_types::L1BatchNumber;
use zksync_web3_decl::error::Web3Error;

@@ -29,7 +31,7 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
pub async fn get_snapshot_by_l1_batch_number_impl(
&self,
l1_batch_number: L1BatchNumber,
) -> Result<Option<Snapshot>, Web3Error> {
) -> Result<Option<SnapshotHeader>, Web3Error> {
let mut storage_processor = self.state.connection_pool.access_storage().await.unwrap();
let mut snapshots_dal = storage_processor.snapshots_dal();
let snapshot_files = snapshots_dal
@@ -67,8 +69,9 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
.get_last_miniblock_number(l1_batch_number)
.await
.unwrap();
Ok(Some(Snapshot {
metadata: snapshot_metadata,
Ok(Some(SnapshotHeader {
l1_batch_number: snapshot_metadata.l1_batch_number,
generated_at: snapshot_metadata.generated_at,
miniblock_number,
last_l1_batch_with_metadata: l1_batch_with_metadata,
chunks,
4 changes: 2 additions & 2 deletions core/lib/zksync_core/src/lib.rs
@@ -713,7 +713,7 @@ async fn add_state_keeper_to_task_futures<E: L1GasPriceProvider + Send + Sync +
stop_receiver.clone(),
)
.await;
task_futures.push(tokio::spawn(state_keeper.run()));
task_futures.push(tokio::spawn(state_keeper.run(false)));

let mempool_fetcher_pool = pool_builder
.build()
@@ -824,7 +824,7 @@ async fn run_tree(
.build()
.await
.context("failed to build prover_pool")?;
let tree_task = tokio::spawn(metadata_calculator.run(pool, prover_pool, stop_receiver));
let tree_task = tokio::spawn(metadata_calculator.run(pool, prover_pool, stop_receiver, false));
task_futures.push(tree_task);

let elapsed = started_at.elapsed();
2 changes: 2 additions & 0 deletions core/lib/zksync_core/src/metadata_calculator/mod.rs
@@ -139,6 +139,7 @@ impl MetadataCalculator {
pool: ConnectionPool,
prover_pool: ConnectionPool,
stop_receiver: watch::Receiver<bool>,
stop_after_one_batch: bool,
) -> anyhow::Result<()> {
self.updater
.loop_updating_tree(
@@ -147,6 +148,7 @@
&prover_pool,
stop_receiver,
self.health_updater,
stop_after_one_batch,
)
.await
}
4 changes: 4 additions & 0 deletions core/lib/zksync_core/src/metadata_calculator/updater.rs
@@ -294,6 +294,7 @@ impl TreeUpdater {
prover_pool: &ConnectionPool,
mut stop_receiver: watch::Receiver<bool>,
health_updater: HealthUpdater,
stop_after_one_batch: bool,
) -> anyhow::Result<()> {
let mut storage = pool
.access_storage_tagged("metadata_calculator")
@@ -374,6 +375,9 @@
let snapshot = *next_l1_batch_to_seal;
self.step(storage, prover_storage, &mut next_l1_batch_to_seal)
.await;
if stop_after_one_batch {
return Ok(());
}
let delay = if snapshot == *next_l1_batch_to_seal {
tracing::trace!(
"Metadata calculator (next L1 batch: #{next_l1_batch_to_seal}) \
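With the new flag, an integration test can run the tree update exactly once and then join the task. A sketch — the `MetadataCalculator` import path and the pool names are assumed to be set up by the test:

```rust
use tokio::sync::watch;
use zksync_core::metadata_calculator::MetadataCalculator; // path assumed
use zksync_dal::ConnectionPool;

// One-shot tree update for an integration test: process a single L1 batch,
// then let the task finish instead of looping.
async fn run_tree_once(
    metadata_calculator: MetadataCalculator,
    tree_pool: ConnectionPool,
    prover_tree_pool: ConnectionPool,
) -> anyhow::Result<()> {
    let (_stop_sender, stop_receiver) = watch::channel(false);
    let handle = tokio::spawn(metadata_calculator.run(
        tree_pool,
        prover_tree_pool,
        stop_receiver,
        /* stop_after_one_batch */ true,
    ));
    handle.await? // resolves once a single batch has been processed
}
```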
(Remaining changed files not rendered.)