From 1cfd426a4f38303e4bfefdb4c594945ba6046a6b Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Fri, 1 Nov 2024 11:17:28 +0200 Subject: [PATCH 1/2] feat(contract-verifier): add compilers 1.5.7 (#3219) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ add compilers 1.5.7 to contract verifier ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- docker/contract-verifier/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index e9d83903d11..b1b63429a63 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 7); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -68,7 +68,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 7); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ From 069d38d6c9ddd8b6c404596c479f94b9fc86db40 Mon Sep 17 00:00:00 2001 From: Ivan Schasny 
<31857042+ischasny@users.noreply.github.com> Date: Fri, 1 Nov 2024 10:53:31 +0000 Subject: [PATCH 2/2] feat: add `block.timestamp` asserter for AA (#3031) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds the ability to use `block.timestamp` in custom AA contracts. AAs will still not have direct access to `block.timestamp`, but can utilize it via a proxy that enforces certain constraints. The PR introduces a `TimestampAsserter` contract that is deployed on every chain to the user space, similar to `Multicall3`. This contract has a single function, `assertTimestampInRange(start, end)`, which can be used by AAs at their discretion. The `TimestampAsserter` contract ensures that `block.timestamp` falls within the specified `(start, end)` range. Additionally, the sequencer verifies that the `block.timestamp` is sufficiently far from the range’s end. This is to prevent DoS attacks where transactions pass validation but get stuck in the mempool during execution. This constraint is configurable and can be adjusted without requiring protocol update. The PR also introduces two new fields to the `transactions` table: `timestamp_asserter_range_start` and `timestamp_asserter_range_end`. These fields are extracted during transaction execution in the sandbox by the `ValidationTracer`. If multiple assertions are made in a single transaction, the system captures the maximum of the starts and the minimum of the ends, resulting in the narrowest possible time range. Transactions with time range constraints will undergo additional verification before being included in a block. If the current time falls outside the transaction’s specified time range, the transaction will be rejected with an appropriate message. 
Sister PR in `era-contracts`: https://github.com/matter-labs/era-contracts/pull/843 --------- Signed-off-by: Danil Co-authored-by: Danil --- contracts | 2 +- core/bin/external_node/src/config/mod.rs | 84 +++++-- core/bin/external_node/src/config/tests.rs | 1 + core/bin/zksync_server/src/main.rs | 3 +- core/bin/zksync_server/src/node_builder.rs | 19 +- core/lib/config/src/configs/chain.rs | 8 +- core/lib/config/src/configs/contracts.rs | 2 + core/lib/config/src/configs/general.rs | 6 +- core/lib/config/src/testonly.rs | 11 + ...a2d505a1aabf52ff4136d2ed1b39c70dd1632.json | 12 + ...2826095e9290f0c1157094bd0c44e06012e42.json | 12 + ...a77a428820fdcea9969aff3b29ca16727357b.json | 12 + ...802690bdd139505ba22be3655e306773abc77.json | 42 ++++ ...c5223c9d5e2e42d89bb456d24c601edc06a05.json | 40 ---- ...64a8e245ecee4866264d38146938595b07f37.json | 12 + ...7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json | 12 + ...3940_add_block_timestamp_asserter.down.sql | 3 + ...073940_add_block_timestamp_asserter.up.sql | 3 + core/lib/dal/src/blocks_web3_dal.rs | 8 +- .../lib/dal/src/models/storage_transaction.rs | 21 +- core/lib/dal/src/pruning_dal/tests.rs | 8 +- core/lib/dal/src/sync_dal.rs | 8 +- core/lib/dal/src/tests/mod.rs | 46 +++- core/lib/dal/src/transactions_dal.rs | 71 +++++- core/lib/dal/src/transactions_web3_dal.rs | 20 +- core/lib/env_config/src/contracts.rs | 2 + core/lib/env_config/src/lib.rs | 1 + core/lib/env_config/src/timestamp_asserter.rs | 34 +++ core/lib/mempool/src/mempool_store.rs | 48 +++- core/lib/mempool/src/tests.rs | 49 ++-- core/lib/mempool/src/types.rs | 35 +-- core/lib/multivm/src/tracers/validator/mod.rs | 67 +++--- .../src/tracers/validator/vm_latest/mod.rs | 50 ++++- core/lib/protobuf_config/src/contracts.rs | 9 + core/lib/protobuf_config/src/general.rs | 5 + core/lib/protobuf_config/src/lib.rs | 1 + .../src/proto/config/contracts.proto | 1 + .../src/proto/config/general.proto | 2 + .../src/proto/config/timestamp_asserter.proto | 7 + 
.../protobuf_config/src/timestamp_asserter.rs | 19 ++ core/lib/types/src/lib.rs | 7 +- core/lib/vm_executor/src/oneshot/mock.rs | 29 ++- core/lib/vm_executor/src/oneshot/mod.rs | 19 +- core/lib/vm_interface/src/executor.rs | 4 +- .../lib/vm_interface/src/types/errors/halt.rs | 4 + core/lib/vm_interface/src/types/tracer.rs | 75 ++++++- core/lib/web3_decl/src/namespaces/zks.rs | 3 + .../src/temp_config_store/mod.rs | 5 +- .../api_server/src/execution_sandbox/error.rs | 3 + .../src/execution_sandbox/execute.rs | 8 +- .../api_server/src/execution_sandbox/tests.rs | 2 + .../src/execution_sandbox/validate.rs | 10 +- .../src/tx_sender/master_pool_sink.rs | 5 +- core/node/api_server/src/tx_sender/mod.rs | 27 ++- core/node/api_server/src/tx_sender/proxy.rs | 39 +++- core/node/api_server/src/tx_sender/result.rs | 6 + .../api_server/src/tx_sender/tests/mod.rs | 2 +- .../api_server/src/tx_sender/tests/send_tx.rs | 100 ++++++++- core/node/api_server/src/tx_sender/tx_sink.rs | 3 +- .../web3/backend_jsonrpsee/namespaces/zks.rs | 4 + .../api_server/src/web3/namespaces/zks.rs | 4 + core/node/api_server/src/web3/state.rs | 2 + core/node/api_server/src/web3/testonly.rs | 1 + core/node/api_server/src/web3/tests/mod.rs | 22 +- core/node/eth_watch/src/tests.rs | 3 + core/node/node_sync/src/external_io.rs | 1 + core/node/state_keeper/Cargo.toml | 1 + core/node/state_keeper/src/io/common/tests.rs | 8 +- core/node/state_keeper/src/io/mempool.rs | 24 +- core/node/state_keeper/src/io/mod.rs | 7 +- .../io/seal_logic/l2_block_seal_subtasks.rs | 4 +- core/node/state_keeper/src/io/tests/mod.rs | 151 ++++++++++++- core/node/state_keeper/src/io/tests/tester.rs | 16 +- core/node/state_keeper/src/keeper.rs | 3 +- core/node/state_keeper/src/mempool_actor.rs | 36 ++- .../state_keeper/src/seal_criteria/mod.rs | 1 + .../src/testonly/test_batch_executor.rs | 1 + core/node/state_keeper/src/types.rs | 34 ++- core/node/vm_runner/src/tests/mod.rs | 10 +- .../custom-account/custom-account.sol | 19 +- 
core/tests/ts-integration/src/env.ts | 19 +- core/tests/ts-integration/src/types.ts | 2 + .../tests/custom-account.test.ts | 211 +++++++++++++++++- etc/env/file_based/general.yaml | 3 + zkstack_cli/crates/config/src/contracts.rs | 11 +- .../deploy_l2_contracts/output.rs | 7 + .../src/commands/chain/deploy_l2_contracts.rs | 4 +- 87 files changed, 1491 insertions(+), 265 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77.json delete mode 100644 core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json create mode 100644 core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.down.sql create mode 100644 core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.up.sql create mode 100644 core/lib/env_config/src/timestamp_asserter.rs create mode 100644 core/lib/protobuf_config/src/proto/config/timestamp_asserter.proto create mode 100644 core/lib/protobuf_config/src/timestamp_asserter.rs diff --git a/contracts b/contracts index 84d5e3716f6..9fb1264fce8 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 84d5e3716f645909e8144c7d50af9dd6dd9ded62 +Subproject commit 9fb1264fce8c0ebeefe8bf1846e89876027161d2 diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 70803a66311..0a94f993656 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -1,6 +1,7 @@ use std::{ env, ffi::OsString, + future::Future, num::{NonZeroU32, NonZeroU64, NonZeroUsize}, path::PathBuf, time::Duration, @@ -24,7 +25,7 @@ use zksync_core_leftovers::temp_config_store::read_yaml_repr; use zksync_dal::{ConnectionPool, Core}; use zksync_metadata_calculator::MetadataCalculatorRecoveryConfig; use zksync_node_api_server::{ - tx_sender::TxSenderConfig, + tx_sender::{TimestampAsserterParams, TxSenderConfig}, web3::{state::InternalApiConfig, Namespace}, }; use 
zksync_protobuf_config::proto; @@ -121,6 +122,7 @@ pub(crate) struct RemoteENConfig { pub l1_weth_bridge_addr: Option
, pub l2_weth_bridge_addr: Option
, pub l2_testnet_paymaster_addr: Option
, + pub l2_timestamp_asserter_addr: Option
, pub base_token_addr: Address, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, pub dummy_verifier: bool, @@ -146,22 +148,19 @@ impl RemoteENConfig { .get_main_contract() .rpc_context("get_main_contract") .await?; - let base_token_addr = match client.get_base_token_l1_address().await { - Err(ClientError::Call(err)) - if [ - ErrorCode::MethodNotFound.code(), - // This what `Web3Error::NotImplemented` gets - // `casted` into in the `api` server. - ErrorCode::InternalError.code(), - ] - .contains(&(err.code())) => - { - // This is the fallback case for when the EN tries to interact - // with a node that does not implement the `zks_baseTokenL1Address` endpoint. - ETHEREUM_ADDRESS - } - response => response.context("Failed to fetch base token address")?, - }; + + let timestamp_asserter_address = handle_rpc_response_with_fallback( + client.get_timestamp_asserter(), + None, + "Failed to fetch timestamp asserter address".to_string(), + ) + .await?; + let base_token_addr = handle_rpc_response_with_fallback( + client.get_base_token_l1_address(), + ETHEREUM_ADDRESS, + "Failed to fetch base token address".to_string(), + ) + .await?; // These two config variables should always have the same value. // TODO(EVM-578): double check and potentially forbid both of them being `None`. 
@@ -206,6 +205,7 @@ impl RemoteENConfig { .as_ref() .map(|a| a.dummy_verifier) .unwrap_or_default(), + l2_timestamp_asserter_addr: timestamp_asserter_address, }) } @@ -227,10 +227,36 @@ impl RemoteENConfig { l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(7)), l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, dummy_verifier: true, + l2_timestamp_asserter_addr: None, } } } +async fn handle_rpc_response_with_fallback( + rpc_call: F, + fallback: T, + context: String, +) -> anyhow::Result +where + F: Future>, + T: Clone, +{ + match rpc_call.await { + Err(ClientError::Call(err)) + if [ + ErrorCode::MethodNotFound.code(), + // This what `Web3Error::NotImplemented` gets + // `casted` into in the `api` server. + ErrorCode::InternalError.code(), + ] + .contains(&(err.code())) => + { + Ok(fallback) + } + response => response.context(context), + } +} + /// This part of the external node config is completely optional to provide. /// It can tweak limits of the API, delay intervals of certain components, etc. /// If any of the fields are not provided, the default values will be used. @@ -454,6 +480,9 @@ pub(crate) struct OptionalENConfig { pub gateway_url: Option, /// Interval for bridge addresses refreshing in seconds. 
bridge_addresses_refresh_interval_sec: Option, + /// Minimum time between current block.timestamp and the end of the asserted range for TimestampAsserter + #[serde(default = "OptionalENConfig::default_timestamp_asserter_min_time_till_end_sec")] + pub timestamp_asserter_min_time_till_end_sec: u32, } impl OptionalENConfig { @@ -685,6 +714,11 @@ impl OptionalENConfig { contracts_diamond_proxy_addr: None, gateway_url: enconfig.gateway_url.clone(), bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, + timestamp_asserter_min_time_till_end_sec: general_config + .timestamp_asserter_config + .as_ref() + .map(|x| x.min_time_till_end_sec) + .unwrap_or_else(Self::default_timestamp_asserter_min_time_till_end_sec), }) } @@ -819,6 +853,10 @@ impl OptionalENConfig { 3_600 * 24 * 7 // 7 days } + const fn default_timestamp_asserter_min_time_till_end_sec() -> u32 { + 60 + } + fn from_env() -> anyhow::Result { let mut result: OptionalENConfig = envy::prefixed("EN_") .from_env() @@ -1425,6 +1463,7 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { filters_disabled: config.optional.filters_disabled, dummy_verifier: config.remote.dummy_verifier, l1_batch_commit_data_generator_mode: config.remote.l1_batch_commit_data_generator_mode, + timestamp_asserter_address: config.remote.l2_timestamp_asserter_addr, } } } @@ -1447,6 +1486,17 @@ impl From<&ExternalNodeConfig> for TxSenderConfig { chain_id: config.required.l2_chain_id, // Does not matter for EN. 
whitelisted_tokens_for_aa: Default::default(), + timestamp_asserter_params: config.remote.l2_timestamp_asserter_addr.map(|address| { + TimestampAsserterParams { + address, + min_time_till_end: Duration::from_secs( + config + .optional + .timestamp_asserter_min_time_till_end_sec + .into(), + ), + } + }), } } } diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs index a32be3eff72..dc74d124b18 100644 --- a/core/bin/external_node/src/config/tests.rs +++ b/core/bin/external_node/src/config/tests.rs @@ -128,6 +128,7 @@ fn parsing_optional_config_from_env() { "zks_getProof=100,eth_call=2", ), ("EN_L1_BATCH_COMMIT_DATA_GENERATOR_MODE", "Validium"), + ("EN_TIMESTAMP_ASSERTER_MIN_TIME_TILL_END_SEC", "2"), ]; let env_vars = env_vars .into_iter() diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 855f50df141..51e7b409c9a 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -7,7 +7,7 @@ use zksync_config::{ api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig}, chain::{ CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, - StateKeeperConfig, + StateKeeperConfig, TimestampAsserterConfig, }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, @@ -195,5 +195,6 @@ fn load_env_config() -> anyhow::Result { external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), experimental_vm_config: ExperimentalVmConfig::from_env().ok(), prover_job_monitor_config: None, + timestamp_asserter_config: TimestampAsserterConfig::from_env().ok(), }) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e7a3dca77f1..32478ede5bf 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -1,6 +1,8 @@ //! This module provides a "builder" for the main node, //! 
as well as an interface to run the node with the specified components. +use std::time::Duration; + use anyhow::{bail, Context}; use zksync_config::{ configs::{ @@ -12,7 +14,7 @@ use zksync_config::{ use zksync_core_leftovers::Component; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ - tx_sender::TxSenderConfig, + tx_sender::{TimestampAsserterParams, TxSenderConfig}, web3::{state::InternalApiConfig, Namespace}, }; use zksync_node_framework::{ @@ -303,6 +305,20 @@ impl MainNodeBuilder { fn add_tx_sender_layer(mut self) -> anyhow::Result { let sk_config = try_load_config!(self.configs.state_keeper_config); let rpc_config = try_load_config!(self.configs.api_config).web3_json_rpc; + + let timestamp_asserter_params = match self.contracts_config.l2_timestamp_asserter_addr { + Some(address) => { + let timestamp_asserter_config = + try_load_config!(self.configs.timestamp_asserter_config); + Some(TimestampAsserterParams { + address, + min_time_till_end: Duration::from_secs( + timestamp_asserter_config.min_time_till_end_sec.into(), + ), + }) + } + None => None, + }; let postgres_storage_caches_config = PostgresStorageCachesConfig { factory_deps_cache_size: rpc_config.factory_deps_cache_size() as u64, initial_writes_cache_size: rpc_config.initial_writes_cache_size() as u64, @@ -322,6 +338,7 @@ impl MainNodeBuilder { .fee_account .address(), self.genesis_config.l2_chain_id, + timestamp_asserter_params, ), postgres_storage_caches_config, rpc_config.vm_concurrency_limit(), diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index c117064dbc4..d73dce81b13 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -1,6 +1,6 @@ use std::{str::FromStr, time::Duration}; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use zksync_basic_types::{ commitment::L1BatchCommitmentMode, network::Network, Address, L2ChainId, H256, }; @@ -244,3 +244,9 @@ impl 
MempoolConfig { Duration::from_millis(self.delay_interval) } } + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +pub struct TimestampAsserterConfig { + /// Minimum time between current block.timestamp and the end of the asserted range + pub min_time_till_end_sec: u32, +} diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index 0bf7aab3bca..38576833fa3 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -41,6 +41,7 @@ pub struct ContractsConfig { pub l1_weth_bridge_proxy_addr: Option
, pub l2_weth_bridge_addr: Option
, pub l2_testnet_paymaster_addr: Option
, + pub l2_timestamp_asserter_addr: Option
, pub l1_multicall3_addr: Address, pub ecosystem_contracts: Option, // Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer. @@ -65,6 +66,7 @@ impl ContractsConfig { l2_weth_bridge_addr: Some(Address::repeat_byte(0x0c)), l2_testnet_paymaster_addr: Some(Address::repeat_byte(0x11)), l1_multicall3_addr: Address::repeat_byte(0x12), + l2_timestamp_asserter_addr: Some(Address::repeat_byte(0x19)), governance_addr: Address::repeat_byte(0x13), base_token_addr: Some(Address::repeat_byte(0x14)), ecosystem_contracts: Some(EcosystemContracts::for_tests()), diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index bb733510f77..dfb81af1cf8 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -1,7 +1,10 @@ use crate::{ configs::{ base_token_adjuster::BaseTokenAdjusterConfig, - chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, + chain::{ + CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig, + TimestampAsserterConfig, + }, consensus::ConsensusConfig, da_client::DAClientConfig, da_dispatcher::DADispatcherConfig, @@ -56,4 +59,5 @@ pub struct GeneralConfig { pub external_proof_integration_api_config: Option, pub experimental_vm_config: Option, pub prover_job_monitor_config: Option, + pub timestamp_asserter_config: Option, } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 49c5cff1dca..72df871d7ce 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -18,6 +18,7 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ self, + chain::TimestampAsserterConfig, da_client::{ avail::{AvailClientConfig, AvailDefaultConfig}, DAClientConfig::Avail, @@ -265,6 +266,7 @@ impl Distribution for EncodeDist { l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), 
l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), + l2_timestamp_asserter_addr: self.sample_opt(|| rng.gen()), l1_multicall3_addr: rng.gen(), ecosystem_contracts: self.sample(rng), base_token_addr: self.sample_opt(|| rng.gen()), @@ -1181,6 +1183,15 @@ impl Distribution for EncodeDist { external_proof_integration_api_config: self.sample(rng), experimental_vm_config: self.sample(rng), prover_job_monitor_config: self.sample(rng), + timestamp_asserter_config: self.sample(rng), + } + } +} + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> TimestampAsserterConfig { + TimestampAsserterConfig { + min_time_till_end_sec: self.sample(rng), } } } diff --git a/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json b/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json index 7b939d137db..b84cd1bcba8 100644 --- a/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json +++ b/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -223,6 +233,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json b/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json index 707b7ce9e75..2f4203aaa32 100644 --- a/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json +++ b/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + 
}, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -225,6 +235,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json b/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json index 1d27af2bbc1..50d3ce5188d 100644 --- a/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json +++ b/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -228,6 +238,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77.json b/core/lib/dal/.sqlx/query-ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77.json new file mode 100644 index 00000000000..2edb0822ac6 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n timestamp_asserter_range_start,\n timestamp_asserter_range_end,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n FALSE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n 
$11,\n $12,\n $13,\n $14,\n $15,\n JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ON CONFLICT (initiator_address, nonce) DO\n UPDATE\n SET\n hash = $1,\n signature = $4,\n gas_limit = $5,\n max_fee_per_gas = $6,\n max_priority_fee_per_gas = $7,\n gas_per_pubdata_limit = $8,\n input = $9,\n data = $10,\n tx_format = $11,\n contract_address = $12,\n value = $13,\n paymaster = $14,\n paymaster_input = $15,\n execution_info\n = JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n in_mempool = FALSE,\n received_at = $19,\n timestamp_asserter_range_start = $20,\n timestamp_asserter_range_end = $21,\n created_at = NOW(),\n updated_at = NOW(),\n error = NULL\n WHERE\n transactions.is_priority = FALSE\n AND transactions.miniblock_number IS NULL\n RETURNING\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n transactions.initiator_address = $2\n AND transactions.nonce = $3\n ) IS NOT NULL AS \"is_replaced!\"\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_replaced!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int8", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Numeric", + "Bytea", + "Jsonb", + "Int4", + "Bytea", + "Numeric", + "Bytea", + "Bytea", + "Int8", + "Int4", + "Int4", + "Timestamp", + "Timestamp", + "Timestamp" + ] + }, + "nullable": [ + null + ] + }, + "hash": "ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77" +} diff --git a/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json b/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json deleted file mode 100644 index c234cbe4235..00000000000 --- a/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json +++ /dev/null @@ -1,40 +0,0 @@ -{ 
- "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n FALSE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n $19,\n NOW(),\n NOW()\n )\n ON CONFLICT (initiator_address, nonce) DO\n UPDATE\n SET\n hash = $1,\n signature = $4,\n gas_limit = $5,\n max_fee_per_gas = $6,\n max_priority_fee_per_gas = $7,\n gas_per_pubdata_limit = $8,\n input = $9,\n data = $10,\n tx_format = $11,\n contract_address = $12,\n value = $13,\n paymaster = $14,\n paymaster_input = $15,\n execution_info\n = JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n in_mempool = FALSE,\n received_at = $19,\n created_at = NOW(),\n updated_at = NOW(),\n error = NULL\n WHERE\n transactions.is_priority = FALSE\n AND transactions.miniblock_number IS NULL\n RETURNING\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n transactions.initiator_address = $2\n AND transactions.nonce = $3\n ) IS NOT NULL AS \"is_replaced!\"\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "is_replaced!", - "type_info": "Bool" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int8", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Numeric", - "Bytea", - "Jsonb", - "Int4", - "Bytea", - "Numeric", - "Bytea", - "Bytea", - "Int8", - "Int4", - "Int4", - "Timestamp" - ] - }, - "nullable": [ - null - ] - }, - "hash": "ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05" -} diff --git 
a/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json b/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json index 2419082dcc2..079ce55bd56 100644 --- a/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json +++ b/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -226,6 +236,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json b/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json index 2cd001b274d..8c43f8865ac 100644 --- a/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json +++ b/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -225,6 +235,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.down.sql b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.down.sql new file mode 100644 index 00000000000..87f6a8cb75a --- /dev/null +++ b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE transactions +DROP COLUMN timestamp_asserter_range_start, 
+DROP COLUMN timestamp_asserter_range_end; diff --git a/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.up.sql b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.up.sql new file mode 100644 index 00000000000..103a22cb8e3 --- /dev/null +++ b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE transactions +ADD COLUMN timestamp_asserter_range_start TIMESTAMP DEFAULT NULL, +ADD COLUMN timestamp_asserter_range_end TIMESTAMP DEFAULT NULL; diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 4cb57798638..ba843bbf92f 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -803,7 +803,7 @@ mod tests { block::{L2BlockHasher, L2BlockHeader}, Address, L2BlockNumber, ProtocolVersion, ProtocolVersionId, }; - use zksync_vm_interface::TransactionExecutionMetrics; + use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use super::*; use crate::{ @@ -1090,7 +1090,11 @@ mod tests { let mut tx_results = vec![]; for (i, tx) in transactions.into_iter().enumerate() { conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); let mut tx_result = mock_execution_result(tx); diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index dbd4fa94752..459a3ec0c0f 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -12,9 +12,9 @@ use zksync_types::{ transaction_request::PaymasterParams, web3::Bytes, Address, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, - PackedEthSignature, PriorityOpId, ProtocolVersionId, Transaction, EIP_1559_TX_TYPE, - EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, H256, 
PRIORITY_OPERATION_L2_TX_TYPE, - PROTOCOL_UPGRADE_TX_TYPE, U256, U64, + PackedEthSignature, PriorityOpId, ProtocolVersionId, Transaction, + TransactionTimeRangeConstraint, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, + H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, U64, }; use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; use zksync_vm_interface::Call; @@ -68,6 +68,9 @@ pub struct StorageTransaction { pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, + pub timestamp_asserter_range_start: Option, + pub timestamp_asserter_range_end: Option, + // DEPRECATED. pub l1_block_number: Option, } @@ -321,6 +324,18 @@ impl From for Transaction { } } +impl From<&StorageTransaction> for TransactionTimeRangeConstraint { + fn from(tx: &StorageTransaction) -> Self { + Self { + timestamp_asserter_range: tx.timestamp_asserter_range_start.and_then(|start| { + tx.timestamp_asserter_range_end.map(|end| { + (start.and_utc().timestamp() as u64)..(end.and_utc().timestamp() as u64) + }) + }), + } + } +} + #[derive(sqlx::FromRow)] pub(crate) struct StorageTransactionReceipt { pub error: Option, diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs index 4f94ff7f63d..70dda48d8c8 100644 --- a/core/lib/dal/src/pruning_dal/tests.rs +++ b/core/lib/dal/src/pruning_dal/tests.rs @@ -5,7 +5,7 @@ use zksync_types::{ tx::IncludedTxLocation, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, ProtocolVersionId, StorageKey, StorageLog, H256, }; -use zksync_vm_interface::TransactionExecutionMetrics; +use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use super::*; use crate::{ @@ -457,7 +457,11 @@ async fn transactions_are_handled_correctly_after_pruning() { let tx = mock_l2_transaction(); let tx_hash = tx.hash(); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + 
TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); conn.blocks_dal() diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 55e6543c028..4372a83f1fe 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -113,7 +113,7 @@ mod tests { block::{L1BatchHeader, L2BlockHeader}, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, }; - use zksync_vm_interface::TransactionExecutionMetrics; + use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use super::*; use crate::{ @@ -168,7 +168,11 @@ mod tests { }; let tx = mock_l2_transaction(); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); conn.blocks_dal() diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index baa2ee58485..11d4e55a55a 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -17,8 +17,8 @@ use zksync_types::{ L2ChainId, PriorityOpId, ProtocolVersion, ProtocolVersionId, H160, H256, U256, }; use zksync_vm_interface::{ - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionMetrics, + tracer::ValidationTraces, TransactionExecutionMetrics, TransactionExecutionResult, + TxExecutionStatus, VmEvent, VmExecutionMetrics, }; use crate::{ @@ -210,14 +210,22 @@ async fn workflow_with_submit_tx_equal_hashes() { let tx = mock_l2_transaction(); let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); assert_eq!(result, L2TxSubmissionResult::Added); let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + 
mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -236,7 +244,11 @@ async fn workflow_with_submit_tx_diff_hashes() { let initiator_address = tx.common_data.initiator_address; let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -246,7 +258,11 @@ async fn workflow_with_submit_tx_diff_hashes() { tx.common_data.nonce = nonce; tx.common_data.initiator_address = initiator_address; let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -270,13 +286,21 @@ async fn remove_stuck_txs() { let mut tx = mock_l2_transaction(); tx.received_timestamp_ms = unix_timestamp_ms() - Duration::new(1000, 0).as_millis() as u64; transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); // Tx in mempool let tx = mock_l2_transaction(); transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -293,7 +317,11 @@ async fn remove_stuck_txs() { executed_tx.received_timestamp_ms = unix_timestamp_ms() - Duration::new(1000, 0).as_millis() as u64; transactions_dal - .insert_transaction_l2(&executed_tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &executed_tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 5314e9799b3..9c0889ebfc7 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1,4 +1,4 @@ -use 
std::{collections::HashMap, fmt, time::Duration}; +use std::{cmp::min, collections::HashMap, fmt, time::Duration}; use bigdecimal::BigDecimal; use itertools::Itertools; @@ -12,12 +12,13 @@ use zksync_db_connection::{ use zksync_types::{ block::L2BlockExecutionData, debug_flat_call::CallTraceMeta, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, ExecuteTransactionCommon, L1BatchNumber, - L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, - PROTOCOL_UPGRADE_TX_TYPE, U256, + L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, + TransactionTimeRangeConstraint, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; use zksync_vm_interface::{ - Call, TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, + tracer::ValidationTraces, Call, TransactionExecutionMetrics, TransactionExecutionResult, + TxExecutionStatus, }; use crate::{ @@ -264,6 +265,7 @@ impl TransactionsDal<'_, '_> { &mut self, tx: &L2Tx, exec_info: TransactionExecutionMetrics, + validation_traces: ValidationTraces, ) -> DalResult { let tx_hash = tx.hash(); let is_duplicate = sqlx::query!( @@ -314,6 +316,16 @@ impl TransactionsDal<'_, '_> { let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; #[allow(deprecated)] let received_at = NaiveDateTime::from_timestamp_opt(secs, nanosecs).unwrap(); + let max_timestamp = NaiveDateTime::MAX.and_utc().timestamp() as u64; + #[allow(deprecated)] + let timestamp_asserter_range_start = + validation_traces.timestamp_asserter_range.clone().map(|x| { + NaiveDateTime::from_timestamp_opt(min(x.start, max_timestamp) as i64, 0).unwrap() + }); + #[allow(deprecated)] + let timestamp_asserter_range_end = validation_traces.timestamp_asserter_range.map(|x| { + NaiveDateTime::from_timestamp_opt(min(x.end, max_timestamp) as i64, 0).unwrap() + }); // Besides just adding or updating(on conflict) the record, we want to extract some info // from the query 
below, to indicate what actually happened: // 1) transaction is added @@ -346,6 +358,8 @@ impl TransactionsDal<'_, '_> { paymaster_input, execution_info, received_at, + timestamp_asserter_range_start, + timestamp_asserter_range_end, created_at, updated_at ) @@ -376,6 +390,8 @@ impl TransactionsDal<'_, '_> { $18::INT ), $19, + $20, + $21, NOW(), NOW() ) @@ -406,6 +422,8 @@ impl TransactionsDal<'_, '_> { ), in_mempool = FALSE, received_at = $19, + timestamp_asserter_range_start = $20, + timestamp_asserter_range_end = $21, created_at = NOW(), updated_at = NOW(), error = NULL @@ -441,7 +459,9 @@ impl TransactionsDal<'_, '_> { exec_info.gas_used as i64, (exec_info.initial_storage_writes + exec_info.repeated_storage_writes) as i32, exec_info.contracts_used as i32, - received_at + received_at, + timestamp_asserter_range_start, + timestamp_asserter_range_end, ) .instrument("insert_transaction_l2") .with_arg("tx_hash", &tx_hash) @@ -1728,7 +1748,7 @@ impl TransactionsDal<'_, '_> { gas_per_pubdata: u32, fee_per_gas: u64, limit: usize, - ) -> DalResult> { + ) -> DalResult> { let stashed_addresses: Vec<_> = stashed_accounts.iter().map(Address::as_bytes).collect(); sqlx::query!( r#" @@ -1819,8 +1839,14 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage) .await?; - let transactions = transactions.into_iter().map(|tx| tx.into()).collect(); - Ok(transactions) + let transactions_with_constraints = transactions + .into_iter() + .map(|tx| { + let constraint = TransactionTimeRangeConstraint::from(&tx); + (tx.into(), constraint) + }) + .collect(); + Ok(transactions_with_constraints) } pub async fn reset_mempool(&mut self) -> DalResult<()> { @@ -2212,6 +2238,29 @@ impl TransactionsDal<'_, '_> { .fetch_optional(self.storage) .await } + + pub async fn get_storage_tx_by_hash( + &mut self, + hash: H256, + ) -> DalResult> { + sqlx::query_as!( + StorageTransaction, + r#" + SELECT + * + FROM + transactions + WHERE + hash = $1 + "#, + hash.as_bytes() + ) + .map(Into::into) + 
.instrument("get_storage_tx_by_hash") + .with_arg("hash", &hash) + .fetch_optional(self.storage) + .await + } } #[cfg(test)] @@ -2240,7 +2289,11 @@ mod tests { let tx = mock_l2_transaction(); let tx_hash = tx.hash(); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); let mut tx_result = mock_execution_result(tx); diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index c2209bb9c93..44d7ed89c47 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -493,7 +493,7 @@ mod tests { use std::collections::HashMap; use zksync_types::{l2::L2Tx, Nonce, ProtocolVersion, ProtocolVersionId}; - use zksync_vm_interface::TransactionExecutionMetrics; + use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use super::*; use crate::{ @@ -509,7 +509,11 @@ mod tests { for tx in &txs { conn.transactions_dal() - .insert_transaction_l2(tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); } @@ -747,7 +751,11 @@ mod tests { tx.common_data.initiator_address = initiator; tx_by_nonce.insert(nonce, tx.clone()); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); } @@ -816,7 +824,11 @@ mod tests { tx.common_data.nonce = Nonce(1); tx.common_data.initiator_address = initiator; conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); diff --git 
a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 3792f356be4..ae4e1d1d5b4 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -74,6 +74,7 @@ mod tests { base_token_addr: Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS), chain_admin_addr: Some(addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), l2_da_validator_addr: Some(addr("0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), + l2_timestamp_asserter_addr: Some(addr("0x0000000000000000000000000000000000000002")), } } @@ -102,6 +103,7 @@ CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347 CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" +CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR="0x0000000000000000000000000000000000000002" "#; lock.set_env(config); diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index b72c2c5d5b9..325288056b3 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -33,6 +33,7 @@ mod vm_runner; mod wallets; mod da_client; +mod timestamp_asserter; pub trait FromEnv: Sized { fn from_env() -> anyhow::Result; diff --git a/core/lib/env_config/src/timestamp_asserter.rs b/core/lib/env_config/src/timestamp_asserter.rs new file mode 100644 index 00000000000..df586f5925e --- /dev/null +++ b/core/lib/env_config/src/timestamp_asserter.rs @@ -0,0 +1,34 @@ +use zksync_config::configs::chain::TimestampAsserterConfig; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for TimestampAsserterConfig { + fn from_env() -> anyhow::Result { + envy_load("timestamp_asserter", "TIMESTAMP_ASSERTER_") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + #[test] + fn from_env_timestamp_asserter() { + let mut lock = MUTEX.lock(); + let 
config = r#" + TIMESTAMP_ASSERTER_MIN_TIME_TILL_END_SEC=2 + "#; + lock.set_env(config); + + let actual = TimestampAsserterConfig::from_env().unwrap(); + assert_eq!( + actual, + TimestampAsserterConfig { + min_time_till_end_sec: 2, + } + ); + } +} diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index f6f9b72f9b6..70176b456dd 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -2,6 +2,7 @@ use std::collections::{hash_map, BTreeSet, HashMap}; use zksync_types::{ l1::L1Tx, l2::L2Tx, Address, ExecuteTransactionCommon, Nonce, PriorityOpId, Transaction, + TransactionTimeRangeConstraint, }; use crate::types::{AccountTransactions, L2TxFilter, MempoolScore}; @@ -54,10 +55,10 @@ impl MempoolStore { /// in other cases mempool relies on state keeper and its internal state to keep that info up to date pub fn insert( &mut self, - transactions: Vec, + transactions: Vec<(Transaction, TransactionTimeRangeConstraint)>, initial_nonces: HashMap, ) { - for transaction in transactions { + for (transaction, constraint) in transactions { let Transaction { common_data, execute, @@ -85,6 +86,7 @@ impl MempoolStore { received_timestamp_ms, raw_bytes, }, + constraint, &initial_nonces, ); } @@ -95,20 +97,36 @@ impl MempoolStore { } } + #[cfg(test)] + pub fn insert_without_constraints( + &mut self, + transactions: Vec, + initial_nonces: HashMap, + ) { + self.insert( + transactions + .into_iter() + .map(|x| (x, TransactionTimeRangeConstraint::default())) + .collect(), + initial_nonces, + ); + } + fn insert_l2_transaction( &mut self, transaction: L2Tx, + constraint: TransactionTimeRangeConstraint, initial_nonces: &HashMap, ) { let account = transaction.initiator_account(); let metadata = match self.l2_transactions_per_account.entry(account) { - hash_map::Entry::Occupied(mut txs) => txs.get_mut().insert(transaction), + hash_map::Entry::Occupied(mut txs) => txs.get_mut().insert(transaction, constraint), 
hash_map::Entry::Vacant(entry) => { let account_nonce = initial_nonces.get(&account).cloned().unwrap_or(Nonce(0)); entry .insert(AccountTransactions::new(account_nonce)) - .insert(transaction) + .insert(transaction, constraint) } }; if let Some(score) = metadata.previous_score { @@ -133,10 +151,17 @@ impl MempoolStore { } /// Returns next transaction for execution from mempool - pub fn next_transaction(&mut self, filter: &L2TxFilter) -> Option { + pub fn next_transaction( + &mut self, + filter: &L2TxFilter, + ) -> Option<(Transaction, TransactionTimeRangeConstraint)> { if let Some(transaction) = self.l1_transactions.remove(&self.next_priority_id) { self.next_priority_id += 1; - return Some(transaction.into()); + // L1 transactions can't use block.timestamp in AA and hence do not need to have a constraint + return Some(( + transaction.into(), + TransactionTimeRangeConstraint::default(), + )); } let mut removed = 0; @@ -163,7 +188,7 @@ impl MempoolStore { self.stashed_accounts.push(stashed_pointer.account); } // insert pointer to the next transaction if it exists - let (transaction, score) = self + let (transaction, constraint, score) = self .l2_transactions_per_account .get_mut(&tx_pointer.account) .expect("mempool: dangling pointer in priority queue") @@ -176,28 +201,31 @@ impl MempoolStore { .size .checked_sub((removed + 1) as u64) .expect("mempool size can't be negative"); - Some(transaction.into()) + Some((transaction.into(), constraint)) } /// When a state_keeper starts the block over after a rejected transaction, /// we have to rollback the nonces/ids in the mempool and /// reinsert the transactions from the block back into mempool. 
- pub fn rollback(&mut self, tx: &Transaction) { + pub fn rollback(&mut self, tx: &Transaction) -> TransactionTimeRangeConstraint { // rolling back the nonces and priority ids match &tx.common_data { ExecuteTransactionCommon::L1(data) => { // reset next priority id self.next_priority_id = self.next_priority_id.min(data.serial_id); + TransactionTimeRangeConstraint::default() } ExecuteTransactionCommon::L2(_) => { - if let Some(score) = self + if let Some((score, constraint)) = self .l2_transactions_per_account .get_mut(&tx.initiator_account()) .expect("account is not available in mempool") .reset(tx) { self.l2_priority_queue.remove(&score); + return constraint; } + TransactionTimeRangeConstraint::default() } ExecuteTransactionCommon::ProtocolUpgrade(_) => { panic!("Protocol upgrade tx is not supposed to be in mempool"); diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index b84ab7d5765..d40158ae955 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -9,7 +9,7 @@ use zksync_types::{ l1::{OpProcessingType, PriorityQueueType}, l2::L2Tx, Address, Execute, ExecuteTransactionCommon, L1TxCommonData, Nonce, PriorityOpId, Transaction, - H256, U256, + TransactionTimeRangeConstraint, H256, U256, }; use crate::{mempool_store::MempoolStore, types::L2TxFilter}; @@ -27,7 +27,7 @@ fn basic_flow() { gen_l2_tx(account1, Nonce(1)), ]; assert_eq!(mempool.next_transaction(&L2TxFilter::default()), None); - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account0, 0) @@ -46,7 +46,7 @@ fn basic_flow() { ); assert_eq!(mempool.next_transaction(&L2TxFilter::default()), None); // unclog second account and insert more transactions - mempool.insert( + mempool.insert_without_constraints( vec![gen_l2_tx(account1, Nonce(0)), gen_l2_tx(account0, Nonce(3))], HashMap::new(), ); @@ -72,10 +72,10 @@ fn 
missing_txns() { ]; let mut nonces = HashMap::new(); nonces.insert(account, Nonce(5)); - mempool.insert(transactions, nonces); + mempool.insert_without_constraints(transactions, nonces); assert_eq!(mempool.next_transaction(&L2TxFilter::default()), None); // missing transaction unclogs mempool - mempool.insert(vec![gen_l2_tx(account, Nonce(5))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(5))], HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account, 5) @@ -90,7 +90,7 @@ fn missing_txns() { ); // filling remaining gap - mempool.insert(vec![gen_l2_tx(account, Nonce(8))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(8))], HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account, 8) @@ -110,10 +110,11 @@ fn prioritize_l1_txns() { gen_l2_tx(account, Nonce(1)), gen_l1_tx(PriorityOpId(0)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert!(mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .is_l1()) } @@ -125,13 +126,14 @@ fn l1_txns_priority_id() { gen_l1_tx(PriorityOpId(2)), gen_l1_tx(PriorityOpId(3)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert!(mempool.next_transaction(&L2TxFilter::default()).is_none()); - mempool.insert(vec![gen_l1_tx(PriorityOpId(0))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l1_tx(PriorityOpId(0))], HashMap::new()); for idx in 0..4 { let data = mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .common_data; match data { ExecuteTransactionCommon::L1(data) => { @@ -153,7 +155,7 @@ fn rejected_tx() { gen_l2_tx(account, Nonce(3)), gen_l2_tx(account, Nonce(5)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!( 
view(mempool.next_transaction(&L2TxFilter::default())), (account, 0) @@ -167,7 +169,7 @@ fn rejected_tx() { assert!(mempool.next_transaction(&L2TxFilter::default()).is_none()); // replace transaction and unblock account - mempool.insert(vec![gen_l2_tx(account, Nonce(1))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(1))], HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account, 1) @@ -186,9 +188,9 @@ fn rejected_tx() { fn replace_tx() { let mut mempool = MempoolStore::new(PriorityOpId(0), 100); let account = Address::random(); - mempool.insert(vec![gen_l2_tx(account, Nonce(0))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(0))], HashMap::new()); // replace it - mempool.insert( + mempool.insert_without_constraints( vec![gen_l2_tx_with_timestamp( account, Nonce(0), @@ -206,7 +208,7 @@ fn two_ready_txs() { let account0 = Address::random(); let account1 = Address::random(); let transactions = vec![gen_l2_tx(account0, Nonce(0)), gen_l2_tx(account1, Nonce(0))]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!( HashSet::<(_, _)>::from_iter(vec![ view(mempool.next_transaction(&L2TxFilter::default())), @@ -228,10 +230,10 @@ fn mempool_size() { gen_l2_tx(account0, Nonce(3)), gen_l2_tx(account1, Nonce(1)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!(mempool.stats().l2_transaction_count, 5); // replacement - mempool.insert(vec![gen_l2_tx(account0, Nonce(2))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account0, Nonce(2))], HashMap::new()); assert_eq!(mempool.stats().l2_transaction_count, 5); // load next mempool.next_transaction(&L2TxFilter::default()); @@ -261,7 +263,7 @@ fn filtering() { // First account will have two transactions: one with too low pubdata price and one with 
the right value. // Second account will have just one transaction with the right value. - mempool.insert( + mempool.insert_without_constraints( gen_transactions_for_filtering(vec![ (account0, Nonce(0), unix_timestamp_ms(), 0), (account0, Nonce(1), unix_timestamp_ms(), 1), @@ -302,7 +304,7 @@ fn stashed_accounts() { let account0 = Address::random(); let account1 = Address::random(); - mempool.insert( + mempool.insert_without_constraints( gen_transactions_for_filtering(vec![ (account0, Nonce(0), unix_timestamp_ms(), 0), (account0, Nonce(1), unix_timestamp_ms(), 1), @@ -334,7 +336,7 @@ fn mempool_capacity() { gen_l2_tx_with_timestamp(account2, Nonce(0), unix_timestamp_ms() + 2), gen_l2_tx(account3, Nonce(1)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); // Mempool is full. Accounts with non-sequential nonces and some accounts with lowest score should be purged. assert_eq!( HashSet::<_>::from_iter(mempool.get_mempool_info().purged_accounts), @@ -346,6 +348,7 @@ fn mempool_capacity() { mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .initiator_account(), account0 ); @@ -354,6 +357,7 @@ fn mempool_capacity() { mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .initiator_account(), account1 ); @@ -370,7 +374,7 @@ fn mempool_does_not_purge_all_accounts() { gen_l2_tx(account0, Nonce(1)), gen_l2_tx(account1, Nonce(1)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); // Mempool is full. Account 1 has tx with non-sequential nonce so it should be purged. // Txs from account 0 have sequential nonces but their number is greater than capacity; they should be kept. 
assert_eq!(mempool.get_mempool_info().purged_accounts, vec![account1]); @@ -380,6 +384,7 @@ fn mempool_does_not_purge_all_accounts() { mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .initiator_account(), account0 ); @@ -437,8 +442,8 @@ fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { } } -fn view(transaction: Option) -> (Address, u32) { - let tx = transaction.unwrap(); +fn view(transaction: Option<(Transaction, TransactionTimeRangeConstraint)>) -> (Address, u32) { + let tx = transaction.unwrap().0; (tx.initiator_account(), tx.nonce().unwrap().0) } diff --git a/core/lib/mempool/src/types.rs b/core/lib/mempool/src/types.rs index 99a63ffd08e..7c2694dff5e 100644 --- a/core/lib/mempool/src/types.rs +++ b/core/lib/mempool/src/types.rs @@ -1,14 +1,15 @@ use std::{cmp::Ordering, collections::HashMap}; use zksync_types::{ - fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, Address, Nonce, Transaction, U256, + fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, Address, Nonce, Transaction, + TransactionTimeRangeConstraint, U256, }; /// Pending mempool transactions of account #[derive(Debug)] pub(crate) struct AccountTransactions { /// transactions that belong to given account keyed by transaction nonce - transactions: HashMap, + transactions: HashMap, /// account nonce in mempool /// equals to committed nonce in db + number of transactions sent to state keeper nonce: Nonce, @@ -23,7 +24,11 @@ impl AccountTransactions { } /// Inserts new transaction for given account. 
Returns insertion metadata - pub fn insert(&mut self, transaction: L2Tx) -> InsertionMetadata { + pub fn insert( + &mut self, + transaction: L2Tx, + constraint: TransactionTimeRangeConstraint, + ) -> InsertionMetadata { let mut metadata = InsertionMetadata::default(); let nonce = transaction.common_data.nonce; // skip insertion if transaction is old @@ -33,8 +38,8 @@ impl AccountTransactions { let new_score = Self::score_for_transaction(&transaction); let previous_score = self .transactions - .insert(nonce, transaction) - .map(|tx| Self::score_for_transaction(&tx)); + .insert(nonce, (transaction, constraint)) + .map(|x| Self::score_for_transaction(&x.0)); metadata.is_new = previous_score.is_none(); if nonce == self.nonce { metadata.new_score = Some(new_score); @@ -43,9 +48,9 @@ impl AccountTransactions { metadata } - /// Returns next transaction to be included in block and optional score of its successor - /// Panics if no such transaction exists - pub fn next(&mut self) -> (L2Tx, Option) { + /// Returns next transaction to be included in block, its time range constraint and optional + /// score of its successor. Panics if no such transaction exists + pub fn next(&mut self) -> (L2Tx, TransactionTimeRangeConstraint, Option) { let transaction = self .transactions .remove(&self.nonce) @@ -54,12 +59,16 @@ impl AccountTransactions { let score = self .transactions .get(&self.nonce) - .map(Self::score_for_transaction); - (transaction, score) + .map(|(tx, _c)| Self::score_for_transaction(tx)); + (transaction.0, transaction.1, score) } - /// Handles transaction rejection. Returns optional score of its successor - pub fn reset(&mut self, transaction: &Transaction) -> Option { + /// Handles transaction rejection. 
Returns optional score of its successor and time range + /// constraint that the transaction has been added to the mempool with + pub fn reset( + &mut self, + transaction: &Transaction, + ) -> Option<(MempoolScore, TransactionTimeRangeConstraint)> { // current nonce for the group needs to be reset let tx_nonce = transaction .nonce() @@ -67,7 +76,7 @@ impl AccountTransactions { self.nonce = self.nonce.min(tx_nonce); self.transactions .get(&(tx_nonce + 1)) - .map(Self::score_for_transaction) + .map(|(tx, c)| (Self::score_for_transaction(tx), c.clone())) } pub fn len(&self) -> usize { diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index 057551a9efe..a095be9f374 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -1,7 +1,7 @@ use std::{ collections::{BTreeSet, HashSet}, marker::PhantomData, - sync::Arc, + sync::{Arc, Mutex}, }; use once_cell::sync::OnceCell; @@ -13,6 +13,10 @@ use zksync_types::{ vm::VmVersion, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256, }; use zksync_utils::{address_to_u256, be_bytes_to_safe_address, u256_to_h256}; +use zksync_vm_interface::{ + tracer::{TimestampAsserterParams, ValidationTraces}, + L1BatchEnv, +}; use self::types::{NewTrustedValidationItems, ValidationTracerMode}; use crate::{ @@ -47,8 +51,11 @@ pub struct ValidationTracer { trusted_address_slots: HashSet<(Address, U256)>, computational_gas_used: u32, computational_gas_limit: u32, + timestamp_asserter_params: Option, vm_version: VmVersion, + l1_batch_env: L1BatchEnv, pub result: Arc>, + pub traces: Arc>, _marker: PhantomData H>, } @@ -57,30 +64,34 @@ type ValidationRoundResult = Result ValidationTracer { const MAX_ALLOWED_SLOT_OFFSET: u32 = 127; - pub fn new( - params: ValidationParams, - vm_version: VmVersion, - ) -> (Self, Arc>) { - let result = Arc::new(OnceCell::new()); - ( - Self { - validation_mode: ValidationTracerMode::NoValidation, - 
auxilary_allowed_slots: Default::default(), - - should_stop_execution: false, - user_address: params.user_address, - paymaster_address: params.paymaster_address, - trusted_slots: params.trusted_slots, - trusted_addresses: params.trusted_addresses, - trusted_address_slots: params.trusted_address_slots, - computational_gas_used: 0, - computational_gas_limit: params.computational_gas_limit, - vm_version, - result: result.clone(), - _marker: Default::default(), - }, - result, - ) + pub fn new(params: ValidationParams, vm_version: VmVersion, l1_batch_env: L1BatchEnv) -> Self { + Self { + validation_mode: ValidationTracerMode::NoValidation, + auxilary_allowed_slots: Default::default(), + + should_stop_execution: false, + user_address: params.user_address, + paymaster_address: params.paymaster_address, + trusted_slots: params.trusted_slots, + trusted_addresses: params.trusted_addresses, + trusted_address_slots: params.trusted_address_slots, + computational_gas_used: 0, + computational_gas_limit: params.computational_gas_limit, + timestamp_asserter_params: params.timestamp_asserter_params.clone(), + vm_version, + result: Arc::new(OnceCell::new()), + traces: Arc::new(Mutex::new(ValidationTraces::default())), + _marker: Default::default(), + l1_batch_env, + } + } + + pub fn get_result(&self) -> Arc> { + self.result.clone() + } + + pub fn get_traces(&self) -> Arc> { + self.traces.clone() } fn process_validation_round_result(&mut self, result: ValidationRoundResult) { @@ -154,6 +165,11 @@ impl ValidationTracer { return true; } + // Allow reading any storage slot from the timestamp asserter contract + if self.timestamp_asserter_params.as_ref().map(|x| x.address) == Some(msg_sender) { + return true; + } + false } @@ -201,6 +217,7 @@ impl ValidationTracer { trusted_addresses: self.trusted_addresses.clone(), trusted_address_slots: self.trusted_address_slots.clone(), computational_gas_limit: self.computational_gas_limit, + timestamp_asserter_params: 
self.timestamp_asserter_params.clone(), } } } diff --git a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs index c206bd6fb2a..d3dc7fd87c4 100644 --- a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs @@ -3,7 +3,7 @@ use zk_evm_1_5_0::{ zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, }; use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS; -use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256}; +use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256, U256}; use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256}; use crate::{ @@ -26,6 +26,8 @@ use crate::{ HistoryMode, }; +pub const TIMESTAMP_ASSERTER_FUNCTION_SELECTOR: [u8; 4] = [0x5b, 0x1a, 0x0c, 0x91]; + impl ValidationTracer { fn check_user_restrictions_vm_latest( &mut self, @@ -81,6 +83,52 @@ impl ValidationTracer { called_address, )); } + // If this is a call to the timestamp asserter, extract the function arguments and store them in ValidationTraces. + // These arguments are used by the mempool for transaction filtering. The call data length should be 68 bytes: + // a 4-byte function selector followed by two U256 values. 
+ if let Some(params) = &self.timestamp_asserter_params { + if called_address == params.address + && far_call_abi.memory_quasi_fat_pointer.length == 68 + { + let calldata_page = get_calldata_page_via_abi( + &far_call_abi, + state.vm_local_state.callstack.current.base_memory_page, + ); + let calldata = memory.read_unaligned_bytes( + calldata_page as usize, + far_call_abi.memory_quasi_fat_pointer.start as usize, + 68, + ); + + if calldata[..4] == TIMESTAMP_ASSERTER_FUNCTION_SELECTOR { + // start and end need to be capped to u64::MAX to avoid overflow + let start = U256::from_big_endian( + &calldata[calldata.len() - 64..calldata.len() - 32], + ) + .try_into() + .unwrap_or(u64::MAX); + let end = U256::from_big_endian(&calldata[calldata.len() - 32..]) + .try_into() + .unwrap_or(u64::MAX); + + // using self.l1_batch_env.timestamp is ok here because the tracer is always + // used in a oneshot execution mode + if end + < self.l1_batch_env.timestamp + + params.min_time_till_end.as_secs() + { + return Err( + ViolatedValidationRule::TimestampAssertionCloseToRangeEnd, + ); + } + + self.traces + .lock() + .unwrap() + .apply_timestamp_asserter_range(start..end); + } + } + } } } Opcode::Context(context) => { diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 84f03c5afe3..660246928ed 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -98,6 +98,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("l2_testnet_paymaster_addr")?, + l2_timestamp_asserter_addr: l2 + .timestamp_asserter_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_timestamp_asserter_addr")?, l1_multicall3_addr: required(&l1.multicall3_addr) .and_then(|x| parse_h160(x)) .context("l1_multicall3_addr")?, @@ -158,6 +164,9 @@ impl ProtoRepr for proto::Contracts { legacy_shared_bridge_addr: this .l2_legacy_shared_bridge_addr .map(|a| format!("{:?}", a)), + 
timestamp_asserter_addr: this + .l2_timestamp_asserter_addr + .map(|a| format!("{:?}", a)), }), bridges: Some(proto::Bridges { shared: Some(proto::Bridge { diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index b73539a0897..83b4c84f20b 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -46,6 +46,7 @@ impl ProtoRepr for proto::GeneralConfig { ), experimental_vm_config: read_optional_repr(&self.experimental_vm), prover_job_monitor_config: read_optional_repr(&self.prover_job_monitor), + timestamp_asserter_config: read_optional_repr(&self.timestamp_asserter), }) } @@ -106,6 +107,10 @@ impl ProtoRepr for proto::GeneralConfig { .prover_job_monitor_config .as_ref() .map(ProtoRepr::build), + timestamp_asserter: this + .timestamp_asserter_config + .as_ref() + .map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 68f7f699de2..09f42422c51 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -36,6 +36,7 @@ mod snapshot_recovery; mod snapshots_creator; #[cfg(test)] mod tests; +mod timestamp_asserter; mod utils; mod vm_runner; mod wallets; diff --git a/core/lib/protobuf_config/src/proto/config/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto index 6ab03e6aa11..4ae0ee1614f 100644 --- a/core/lib/protobuf_config/src/proto/config/contracts.proto +++ b/core/lib/protobuf_config/src/proto/config/contracts.proto @@ -23,6 +23,7 @@ message L2 { optional string testnet_paymaster_addr = 1; // optional; H160 optional string da_validator_addr = 2; // optional; H160 optional string legacy_shared_bridge_addr = 3; // optional; H160 + optional string timestamp_asserter_addr = 4; // optional; H160 } message Bridge { diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index 
ee70b61b18b..216272f3f9a 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -26,6 +26,7 @@ import "zksync/config/external_proof_integration_api.proto"; import "zksync/core/consensus.proto"; import "zksync/config/prover_job_monitor.proto"; import "zksync/config/da_client.proto"; +import "zksync/config/timestamp_asserter.proto"; message GeneralConfig { optional database.Postgres postgres = 1; @@ -62,4 +63,5 @@ message GeneralConfig { optional experimental.Vm experimental_vm = 44; optional prover_job_monitor.ProverJobMonitor prover_job_monitor = 45; optional da_client.DataAvailabilityClient da_client = 46; + optional timestamp_asserter.TimestampAsserter timestamp_asserter = 47; } diff --git a/core/lib/protobuf_config/src/proto/config/timestamp_asserter.proto b/core/lib/protobuf_config/src/proto/config/timestamp_asserter.proto new file mode 100644 index 00000000000..c8d0b9d1fec --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/timestamp_asserter.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package zksync.config.timestamp_asserter; + +message TimestampAsserter { + optional uint32 min_time_till_end_sec = 1; // required; u32 +} diff --git a/core/lib/protobuf_config/src/timestamp_asserter.rs b/core/lib/protobuf_config/src/timestamp_asserter.rs new file mode 100644 index 00000000000..5984caff8c6 --- /dev/null +++ b/core/lib/protobuf_config/src/timestamp_asserter.rs @@ -0,0 +1,19 @@ +use anyhow::Context; +use zksync_config::configs::chain::TimestampAsserterConfig; +use zksync_protobuf::{required, ProtoRepr}; + +impl ProtoRepr for crate::proto::config::timestamp_asserter::TimestampAsserter { + type Type = TimestampAsserterConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + min_time_till_end_sec: *required(&self.min_time_till_end_sec) + .context("timestamp_asserter_min_time_till_end_sec")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + 
min_time_till_end_sec: Some(this.min_time_till_end_sec), + } + } +} diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 69e6e42fd51..320264f28f0 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -5,7 +5,7 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use std::fmt; +use std::{fmt, ops::Range}; use anyhow::Context as _; use fee::encoding_len; @@ -416,3 +416,8 @@ impl Transaction { }) } } + +#[derive(Clone, Serialize, Debug, Default, Eq, PartialEq, Hash)] +pub struct TransactionTimeRangeConstraint { + pub timestamp_asserter_range: Option>, +} diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs index a7363c633c6..e211328b5ec 100644 --- a/core/lib/vm_executor/src/oneshot/mock.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -4,18 +4,21 @@ use async_trait::async_trait; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, storage::ReadStorage, - tracer::{ValidationError, ValidationParams}, + tracer::{ValidationError, ValidationParams, ValidationTraces}, ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TxExecutionArgs, TxExecutionMode, VmExecutionResultAndLogs, }; use zksync_types::{l2::L2Tx, Transaction}; type TxResponseFn = dyn Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + Send + Sync; +type TxValidationTracesResponseFn = + dyn Fn(&Transaction, &OneshotEnv) -> ValidationTraces + Send + Sync; /// Mock [`OneshotExecutor`] implementation. 
pub struct MockOneshotExecutor { call_responses: Box, tx_responses: Box, + tx_validation_traces_responses: Box, } impl fmt::Debug for MockOneshotExecutor { @@ -35,6 +38,7 @@ impl Default for MockOneshotExecutor { tx_responses: Box::new(|tx, _| { panic!("Unexpect transaction call: {tx:?}"); }), + tx_validation_traces_responses: Box::new(|_, _| ValidationTraces::default()), } } } @@ -57,6 +61,13 @@ impl MockOneshotExecutor { self.tx_responses = self.wrap_responses(responses); } + pub fn set_tx_validation_traces_responses(&mut self, responses: F) + where + F: Fn(&Transaction, &OneshotEnv) -> ValidationTraces + 'static + Send + Sync, + { + self.tx_validation_traces_responses = Box::new(responses); + } + fn wrap_responses(&mut self, responses: F) -> Box where F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, @@ -82,11 +93,11 @@ impl MockOneshotExecutor { self.tx_responses = Box::new(responses); } - fn mock_inspect(&self, env: OneshotEnv, args: TxExecutionArgs) -> VmExecutionResultAndLogs { + fn mock_inspect(&self, env: &OneshotEnv, args: TxExecutionArgs) -> VmExecutionResultAndLogs { match env.system.execution_mode { - TxExecutionMode::EthCall => (self.call_responses)(&args.transaction, &env), + TxExecutionMode::EthCall => (self.call_responses)(&args.transaction, env), TxExecutionMode::VerifyExecute | TxExecutionMode::EstimateFee => { - (self.tx_responses)(&args.transaction, &env) + (self.tx_responses)(&args.transaction, env) } } } @@ -105,7 +116,7 @@ where _params: OneshotTracingParams, ) -> anyhow::Result { Ok(OneshotTransactionExecutionResult { - tx_result: Box::new(self.mock_inspect(env, args)), + tx_result: Box::new(self.mock_inspect(&env, args)), compression_result: Ok(()), call_traces: vec![], }) @@ -123,14 +134,16 @@ where env: OneshotEnv, tx: L2Tx, _validation_params: ValidationParams, - ) -> anyhow::Result> { + ) -> anyhow::Result> { Ok( match self - .mock_inspect(env, TxExecutionArgs::for_validation(tx)) + .mock_inspect(&env, 
TxExecutionArgs::for_validation(tx.clone())) .result { ExecutionResult::Halt { reason } => Err(ValidationError::FailedTx(reason)), - ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } => Ok(()), + ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } => { + Ok((self.tx_validation_traces_responses)(&tx.into(), &env)) + } }, ) } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 154c838f824..7d45dcca8cd 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -18,7 +18,7 @@ use zksync_multivm::{ interface::{ executor::{OneshotExecutor, TransactionValidator}, storage::{ReadStorage, StorageView, StorageWithOverrides}, - tracer::{ValidationError, ValidationParams}, + tracer::{ValidationError, ValidationParams, ValidationTraces}, utils::{DivergenceHandler, ShadowVm}, Call, ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, @@ -171,13 +171,14 @@ where env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, - ) -> anyhow::Result> { + ) -> anyhow::Result> { anyhow::ensure!( env.system.execution_mode == TxExecutionMode::VerifyExecute, "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)", env.system.execution_mode ); + let l1_batch_env = env.l1_batch.clone(); let sandbox = VmSandbox { fast_vm_mode: FastVmMode::Old, panic_on_divergence: self.panic_on_divergence, @@ -188,11 +189,13 @@ where }; tokio::task::spawn_blocking(move || { - let (validation_tracer, mut validation_result) = - ValidationTracer::::new( - validation_params, - sandbox.env.system.version.into(), - ); + let validation_tracer = ValidationTracer::::new( + validation_params, + sandbox.env.system.version.into(), + l1_batch_env, + ); + let mut validation_result = validation_tracer.get_result(); + let validation_traces = validation_tracer.get_traces(); let 
tracers = vec![validation_tracer.into_tracer_pointer()]; let exec_result = sandbox.execute_in_vm(|vm, transaction| { @@ -209,7 +212,7 @@ where match (exec_result.result, validation_result) { (_, Err(violated_rule)) => Err(ValidationError::ViolatedRule(violated_rule)), (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), - _ => Ok(()), + _ => Ok(validation_traces.lock().unwrap().clone()), } }) .await diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs index 60522ba338a..30534b1420c 100644 --- a/core/lib/vm_interface/src/executor.rs +++ b/core/lib/vm_interface/src/executor.rs @@ -7,7 +7,7 @@ use zksync_types::{commitment::PubdataParams, l2::L2Tx, Transaction}; use crate::{ storage::{ReadStorage, StorageView}, - tracer::{ValidationError, ValidationParams}, + tracer::{ValidationError, ValidationParams, ValidationTraces}, BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, SystemEnv, TxExecutionArgs, }; @@ -69,5 +69,5 @@ pub trait TransactionValidator: OneshotExecutor { env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, - ) -> anyhow::Result>; + ) -> anyhow::Result>; } diff --git a/core/lib/vm_interface/src/types/errors/halt.rs b/core/lib/vm_interface/src/types/errors/halt.rs index 88328e42b81..d24f55ab504 100644 --- a/core/lib/vm_interface/src/types/errors/halt.rs +++ b/core/lib/vm_interface/src/types/errors/halt.rs @@ -42,6 +42,7 @@ pub enum Halt { VMPanic, TracerCustom(String), FailedToPublishCompressedBytecodes, + FailedBlockTimestampAssertion, } impl fmt::Display for Halt { @@ -116,6 +117,9 @@ impl fmt::Display for Halt { Halt::FailedToPublishCompressedBytecodes => { write!(f, "Failed to publish compressed bytecodes") } + Halt::FailedBlockTimestampAssertion => { + write!(f, "Transaction failed block.timestamp assertion") + } } } } diff --git a/core/lib/vm_interface/src/types/tracer.rs 
b/core/lib/vm_interface/src/types/tracer.rs index ba07772c7f2..1c3f65f443e 100644 --- a/core/lib/vm_interface/src/types/tracer.rs +++ b/core/lib/vm_interface/src/types/tracer.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, fmt}; +use std::{collections::HashSet, fmt, ops::Range, time}; use zksync_types::{Address, U256}; @@ -57,6 +57,17 @@ pub struct ValidationParams { pub trusted_address_slots: HashSet<(Address, U256)>, /// Number of computational gas that validation step is allowed to use. pub computational_gas_limit: u32, + /// Parameters of the timestamp asserter if configured + pub timestamp_asserter_params: Option, +} + +#[derive(Debug, Clone)] +pub struct TimestampAsserterParams { + /// Address of the timestamp asserter. This contract is allowed to touch block.timestamp regardless + /// of the calling context. + pub address: Address, + /// Minimum time between current block.timestamp and the end of the asserted range + pub min_time_till_end: time::Duration, } /// Rules that can be violated when validating a transaction. @@ -70,6 +81,8 @@ pub enum ViolatedValidationRule { TouchedDisallowedContext, /// The transaction used too much gas during validation. TookTooManyComputationalGas(u32), + /// The transaction failed block.timestamp assertion because the block.timestamp is too close to the range end + TimestampAssertionCloseToRangeEnd, } impl fmt::Display for ViolatedValidationRule { @@ -91,6 +104,9 @@ impl fmt::Display for ViolatedValidationRule { "Took too many computational gas, allowed limit: {gas_limit}" ) } + ViolatedValidationRule::TimestampAssertionCloseToRangeEnd => { + write!(f, "block.timestamp is too close to the range end") + } } } } @@ -104,6 +120,30 @@ pub enum ValidationError { ViolatedRule(ViolatedValidationRule), } +/// Traces the validation of a transaction, providing visibility into the aspects the transaction interacts with. 
+/// For instance, the `timestamp_asserter_range` represents the range within which the transaction might make
ValidationTraces { + timestamp_asserter_range: Some(10..30), + }; + validation_traces.apply_timestamp_asserter_range(20..40); + assert_eq!(validation_traces.timestamp_asserter_range, Some(20..30)); + } +} diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index 47aae2a0835..07a7cc4ff1c 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -51,6 +51,9 @@ pub trait ZksNamespace { #[method(name = "getTestnetPaymaster")] async fn get_testnet_paymaster(&self) -> RpcResult>; + #[method(name = "getTimestampAsserter")] + async fn get_timestamp_asserter(&self) -> RpcResult>; + #[method(name = "getBridgeContracts")] async fn get_bridge_contracts(&self) -> RpcResult; diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index eb2170bcc84..5faef68507f 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -6,7 +6,7 @@ use zksync_config::{ api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig}, chain::{ CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, - StateKeeperConfig, + StateKeeperConfig, TimestampAsserterConfig, }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, @@ -81,6 +81,7 @@ pub struct TempConfigStore { pub external_proof_integration_api_config: Option, pub experimental_vm_config: Option, pub prover_job_monitor_config: Option, + pub timestamp_asserter_config: Option, } impl TempConfigStore { @@ -122,6 +123,7 @@ impl TempConfigStore { .clone(), experimental_vm_config: self.experimental_vm_config.clone(), prover_job_monitor_config: self.prover_job_monitor_config.clone(), + timestamp_asserter_config: self.timestamp_asserter_config.clone(), } } @@ -203,6 +205,7 @@ fn load_env_config() -> anyhow::Result { external_proof_integration_api_config: 
ExternalProofIntegrationApiConfig::from_env().ok(), experimental_vm_config: ExperimentalVmConfig::from_env().ok(), prover_job_monitor_config: ProverJobMonitorConfig::from_env().ok(), + timestamp_asserter_config: TimestampAsserterConfig::from_env().ok(), }) } diff --git a/core/node/api_server/src/execution_sandbox/error.rs b/core/node/api_server/src/execution_sandbox/error.rs index 5d63d50a3c8..4523412ae19 100644 --- a/core/node/api_server/src/execution_sandbox/error.rs +++ b/core/node/api_server/src/execution_sandbox/error.rs @@ -26,6 +26,8 @@ pub(crate) enum SandboxExecutionError { that caused this error. Error description: {0}" )] UnexpectedVMBehavior(String), + #[error("Transaction failed block.timestamp assertion")] + FailedBlockTimestampAssertion, } impl From for SandboxExecutionError { @@ -67,6 +69,7 @@ impl From for SandboxExecutionError { Halt::FailedToPublishCompressedBytecodes => { Self::UnexpectedVMBehavior("Failed to publish compressed bytecodes".to_string()) } + Halt::FailedBlockTimestampAssertion => Self::FailedBlockTimestampAssertion, } } } diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 7958b5ed3c1..d58bf6ca38f 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -9,7 +9,7 @@ use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, storage::{ReadStorage, StorageWithOverrides}, - tracer::{ValidationError, ValidationParams}, + tracer::{TimestampAsserterParams, ValidationError, ValidationParams, ValidationTraces}, Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, }; @@ -99,6 +99,7 @@ pub(crate) struct SandboxExecutor { engine: SandboxExecutorEngine, pub(super) options: SandboxExecutorOptions, storage_caches: Option, + pub(super) 
timestamp_asserter_params: Option, } impl SandboxExecutor { @@ -106,6 +107,7 @@ impl SandboxExecutor { options: SandboxExecutorOptions, caches: PostgresStorageCaches, missed_storage_invocation_limit: usize, + timestamp_asserter_params: Option, ) -> Self { let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); executor.set_fast_vm_mode(options.fast_vm_mode); @@ -117,6 +119,7 @@ impl SandboxExecutor { engine: SandboxExecutorEngine::Real(executor), options, storage_caches: Some(caches), + timestamp_asserter_params, } } @@ -132,6 +135,7 @@ impl SandboxExecutor { engine: SandboxExecutorEngine::Mock(executor), options, storage_caches: None, + timestamp_asserter_params: None, } } @@ -295,7 +299,7 @@ where env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, - ) -> anyhow::Result> { + ) -> anyhow::Result> { match &self.engine { SandboxExecutorEngine::Real(executor) => { executor diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index e342f2d73de..0aff15b973e 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -217,6 +217,7 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args SandboxExecutorOptions::mock().await, PostgresStorageCaches::new(1, 1), usize::MAX, + None, ); let fee_input = BatchFeeInput::l1_pegged(55, 555); @@ -265,6 +266,7 @@ async fn validating_transaction(set_balance: bool) { SandboxExecutorOptions::mock().await, PostgresStorageCaches::new(1, 1), usize::MAX, + None, ); let fee_input = BatchFeeInput::l1_pegged(55, 555); diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index 758547abbd6..3d58f807a89 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -6,7 +6,10 @@ use zksync_dal::{Connection, Core, 
CoreDal}; use zksync_multivm::interface::{ executor::TransactionValidator, storage::StorageWithOverrides, - tracer::{ValidationError as RawValidationError, ValidationParams}, + tracer::{ + TimestampAsserterParams, ValidationError as RawValidationError, ValidationParams, + ValidationTraces, + }, }; use zksync_types::{ fee_model::BatchFeeInput, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, @@ -38,13 +41,14 @@ impl SandboxExecutor { block_args: BlockArgs, fee_input: BatchFeeInput, whitelisted_tokens_for_aa: &[Address], - ) -> Result<(), ValidationError> { + ) -> Result { let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); let validation_params = get_validation_params( &mut connection, &tx, self.options.eth_call.validation_computational_gas_limit(), whitelisted_tokens_for_aa, + self.timestamp_asserter_params.clone(), ) .await .context("failed getting validation params")?; @@ -79,6 +83,7 @@ pub(super) async fn get_validation_params( tx: &L2Tx, computational_gas_limit: u32, whitelisted_tokens_for_aa: &[Address], + timestamp_asserter_params: Option, ) -> anyhow::Result { let method_latency = EXECUTION_METRICS.get_validation_params.start(); let user_address = tx.common_data.initiator_address; @@ -125,5 +130,6 @@ pub(super) async fn get_validation_params( trusted_addresses, trusted_address_slots, computational_gas_limit, + timestamp_asserter_params, }) } diff --git a/core/node/api_server/src/tx_sender/master_pool_sink.rs b/core/node/api_server/src/tx_sender/master_pool_sink.rs index 736edf0b247..06333f0c136 100644 --- a/core/node/api_server/src/tx_sender/master_pool_sink.rs +++ b/core/node/api_server/src/tx_sender/master_pool_sink.rs @@ -2,7 +2,7 @@ use std::collections::hash_map::{Entry, HashMap}; use tokio::sync::Mutex; use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Core, CoreDal}; -use zksync_multivm::interface::TransactionExecutionMetrics; +use 
zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_types::{l2::L2Tx, Address, Nonce, H256}; @@ -31,6 +31,7 @@ impl TxSink for MasterPoolSink { &self, tx: &L2Tx, execution_metrics: TransactionExecutionMetrics, + validation_traces: ValidationTraces, ) -> Result { let address_and_nonce = (tx.initiator_account(), tx.nonce()); @@ -55,7 +56,7 @@ impl TxSink for MasterPoolSink { let result = match self.master_pool.connection_tagged("api").await { Ok(mut connection) => connection .transactions_dal() - .insert_transaction_l2(tx, execution_metrics) + .insert_transaction_l2(tx, execution_metrics, validation_traces) .await .inspect(|submission_res_handle| { APP_METRICS.processed_txs[&TxStage::Mempool(*submission_res_handle)].inc(); diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 75cc1ad602f..011d9e4e2b2 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -1,6 +1,6 @@ //! Helper module to submit transactions into the ZKsync Network. 
-use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::sync::RwLock; @@ -9,7 +9,10 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::{OneshotTracingParams, TransactionExecutionMetrics, VmExecutionResultAndLogs}, + interface::{ + tracer::TimestampAsserterParams as TracerTimestampAsserterParams, OneshotTracingParams, + TransactionExecutionMetrics, VmExecutionResultAndLogs, + }, utils::{derive_base_fee_and_gas_per_pubdata, get_max_batch_gas_limit}, }; use zksync_node_fee_model::{ApiFeeInputProvider, BatchFeeModelInputProvider}; @@ -205,6 +208,12 @@ impl TxSenderBuilder { executor_options, storage_caches, missed_storage_invocation_limit, + self.config.timestamp_asserter_params.clone().map(|params| { + TracerTimestampAsserterParams { + address: params.address, + min_time_till_end: params.min_time_till_end, + } + }), ); TxSender(Arc::new(TxSenderInner { @@ -234,6 +243,13 @@ pub struct TxSenderConfig { pub validation_computational_gas_limit: u32, pub chain_id: L2ChainId, pub whitelisted_tokens_for_aa: Vec
, + pub timestamp_asserter_params: Option, +} + +#[derive(Debug, Clone)] +pub struct TimestampAsserterParams { + pub address: Address, + pub min_time_till_end: Duration, } impl TxSenderConfig { @@ -242,6 +258,7 @@ impl TxSenderConfig { web3_json_config: &Web3JsonRpcConfig, fee_account_addr: Address, chain_id: L2ChainId, + timestamp_asserter_params: Option, ) -> Self { Self { fee_account_addr, @@ -253,6 +270,7 @@ impl TxSenderConfig { .validation_computational_gas_limit, chain_id, whitelisted_tokens_for_aa: web3_json_config.whitelisted_tokens_for_aa.clone(), + timestamp_asserter_params, } } } @@ -361,14 +379,15 @@ impl TxSender { if !execution_output.are_published_bytecodes_ok { return Err(SubmitTxError::FailedToPublishCompressedBytecodes); } - let mut stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DbInsert); self.ensure_tx_executable(&tx.clone().into(), &execution_output.metrics, true)?; + + let validation_traces = validation_result?; let submission_res_handle = self .0 .tx_sink - .submit_tx(&tx, execution_output.metrics) + .submit_tx(&tx, execution_output.metrics, validation_traces) .await?; match submission_res_handle { diff --git a/core/node/api_server/src/tx_sender/proxy.rs b/core/node/api_server/src/tx_sender/proxy.rs index 536a9767c1f..bba462404cf 100644 --- a/core/node/api_server/src/tx_sender/proxy.rs +++ b/core/node/api_server/src/tx_sender/proxy.rs @@ -11,7 +11,7 @@ use zksync_dal::{ helpers::wait_for_l1_batch, transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, DalError, }; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_types::{api, l2::L2Tx, Address, Nonce, H256, U256}; use zksync_web3_decl::{ @@ -309,6 +309,7 @@ impl TxSink for TxProxy { &self, tx: &L2Tx, _execution_metrics: TransactionExecutionMetrics, + _validation_traces: 
ValidationTraces, ) -> Result { // We're running an external node: we have to proxy the transaction to the main node. // But before we do that, save the tx to cache in case someone will request it @@ -416,7 +417,11 @@ mod tests { let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); assert!(send_tx_called.load(Ordering::Relaxed)); @@ -525,7 +530,11 @@ mod tests { let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap_err(); @@ -585,7 +594,11 @@ mod tests { // Add transaction to the cache let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); assert_eq!(proxy.tx_cache.get(tx.hash()).await.unwrap(), tx); @@ -662,15 +675,27 @@ mod tests { .build(); let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); proxy - .submit_tx(&replacing_tx, TransactionExecutionMetrics::default()) + .submit_tx( + &replacing_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); proxy - .submit_tx(&future_tx, TransactionExecutionMetrics::default()) + .submit_tx( + &future_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); { diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs index e2a51ae8e9a..cbc55a73c7c 100644 --- 
a/core/node/api_server/src/tx_sender/result.rs +++ b/core/node/api_server/src/tx_sender/result.rs @@ -67,6 +67,8 @@ pub enum SubmitTxError { /// Catch-all internal error (e.g., database error) that should not be exposed to the caller. #[error("internal error")] Internal(#[from] anyhow::Error), + #[error("transaction failed block.timestamp assertion")] + FailedBlockTimestampAssertion, } impl SubmitTxError { @@ -96,6 +98,7 @@ impl SubmitTxError { Self::MintedAmountOverflow => "minted-amount-overflow", Self::ProxyError(_) => "proxy-error", Self::Internal(_) => "internal", + Self::FailedBlockTimestampAssertion => "failed-block-timestamp-assertion", } } @@ -133,6 +136,9 @@ impl From for SubmitTxError { SandboxExecutionError::FailedToPayForTransaction(reason) => { Self::FailedToChargeFee(reason) } + SandboxExecutionError::FailedBlockTimestampAssertion => { + Self::FailedBlockTimestampAssertion + } } } } diff --git a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs index ea3f77fbcd8..cbe405b2aa6 100644 --- a/core/node/api_server/src/tx_sender/tests/mod.rs +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -155,7 +155,7 @@ async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { executor_options.set_fast_vm_mode(FastVmMode::Shadow); let pg_caches = PostgresStorageCaches::new(1, 1); - let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); + let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX, None); create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) .await .0 diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs index fdd63254cf0..c861f04a832 100644 --- a/core/node/api_server/src/tx_sender/tests/send_tx.rs +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -1,8 +1,11 @@ //! Tests for sending raw transactions. 
+use std::ops::Range; + use assert_matches::assert_matches; +use chrono::NaiveDateTime; use test_casing::test_casing; -use zksync_multivm::interface::ExecutionResult; +use zksync_multivm::interface::{tracer::ValidationTraces, ExecutionResult}; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_node_test_utils::create_l2_transaction; use zksync_types::K256PrivateKey; @@ -54,6 +57,16 @@ async fn submitting_tx_requires_one_connection() { .await .unwrap() .expect("transaction is not persisted"); + + let storage_tx = storage + .transactions_dal() + .get_storage_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("transaction is not persisted"); + // verify that no validation traces have been persisted + assert!(storage_tx.timestamp_asserter_range_start.is_none()); + assert!(storage_tx.timestamp_asserter_range_end.is_none()); } #[tokio::test] @@ -298,3 +311,88 @@ async fn sending_transaction_out_of_gas() { let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); } + +async fn submit_tx_with_validation_traces(actual_range: Range<u64>, expected_range: Range<i64>) { + // This test verifies that when a transaction produces ValidationTraces, + // range_start and range_end get persisted in the database + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. 
+ StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + tx_executor.set_tx_validation_traces_responses(move |tx, _| { + assert_eq!(tx.hash(), tx_hash); + ValidationTraces { + timestamp_asserter_range: Some(actual_range.clone()), + } + }); + + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let block_args = pending_block_args(&tx_sender).await; + + let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + let storage_tx = storage + .transactions_dal() + .get_storage_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("transaction is not persisted"); + assert_eq!( + expected_range.start, + storage_tx + .timestamp_asserter_range_start + .unwrap() + .and_utc() + .timestamp() + ); + assert_eq!( + expected_range.end, + storage_tx + .timestamp_asserter_range_end + .unwrap() + .and_utc() + .timestamp() + ); +} + +#[tokio::test] +async fn submitting_tx_with_validation_traces() { + // This test verifies that when a transaction produces ValidationTraces, + // range_start and range_end get persisted in the database + submit_tx_with_validation_traces(10..20, 10..20).await; +} + +#[tokio::test] +async fn submitting_tx_with_validation_traces_resulting_into_overflow() { + // This test verifies that the timestamp in ValidationTraces is capped at + // the maximum value supported by the NaiveDateTime type + submit_tx_with_validation_traces(10..u64::MAX, 10..NaiveDateTime::MAX.and_utc().timestamp()) + .await; +} diff --git 
a/core/node/api_server/src/tx_sender/tx_sink.rs b/core/node/api_server/src/tx_sender/tx_sink.rs index 3d764816fe0..1a6a7a733cc 100644 --- a/core/node/api_server/src/tx_sender/tx_sink.rs +++ b/core/node/api_server/src/tx_sender/tx_sink.rs @@ -1,5 +1,5 @@ use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, Core}; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_types::{ api::{Transaction, TransactionDetails, TransactionId}, l2::L2Tx, @@ -28,6 +28,7 @@ pub trait TxSink: std::fmt::Debug + Send + Sync + 'static { &self, tx: &L2Tx, execution_metrics: TransactionExecutionMetrics, + validation_traces: ValidationTraces, ) -> Result; /// Attempts to look up the pending nonce for the account in the sink-specific storage. diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index 31c8f15bb1e..21f3f5ae49e 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -58,6 +58,10 @@ impl ZksNamespaceServer for ZksNamespace { Ok(self.get_bridge_contracts_impl().await) } + async fn get_timestamp_asserter(&self) -> RpcResult> { + Ok(self.get_timestamp_asserter_impl()) + } + async fn l1_chain_id(&self) -> RpcResult { Ok(self.l1_chain_id_impl()) } diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index bcfd7daf346..1a4114bd2c6 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -151,6 +151,10 @@ impl ZksNamespace { self.state.bridge_addresses_handle.read().await } + pub fn get_timestamp_asserter_impl(&self) -> Option
{ + self.state.api_config.timestamp_asserter_address + } + pub fn l1_chain_id_impl(&self) -> U64 { U64::from(*self.state.api_config.l1_chain_id) } diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index a2aee8c7420..d43771811ee 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -115,6 +115,7 @@ pub struct InternalApiConfig { pub filters_disabled: bool, pub dummy_verifier: bool, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, + pub timestamp_asserter_address: Option
, } impl InternalApiConfig { @@ -168,6 +169,7 @@ impl InternalApiConfig { filters_disabled: web3_config.filters_disabled, dummy_verifier: genesis_config.dummy_verifier, l1_batch_commit_data_generator_mode: genesis_config.l1_batch_commit_data_generator_mode, + timestamp_asserter_address: contracts_config.l2_timestamp_asserter_addr, } } } diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 2d642b9a04b..540ea085711 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -34,6 +34,7 @@ pub(crate) async fn create_test_tx_sender( &web3_config, wallets.state_keeper.unwrap().fee_account.address(), l2_chain_id, + None, ); let storage_caches = PostgresStorageCaches::new(1, 1); diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index d8080f1dba5..17e92200d66 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -19,8 +19,8 @@ use zksync_config::{ use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; use zksync_multivm::interface::{ - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionMetrics, + tracer::ValidationTraces, TransactionExecutionMetrics, TransactionExecutionResult, + TxExecutionStatus, VmEvent, VmExecutionMetrics, }; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ @@ -364,7 +364,11 @@ async fn store_custom_l2_block( let l2_tx = result.transaction.clone().try_into().unwrap(); let tx_submission_result = storage .transactions_dal() - .insert_transaction_l2(&l2_tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &l2_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); assert_matches!(tx_submission_result, 
L2TxSubmissionResult::Added); @@ -771,7 +775,11 @@ impl HttpTest for TransactionCountTest { pending_tx.common_data.nonce = Nonce(2); storage .transactions_dal() - .insert_transaction_l2(&pending_tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &pending_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -851,7 +859,11 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { let mut storage = pool.connection().await?; storage .transactions_dal() - .insert_transaction_l2(&pending_tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &pending_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index d9faf7b664e..12ac8bdbf3f 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -620,6 +620,9 @@ async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec .sync_mempool(&[], &[], 0, 0, 1000) .await .unwrap() + .into_iter() + .map(|x| x.0) + .collect() } fn tx_into_log(tx: L1Tx) -> Log { diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 1be7e00543f..0f5f4d6253f 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -362,6 +362,7 @@ impl StateKeeperIO for ExternalIO { async fn wait_for_next_tx( &mut self, max_wait: Duration, + _l2_block_timestamp: u64, ) -> anyhow::Result> { tracing::debug!( "Waiting for the new tx, next action is {:?}", diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml index 0e924b9f066..75d7c9f1e94 100644 --- a/core/node/state_keeper/Cargo.toml +++ b/core/node/state_keeper/Cargo.toml @@ -32,6 +32,7 @@ zksync_vm_executor.workspace = true zksync_system_constants.workspace = true zksync_base_token_adjuster.workspace = true + anyhow.workspace = true async-trait.workspace 
= true tokio = { workspace = true, features = ["time"] } diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index ec9f906b1cd..2298d4c2ee7 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -9,7 +9,7 @@ use futures::FutureExt; use zksync_config::GenesisConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l2_block, create_l2_transaction, execute_l2_transaction, @@ -355,7 +355,11 @@ async fn store_pending_l2_blocks( let tx = create_l2_transaction(10, 100); storage .transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); let mut new_l2_block = create_l2_block(l2_block_number); diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index dfddd36aba7..370d46fd544 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -278,6 +278,7 @@ impl StateKeeperIO for MempoolIO { async fn wait_for_next_tx( &mut self, max_wait: Duration, + l2_block_timestamp: u64, ) -> anyhow::Result> { let started_at = Instant::now(); while started_at.elapsed() <= max_wait { @@ -285,7 +286,7 @@ impl StateKeeperIO for MempoolIO { let maybe_tx = self.mempool.next_transaction(&self.filter); get_latency.observe(); - if let Some(tx) = maybe_tx { + if let Some((tx, constraint)) = maybe_tx { // Reject transactions with too big gas limit. 
They are also rejected on the API level, but // we need to secure ourselves in case some tx will somehow get into mempool. if tx.gas_limit() > self.max_allowed_tx_gas_limit { @@ -298,6 +299,23 @@ impl StateKeeperIO for MempoolIO { .await?; continue; } + + // Reject transactions that violate block.timestamp constraints. Such transactions should be + // rejected at the API level, but we need to protect ourselves in case if a transaction + // goes outside of the allowed range while being in the mempool + let matches_range = constraint + .timestamp_asserter_range + .map_or(true, |x| x.contains(&l2_block_timestamp)); + + if !matches_range { + self.reject( + &tx, + UnexecutableReason::Halt(Halt::FailedBlockTimestampAssertion), + ) + .await?; + continue; + } + return Ok(Some(tx)); } else { tokio::time::sleep(self.delay_interval).await; @@ -309,9 +327,9 @@ impl StateKeeperIO for MempoolIO { async fn rollback(&mut self, tx: Transaction) -> anyhow::Result<()> { // Reset nonces in the mempool. - self.mempool.rollback(&tx); + let constraint = self.mempool.rollback(&tx); // Insert the transaction back. - self.mempool.insert(vec![tx], HashMap::new()); + self.mempool.insert(vec![(tx, constraint)], HashMap::new()); Ok(()) } diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index e2461e72d7b..fbc481fb678 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -137,8 +137,11 @@ pub trait StateKeeperIO: 'static + Send + Sync + fmt::Debug + IoSealCriteria { /// Blocks for up to `max_wait` until the next transaction is available for execution. /// Returns `None` if no transaction became available until the timeout. - async fn wait_for_next_tx(&mut self, max_wait: Duration) - -> anyhow::Result>; + async fn wait_for_next_tx( + &mut self, + max_wait: Duration, + l2_block_timestamp: u64, + ) -> anyhow::Result>; /// Marks the transaction as "not executed", so it can be retrieved from the IO again. 
async fn rollback(&mut self, tx: Transaction) -> anyhow::Result<()>; /// Marks the transaction as "rejected", e.g. one that is not correct and can't be executed. diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 4fc58bce5c9..53871c54a19 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -458,7 +458,7 @@ impl L2BlockSealSubtask for InsertL2ToL1LogsSubtask { mod tests { use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::{ - interface::{TransactionExecutionResult, TxExecutionStatus}, + interface::{tracer::ValidationTraces, TransactionExecutionResult, TxExecutionStatus}, utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}, zk_evm_latest::ethereum_types::H256, VmVersion, @@ -487,7 +487,7 @@ mod tests { .await .unwrap() .transactions_dal() - .insert_transaction_l2(&tx, Default::default()) + .insert_transaction_l2(&tx, Default::default(), ValidationTraces::default()) .await .unwrap(); let tx_hash = tx.hash(); diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index ece5b67767f..adef238fe92 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,11 +1,16 @@ -use std::{collections::HashMap, time::Duration}; +use std::{ + collections::HashMap, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, VmEvent, VmExecutionMetrics}, + interface::{ + tracer::ValidationTraces, TransactionExecutionMetrics, VmEvent, VmExecutionMetrics, + }, 
utils::derive_base_fee_and_gas_per_pubdata, }; use zksync_node_test_utils::prepare_recovery_snapshot; @@ -13,8 +18,9 @@ use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, commitment::L1BatchCommitmentMode, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, + l2::L2Tx, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, - ProtocolVersionId, StorageKey, H256, U256, + ProtocolVersionId, StorageKey, TransactionTimeRangeConstraint, H256, U256, }; use zksync_utils::time::seconds_since_epoch; @@ -130,6 +136,7 @@ async fn test_filter_with_no_pending_batch(commitment_mode: L1BatchCommitmentMod &mut guard, want_filter.fee_per_gas, want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), ); // Now, given that there is a transaction matching the expected filter, waiting for the new batch params @@ -169,7 +176,12 @@ async fn test_timestamps_are_distinct( ) .await .unwrap(); - tester.insert_tx(&mut guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata); + tester.insert_tx( + &mut guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), + ); let l1_batch_params = mempool .wait_for_new_batch_params(&io_cursor, Duration::from_secs(10)) @@ -431,10 +443,15 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom &mut mempool_guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), ); storage .transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -584,10 +601,15 @@ async fn continue_unsealed_batch_on_restart(commitment_mode: L1BatchCommitmentMo &mut mempool_guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), ); storage .transactions_dal() - .insert_transaction_l2(&tx, 
TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -644,3 +666,118 @@ async fn insert_unsealed_batch_on_init(commitment_mode: L1BatchCommitmentMode) { assert_eq!(l1_batch_params.fee_input, fee_input); assert_eq!(l1_batch_params.first_l2_block.timestamp, 2); } + +#[tokio::test] +async fn test_mempool_with_timestamp_assertion() { + let connection_pool = ConnectionPool::::constrained_test_pool(2).await; + // what commitment mode to use is irrelevant here + let tester = Tester::new(L1BatchCommitmentMode::Rollup); + let mut storage = connection_pool.connection().await.unwrap(); + + tester.genesis(&connection_pool).await; + + // Insert a sealed batch so there will be a `prev_l1_batch_state_root`. + // These gas values are random and don't matter for filter calculation. + let tx_result = tester + .insert_l2_block(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555)) + .await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; + + // Create a copy of the tx filter that the mempool will use. + let want_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await + .unwrap(); + + // Create a mempool without pending batch and ensure that filter is not initialized just yet. + let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool).await; + mempool.initialize().await.unwrap(); + assert_eq!(mempool.filter(), &L2TxFilter::default()); + + let system_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs(); + + // inserting 3 transactions - a good one, sandwiched in between two bad ones. The good one should + // be returned by wait_for_next_tx, while two bad ones should be rejected. 
+ let rejected_tx_1 = tester.insert_tx( + &mut guard, + want_filter.fee_per_gas, + want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint { + timestamp_asserter_range: Some(system_time - 20000..system_time - 10000), + }, + ); + let expected_tx = tester.insert_tx( + &mut guard, + want_filter.fee_per_gas, + want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint { + timestamp_asserter_range: Some(system_time - 1000..system_time + 1000), + }, + ); + let rejected_tx_2 = tester.insert_tx( + &mut guard, + want_filter.fee_per_gas, + want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint { + timestamp_asserter_range: Some(system_time + 10000..system_time + 20000), + }, + ); + insert_l2_transaction(&mut storage, &rejected_tx_1).await; + insert_l2_transaction(&mut storage, &expected_tx).await; + insert_l2_transaction(&mut storage, &rejected_tx_2).await; + + let tx = mempool + .wait_for_next_tx(Duration::from_secs(2), system_time) + .await + .unwrap() + .expect("No expected transaction in the mempool"); + assert_eq!(expected_tx.hash(), tx.hash()); + + let next_tx = mempool + .wait_for_next_tx(Duration::from_secs(2), system_time) + .await + .expect("Should be no more transactions in the mempool"); + assert!(next_tx.is_none()); + + // verify that two transactions have been rejected + let rejected_storage_tx_1 = storage + .transactions_dal() + .get_storage_tx_by_hash(rejected_tx_1.hash()) + .await + .unwrap() + .expect("Failed to find transaction"); + assert_eq!( + "rejected: Transaction failed block.timestamp assertion", + rejected_storage_tx_1.error.unwrap() + ); + + let rejected_storage_tx_2 = storage + .transactions_dal() + .get_storage_tx_by_hash(rejected_tx_2.hash()) + .await + .unwrap() + .expect("Failed to find transaction"); + assert_eq!( + "rejected: Transaction failed block.timestamp assertion", + rejected_storage_tx_2.error.unwrap() + ); +} + +async fn insert_l2_transaction(storage: &mut Connection<'_, Core>, tx: &L2Tx) { + storage + 
.transactions_dal() + .insert_transaction_l2( + tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) + .await + .unwrap(); +} diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index daedbebc75e..32a746eecdf 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -11,7 +11,9 @@ use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, TransactionExecutionResult}, + interface::{ + tracer::ValidationTraces, TransactionExecutionMetrics, TransactionExecutionResult, + }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_node_fee_model::{ @@ -30,7 +32,8 @@ use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, pubdata_da::PubdataSendingMode, system_contracts::get_system_smart_contracts, - L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, H256, + L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, TransactionTimeRangeConstraint, + H256, }; use crate::{MempoolGuard, MempoolIO}; @@ -188,7 +191,11 @@ impl Tester { let tx = create_l2_transaction(10, 100); storage .transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); storage @@ -252,9 +259,10 @@ impl Tester { guard: &mut MempoolGuard, fee_per_gas: u64, gas_per_pubdata: u32, + constraint: TransactionTimeRangeConstraint, ) -> L2Tx { let tx = create_l2_transaction(fee_per_gas, gas_per_pubdata.into()); - guard.insert(vec![tx.clone().into()], Default::default()); + guard.insert(vec![(tx.clone().into(), constraint)], Default::default()); tx } } diff --git a/core/node/state_keeper/src/keeper.rs 
b/core/node/state_keeper/src/keeper.rs index 523dd8eceba..60e20603899 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -589,11 +589,10 @@ impl ZkSyncStateKeeper { Self::start_next_l2_block(new_l2_block_params, updates_manager, batch_executor) .await?; } - let waiting_latency = KEEPER_METRICS.waiting_for_tx.start(); let Some(tx) = self .io - .wait_for_next_tx(POLL_WAIT_DURATION) + .wait_for_next_tx(POLL_WAIT_DURATION, updates_manager.l2_block.timestamp) .instrument(info_span!("wait_for_next_tx")) .await .context("error waiting for next transaction")? diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index a17f2670cbb..8e9d674f878 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -111,7 +111,7 @@ impl MempoolFetcher { (filter.fee_per_gas, filter.gas_per_pubdata) }; - let transactions = storage + let transactions_with_constraints = storage .transactions_dal() .sync_mempool( &mempool_info.stashed_accounts, @@ -122,16 +122,22 @@ impl MempoolFetcher { ) .await .context("failed syncing mempool")?; + + let transactions: Vec<_> = transactions_with_constraints + .iter() + .map(|(t, _c)| t) + .collect(); + let nonces = get_transaction_nonces(&mut storage, &transactions).await?; drop(storage); #[cfg(test)] { - let transaction_hashes = transactions.iter().map(Transaction::hash).collect(); + let transaction_hashes = transactions.iter().map(|x| x.hash()).collect(); self.transaction_hashes_sender.send(transaction_hashes).ok(); } let all_transactions_loaded = transactions.len() < self.sync_batch_size; - self.mempool.insert(transactions, nonces); + self.mempool.insert(transactions_with_constraints, nonces); latency.observe(); if all_transactions_loaded { @@ -145,7 +151,7 @@ impl MempoolFetcher { /// Loads nonces for all distinct `transactions` initiators from the storage. 
async fn get_transaction_nonces( storage: &mut Connection<'_, Core>, - transactions: &[Transaction], + transactions: &[&Transaction], ) -> anyhow::Result> { let (nonce_keys, address_by_nonce_key): (Vec<_>, HashMap<_, _>) = transactions .iter() @@ -173,7 +179,7 @@ async fn get_transaction_nonces( #[cfg(test)] mod tests { - use zksync_multivm::interface::TransactionExecutionMetrics; + use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::create_l2_transaction; @@ -215,7 +221,7 @@ mod tests { let nonces = get_transaction_nonces( &mut storage, - &[transaction.into(), other_transaction.into()], + &[&transaction.into(), &other_transaction.into()], ) .await .unwrap(); @@ -261,7 +267,11 @@ mod tests { let mut storage = pool.connection().await.unwrap(); storage .transactions_dal() - .insert_transaction_l2(&transaction, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &transaction, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); drop(storage); @@ -317,7 +327,11 @@ mod tests { let mut storage = pool.connection().await.unwrap(); storage .transactions_dal() - .insert_transaction_l2(&transaction, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &transaction, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); drop(storage); @@ -370,7 +384,11 @@ mod tests { .unwrap(); storage .transactions_dal() - .insert_transaction_l2(&transaction, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &transaction, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); drop(storage); diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index 962cc807318..b82d61666fb 100644 --- 
a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -54,6 +54,7 @@ fn halt_as_metric_label(halt: &Halt) -> &'static str { Halt::VMPanic => "VMPanic", Halt::TracerCustom(_) => "TracerCustom", Halt::FailedToPublishCompressedBytecodes => "FailedToPublishCompressedBytecodes", + Halt::FailedBlockTimestampAssertion => "FailedBlockTimestampAssertion", } } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 45787b18f3c..5fe05167504 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -731,6 +731,7 @@ impl StateKeeperIO for TestIO { async fn wait_for_next_tx( &mut self, max_wait: Duration, + _l2_block_timestamp: u64, ) -> anyhow::Result> { let action = self.pop_next_item("wait_for_next_tx"); diff --git a/core/node/state_keeper/src/types.rs b/core/node/state_keeper/src/types.rs index e112871a647..db18e32e096 100644 --- a/core/node/state_keeper/src/types.rs +++ b/core/node/state_keeper/src/types.rs @@ -6,7 +6,9 @@ use std::{ use zksync_dal::{Connection, Core, CoreDal}; use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore}; use zksync_multivm::interface::{VmExecutionMetrics, VmExecutionResultAndLogs}; -use zksync_types::{block::BlockGasCount, Address, Nonce, PriorityOpId, Transaction}; +use zksync_types::{ + block::BlockGasCount, Address, Nonce, PriorityOpId, Transaction, TransactionTimeRangeConstraint, +}; use super::{ metrics::StateKeeperGauges, @@ -30,13 +32,32 @@ impl MempoolGuard { Self(Arc::new(Mutex::new(store))) } - pub fn insert(&mut self, transactions: Vec, nonces: HashMap) { + pub fn insert( + &mut self, + transactions: Vec<(Transaction, TransactionTimeRangeConstraint)>, + nonces: HashMap, + ) { self.0 .lock() .expect("failed to acquire mempool lock") .insert(transactions, nonces); } + #[cfg(test)] + pub fn insert_without_constraint( + 
&mut self, + transactions: Vec, + nonces: HashMap, + ) { + self.insert( + transactions + .into_iter() + .map(|x| (x, TransactionTimeRangeConstraint::default())) + .collect(), + nonces, + ); + } + pub fn has_next(&self, filter: &L2TxFilter) -> bool { self.0 .lock() @@ -44,18 +65,21 @@ impl MempoolGuard { .has_next(filter) } - pub fn next_transaction(&mut self, filter: &L2TxFilter) -> Option { + pub fn next_transaction( + &mut self, + filter: &L2TxFilter, + ) -> Option<(Transaction, TransactionTimeRangeConstraint)> { self.0 .lock() .expect("failed to acquire mempool lock") .next_transaction(filter) } - pub fn rollback(&mut self, rejected: &Transaction) { + pub fn rollback(&mut self, rejected: &Transaction) -> TransactionTimeRangeConstraint { self.0 .lock() .expect("failed to acquire mempool lock") - .rollback(rejected); + .rollback(rejected) } pub fn get_mempool_info(&mut self) -> MempoolInfo { diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 575fd59be04..a3438d5a4e1 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -20,7 +20,9 @@ use zksync_types::{ StorageLog, StorageLogKind, StorageValue, H160, H256, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; -use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TransactionExecutionMetrics}; +use zksync_vm_interface::{ + tracer::ValidationTraces, L1BatchEnv, L2BlockEnv, SystemEnv, TransactionExecutionMetrics, +}; use super::*; @@ -242,7 +244,11 @@ async fn store_l1_batches( let account = accounts.choose_mut(&mut rng).unwrap(); let tx = create_l2_transaction(account, 1000000, 100); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await?; let mut logs = Vec::new(); let mut written_keys = Vec::new(); diff --git 
a/core/tests/ts-integration/contracts/custom-account/custom-account.sol b/core/tests/ts-integration/contracts/custom-account/custom-account.sol index fc90355ac64..99177212477 100644 --- a/core/tests/ts-integration/contracts/custom-account/custom-account.sol +++ b/core/tests/ts-integration/contracts/custom-account/custom-account.sol @@ -9,6 +9,10 @@ import './SystemContractsCaller.sol'; import './interfaces/IAccount.sol'; +interface ITimestampAsserter { + function assertTimestampInRange(uint256 start, uint256 end) external view; +} + contract CustomAccount is IAccount { event BootloaderBalance(uint256); @@ -18,15 +22,28 @@ contract CustomAccount is IAccount { uint256 public gasToSpent; bytes32 public lastTxHash; + address public timestampAsserterAddress; + uint256 public timestampAsserterRangeStart; + uint256 public timestampAsserterRangeEnd; + - constructor(bool _violateValidationRules) { + constructor(bool _violateValidationRules, address _timestampAsserterAddress, uint256 _timestampAsserterRangeStart, uint256 _timestampAsserterRangeEnd) { violateValidationRules = _violateValidationRules; + timestampAsserterAddress = _timestampAsserterAddress; + timestampAsserterRangeStart = _timestampAsserterRangeStart; + timestampAsserterRangeEnd = _timestampAsserterRangeEnd; } // bytes4(keccak256("isValidSignature(bytes32,bytes)") bytes4 constant EIP1271_SUCCESS_RETURN_VALUE = 0x1626ba7e; function validateTransaction(bytes32 _txHash, bytes32 _suggestedSignedTxHash, Transaction calldata _transaction) external payable override returns (bytes4 magic) { + ITimestampAsserter timestampAsserter = ITimestampAsserter(timestampAsserterAddress); + // This assertion exists to ensure that block.timestamp can be accessed in AA by using + // ITimestampAsserter contract + + timestampAsserter.assertTimestampInRange(timestampAsserterRangeStart, timestampAsserterRangeEnd); + magic = _validateTransaction(_suggestedSignedTxHash, _transaction); lastTxHash = _txHash; diff --git 
a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index 596872ab9c5..b91fcd09f0a 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -87,7 +87,7 @@ async function loadTestEnvironmentFromFile(fileConfig: FileConfig): Promise { ); const healthcheckPort = process.env.API_HEALTHCHECK_PORT ?? '3071'; + if (!process.env.CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR) { + throw new Error('CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR is not defined'); + } + const timestampAsserterAddress = process.env.CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR.toString(); + + const timestampAsserterMinTimeTillEndSec = parseInt(process.env.TIMESTAMP_ASSERTER_MIN_TIME_TILL_END_SEC!); + return { maxLogsLimit, pathToHome, @@ -313,7 +324,9 @@ export async function loadTestEnvironmentFromEnv(): Promise { decimals: baseToken?.decimals || token.decimals, l1Address: baseToken?.address || token.address, l2Address: baseTokenAddressL2 - } + }, + timestampAsserterAddress, + timestampAsserterMinTimeTillEndSec }; } diff --git a/core/tests/ts-integration/src/types.ts b/core/tests/ts-integration/src/types.ts index c513480c1b4..014031a3dd7 100644 --- a/core/tests/ts-integration/src/types.ts +++ b/core/tests/ts-integration/src/types.ts @@ -94,6 +94,8 @@ export interface TestEnvironment { */ baseToken: Token; healthcheckPort: string; + timestampAsserterAddress: string; + timestampAsserterMinTimeTillEndSec: number; } /** diff --git a/core/tests/ts-integration/tests/custom-account.test.ts b/core/tests/ts-integration/tests/custom-account.test.ts index 46ddba95323..ebbe11b8719 100644 --- a/core/tests/ts-integration/tests/custom-account.test.ts +++ b/core/tests/ts-integration/tests/custom-account.test.ts @@ -18,6 +18,9 @@ const contracts = { // We create multiple custom accounts and we need to fund them with ETH to pay for fees. 
const ETH_PER_CUSTOM_ACCOUNT = L2_DEFAULT_ETH_PER_ACCOUNT / 8n; const TRANSFER_AMOUNT = 1n; +const DEFAULT_TIMESTAMP_ASSERTER_RANGE_START = 0; +// 2555971200 is a number of seconds up to 30/12/2050 +const DEFAULT_TIMESTAMP_ASSERTER_RANGE_END = 2555971200; describe('Tests for the custom account behavior', () => { let testMaster: TestMaster; @@ -25,11 +28,13 @@ describe('Tests for the custom account behavior', () => { let customAccount: zksync.Contract; let erc20Address: string; let erc20: zksync.Contract; + let timestampAsserterAddress: string; beforeAll(() => { testMaster = TestMaster.getInstance(__filename); alice = testMaster.mainAccount(); erc20Address = testMaster.environment().erc20Token.l2Address; + timestampAsserterAddress = testMaster.environment().timestampAsserterAddress; erc20 = new zksync.Contract( erc20Address, zksync.utils.IERC20, @@ -40,7 +45,17 @@ describe('Tests for the custom account behavior', () => { test('Should deploy custom account', async () => { const violateRules = false; - customAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'createAccount'); + customAccount = await deployContract( + alice, + contracts.customAccount, + [ + violateRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], + 'createAccount' + ); // Now we need to check that it was correctly marked as an account: const contractAccountInfo = await alice.provider.getContractAccountInfo(await customAccount.getAddress()); @@ -50,6 +65,8 @@ describe('Tests for the custom account behavior', () => { // Checking that the nonce ordering is correct expect(contractAccountInfo.nonceOrdering).toEqual(zksync.types.AccountNonceOrdering.Sequential); + + return customAccount; }); test('Should fund the custom account', async () => { @@ -60,7 +77,7 @@ describe('Tests for the custom account behavior', () => { .transfer({ to: await customAccount.getAddress(), token: erc20Address, - amount: 
ERC20_PER_ACCOUNT / 4n + amount: ERC20_PER_ACCOUNT / 8n }) .then((tx) => tx.wait()); }); @@ -95,6 +112,122 @@ describe('Tests for the custom account behavior', () => { ).toBeAccepted([erc20BalanceChange, feeCheck]); }); + test('Should fail transaction validation due to timestamp assertion in the validation tracer - close to the range end', async () => { + const now = Math.floor(Date.now() / 1000); + const minTimeTillEnd = testMaster.environment().timestampAsserterMinTimeTillEndSec; + const rangeStart = now - 10; + const rangeEnd = now + minTimeTillEnd / 2; + + const customAccount = await deployAndFundCustomAccount( + alice, + erc20Address, + timestampAsserterAddress, + rangeStart, + rangeEnd + ); + + const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); + + await expect( + sendCustomAccountTransaction( + tx as zksync.types.Transaction, + alice.provider, + await customAccount.getAddress(), + testMaster.environment().l2ChainId + ) + ).toBeRejected( + 'failed to validate the transaction. reason: Violated validation rules: block.timestamp is too close to the range end' + ); + }); + + test('Should execute contract by custom account when timestamp asserter range end overflows', async () => { + // This test ensures that a custom account transaction completes successfully + // even when the timestamp asserter's range end exceeds `u64::MAX`. In such cases, + // the range is capped at `u64::MAX` and processed as expected. 
+ const customAccount = await deployAndFundCustomAccount( + alice, + erc20Address, + timestampAsserterAddress, + 0, + BigInt('340282366920938463463374607431768211455') // u128::MAX + ); + + const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); + const customAccountAddress = await customAccount.getAddress(); + const erc20BalanceChange = await shouldChangeTokenBalances(erc20Address, [ + { + addressToCheck: customAccountAddress, + wallet: alice, + change: -TRANSFER_AMOUNT + }, + { wallet: alice, change: TRANSFER_AMOUNT } + ]); + const feeCheck = await shouldChangeETHBalances([ + { addressToCheck: customAccountAddress, wallet: alice, change: 0n } + ]); + + await expect( + sendCustomAccountTransaction( + tx as zksync.types.Transaction, + alice.provider, + await customAccount.getAddress(), + testMaster.environment().l2ChainId + ) + ).toBeAccepted([erc20BalanceChange, feeCheck]); + }); + + test('Should fail to estimate fee due to block.timestamp assertion in the smart contract', async () => { + const now = Math.floor(Date.now() / 1000); + const rangeStart = now + 300; + const rangeEnd = now + 1000; + + const customAccount = await deployAndFundCustomAccount( + alice, + erc20Address, + timestampAsserterAddress, + rangeStart, + rangeEnd + ); + const customAccountAddress = await customAccount.getAddress(); + + const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); + + try { + await sendCustomAccountTransaction( + tx as zksync.types.Transaction, + alice.provider, + customAccountAddress, + testMaster.environment().l2ChainId, + undefined, + undefined, + false + ); + expect(null).fail('The transaction was expected to fail'); + } catch (e) { + const err = e as Error; + expect(err.message).toContain( + 'failed to validate the transaction. 
reason: Validation revert: Account validation error' + ); + const functionSelectorMatch = err.message.match(/function_selector\s=\s(0x[0-9a-fA-F]{8})/); + const calldataMatch = err.message.match(/data\s=\s(0x[0-9a-fA-F]+)/); + + expect(functionSelectorMatch && calldataMatch).toBeTruthy(); + + const functionSelector = functionSelectorMatch![1]; + expect(functionSelector).toBe('0x3d5740d9'); + + const calldata = calldataMatch![1]; + + const startHex = calldata.slice(74, 138); + const endHex = calldata.slice(138); + const start = BigInt(`0x${startHex}`); + const end = BigInt(`0x${endHex}`); + + expect(start).toBe(BigInt(rangeStart)); + expect(end).toBe(BigInt(rangeEnd)); + } + }); + test('Should fail the validation with incorrect signature', async () => { const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); const fakeSignature = new Uint8Array(12); @@ -112,7 +245,17 @@ describe('Tests for the custom account behavior', () => { test('Should not allow violating validation rules', async () => { // We configure account to violate storage access rules during tx validation. const violateRules = true; - const badCustomAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'createAccount'); + const badCustomAccount = await deployContract( + alice, + contracts.customAccount, + [ + violateRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], + 'createAccount' + ); const badCustomAccountAddress = await badCustomAccount.getAddress(); // Fund the account. @@ -145,7 +288,17 @@ describe('Tests for the custom account behavior', () => { // Note that we supply "create" instead of "createAccount" here -- the code is the same, but it'll // be treated as a common contract. 
const violateRules = false; - const nonAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'create'); + const nonAccount = await deployContract( + alice, + contracts.customAccount, + [ + violateRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], + 'create' + ); const nonAccountAddress = await nonAccount.getAddress(); // Fund the account. @@ -203,7 +356,12 @@ describe('Tests for the custom account behavior', () => { const badCustomAccount = await deployContract( alice, contracts.customAccount, - [violateStorageRules], + [ + violateStorageRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], 'createAccount' ); const badCustomAccountAddress = await badCustomAccount.getAddress(); @@ -244,7 +402,12 @@ describe('Tests for the custom account behavior', () => { const badCustomAccount = await deployContract( alice, contracts.customAccount, - [violateStorageRules], + [ + violateStorageRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], 'createAccount' ); const badCustomAccountAddress = await badCustomAccount.getAddress(); @@ -316,12 +479,11 @@ async function sendCustomAccountTransaction( accountAddress: string, chainId: bigint, customSignature?: Uint8Array, - nonce?: number + nonce?: number, + estimateGas: boolean = true ) { - const gasLimit = await browserProvider.estimateGas({ - ...tx, - from: accountAddress - }); + const gasLimit = estimateGas ? 
await browserProvider.estimateGas({ ...tx, from: accountAddress }) : BigInt(100_000); // Enough gas to invoke AA contract + const gasPrice = await browserProvider.getGasPrice(); tx.gasLimit = gasLimit; @@ -345,3 +507,30 @@ async function sendCustomAccountTransaction( return await browserProvider.broadcastTransaction(serializedTx); } + +async function deployAndFundCustomAccount( + richAccount: zksync.Wallet, + erc20Address: string, + timestampAsserterAddress: string, + rangeStart: any, + rangeEnd: any +): Promise { + const customAccount = await deployContract( + richAccount, + contracts.customAccount, + [false, timestampAsserterAddress, rangeStart, rangeEnd], + 'createAccount' + ); + + await richAccount + .transfer({ to: await customAccount.getAddress(), amount: ETH_PER_CUSTOM_ACCOUNT }) + .then((tx) => tx.wait()); + await richAccount + .transfer({ + to: await customAccount.getAddress(), + token: erc20Address, + amount: ERC20_PER_ACCOUNT / 8n + }) + .then((tx) => tx.wait()); + return customAccount; +} diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 94758d92e18..6e0e913f5bc 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -377,6 +377,9 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 +timestamp_asserter: + min_time_till_end_sec: 60 + consensus: port: 3054 server_addr: "127.0.0.1:3054" diff --git a/zkstack_cli/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs index 6d336b5cfc1..79044a59f3a 100644 --- a/zkstack_cli/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -7,7 +7,7 @@ use crate::{ deploy_ecosystem::output::DeployL1Output, deploy_l2_contracts::output::{ ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, - Multicall3Output, + Multicall3Output, TimestampAsserterOutput, }, register_chain::output::RegisterChainOutput, }, @@ -109,6 +109,14 @@ impl ContractsConfig { self.l2.multicall3 = 
Some(multicall3_output.multicall3); Ok(()) } + + pub fn set_timestamp_asserter_addr( + &mut self, + timestamp_asserter_output: &TimestampAsserterOutput, + ) -> anyhow::Result<()> { + self.l2.timestamp_asserter_addr = Some(timestamp_asserter_output.timestamp_asserter); + Ok(()) + } } impl FileConfigWithDefaultName for ContractsConfig { @@ -161,4 +169,5 @@ pub struct L2Contracts { pub consensus_registry: Option
, pub multicall3: Option
, pub legacy_shared_bridge_addr: Option
, + pub timestamp_asserter_addr: Option
, } diff --git a/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 29be89b9101..7b2b56c8154 100644 --- a/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -8,6 +8,8 @@ impl ZkStackConfig for DefaultL2UpgradeOutput {} impl ZkStackConfig for ConsensusRegistryOutput {} impl ZkStackConfig for Multicall3Output {} +impl ZkStackConfig for TimestampAsserterOutput {} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { pub l2_shared_bridge_implementation: Address, @@ -29,3 +31,8 @@ pub struct ConsensusRegistryOutput { pub struct Multicall3Output { pub multicall3: Address, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TimestampAsserterOutput { + pub timestamp_asserter: Address, +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs index 8dbd5c371c8..091bef86d26 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs @@ -12,7 +12,7 @@ use config::{ input::DeployL2ContractsInput, output::{ ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, - Multicall3Output, + Multicall3Output, TimestampAsserterOutput, }, }, script_params::DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS, @@ -236,6 +236,8 @@ pub async fn deploy_l2_contracts( contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?; contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)?; contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?)?; + contracts_config + .set_timestamp_asserter_addr(&TimestampAsserterOutput::read(shell, out)?)?; Ok(()) }, )