From a603ac8eaab112738e1c2336b0f537273ad58d85 Mon Sep 17 00:00:00 2001
From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com>
Date: Mon, 20 May 2024 16:57:21 +0300
Subject: [PATCH 1/4] fix: Disallow non null updates for transactions (#1951)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Add a check to the `set_tx_id` query that disallows updating a `tx_id` that is already set (non-NULL).

## Why ❔

To prevent issues where a single batch could be committed twice.
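As an illustration, this is the guard pattern the PR applies to all three queries, shown here for the commit case (a simplified sketch of the SQL from the diff below):

```sql
-- Claim batches only while no commit tx has been assigned yet; an update
-- that matches zero rows then signals an attempted double assignment.
UPDATE l1_batches
SET
    eth_commit_tx_id = $1,
    updated_at = NOW()
WHERE
    number BETWEEN $2 AND $3
    AND eth_commit_tx_id IS NULL;
```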
## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `zk spellcheck`.
---
 ...18daf76a5f283e4298fd12022b0c3db07319.json} | 4 +-
 ...64174f39e6011fdfdc56490397ce90233055.json} | 4 +-
 ...779128de288484abea33d338c3304dd66e08.json} | 4 +-
 core/lib/dal/src/blocks_dal.rs | 184 +++++++++++++++---
 core/lib/db_connection/src/instrument.rs | 6 +-
 core/node/eth_sender/src/tests.rs | 161 ++++++++++++++-
 6 files changed, 317 insertions(+), 46 deletions(-)
 rename core/lib/dal/.sqlx/{query-245dc5bb82cc82df38e4440a7746ca08324bc86a72e4ea85c9c7962a6c8c9e30.json => query-25ef41cbeb95d10e4051b822769518daf76a5f283e4298fd12022b0c3db07319.json} (75%)
 rename core/lib/dal/.sqlx/{query-dea22358feed1418430505767d03aa4239d3a8be71b47178b4b8fb11fe898b31.json => query-c81438eae5e2482c57c54941780864174f39e6011fdfdc56490397ce90233055.json} (75%)
 rename core/lib/dal/.sqlx/{query-012bed5d34240ed28c331c8515c381d82925556a4801f678b8786235d525d784.json => query-f3c651a3ecd2aefabef802f32c18779128de288484abea33d338c3304dd66e08.json} (75%)

diff --git a/core/lib/dal/.sqlx/query-245dc5bb82cc82df38e4440a7746ca08324bc86a72e4ea85c9c7962a6c8c9e30.json b/core/lib/dal/.sqlx/query-25ef41cbeb95d10e4051b822769518daf76a5f283e4298fd12022b0c3db07319.json
similarity index 75%
rename from core/lib/dal/.sqlx/query-245dc5bb82cc82df38e4440a7746ca08324bc86a72e4ea85c9c7962a6c8c9e30.json
rename to core/lib/dal/.sqlx/query-25ef41cbeb95d10e4051b822769518daf76a5f283e4298fd12022b0c3db07319.json
index 0b9c4aa59b7a..079246791a98 100644
--- a/core/lib/dal/.sqlx/query-245dc5bb82cc82df38e4440a7746ca08324bc86a72e4ea85c9c7962a6c8c9e30.json
+++ b/core/lib/dal/.sqlx/query-25ef41cbeb95d10e4051b822769518daf76a5f283e4298fd12022b0c3db07319.json
@@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n eth_prove_tx_id = $1,\n updated_at = NOW()\n WHERE\n number BETWEEN $2 AND $3\n ", + "query": "\n UPDATE l1_batches\n SET\n eth_prove_tx_id = $1,\n updated_at = NOW()\n WHERE\n number BETWEEN $2 AND $3\n AND eth_prove_tx_id IS NULL\n ", "describe": { "columns": [], "parameters": {
@@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "245dc5bb82cc82df38e4440a7746ca08324bc86a72e4ea85c9c7962a6c8c9e30" + "hash": "25ef41cbeb95d10e4051b822769518daf76a5f283e4298fd12022b0c3db07319" }

diff --git a/core/lib/dal/.sqlx/query-dea22358feed1418430505767d03aa4239d3a8be71b47178b4b8fb11fe898b31.json b/core/lib/dal/.sqlx/query-c81438eae5e2482c57c54941780864174f39e6011fdfdc56490397ce90233055.json
similarity index 75%
rename from core/lib/dal/.sqlx/query-dea22358feed1418430505767d03aa4239d3a8be71b47178b4b8fb11fe898b31.json
rename to core/lib/dal/.sqlx/query-c81438eae5e2482c57c54941780864174f39e6011fdfdc56490397ce90233055.json
index ef070554c2fd..eb09077290e3 100644
--- a/core/lib/dal/.sqlx/query-dea22358feed1418430505767d03aa4239d3a8be71b47178b4b8fb11fe898b31.json
+++ b/core/lib/dal/.sqlx/query-c81438eae5e2482c57c54941780864174f39e6011fdfdc56490397ce90233055.json
@@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n eth_execute_tx_id = $1,\n updated_at = NOW()\n WHERE\n number BETWEEN $2 AND $3\n ", + "query": "\n UPDATE l1_batches\n SET\n eth_execute_tx_id = $1,\n updated_at = NOW()\n WHERE\n number BETWEEN $2 AND $3\n AND eth_execute_tx_id IS NULL\n ", "describe": { "columns": [], "parameters": {
@@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "dea22358feed1418430505767d03aa4239d3a8be71b47178b4b8fb11fe898b31" + "hash": "c81438eae5e2482c57c54941780864174f39e6011fdfdc56490397ce90233055" }

diff --git a/core/lib/dal/.sqlx/query-012bed5d34240ed28c331c8515c381d82925556a4801f678b8786235d525d784.json b/core/lib/dal/.sqlx/query-f3c651a3ecd2aefabef802f32c18779128de288484abea33d338c3304dd66e08.json
similarity index 75%
rename from core/lib/dal/.sqlx/query-012bed5d34240ed28c331c8515c381d82925556a4801f678b8786235d525d784.json
rename to core/lib/dal/.sqlx/query-f3c651a3ecd2aefabef802f32c18779128de288484abea33d338c3304dd66e08.json
index fbeefdfbf956..7d5467b4459c 100644
--- a/core/lib/dal/.sqlx/query-012bed5d34240ed28c331c8515c381d82925556a4801f678b8786235d525d784.json
+++ b/core/lib/dal/.sqlx/query-f3c651a3ecd2aefabef802f32c18779128de288484abea33d338c3304dd66e08.json
@@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n eth_commit_tx_id = $1,\n updated_at = NOW()\n WHERE\n number BETWEEN $2 AND $3\n ", + "query": "\n UPDATE l1_batches\n SET\n eth_commit_tx_id = $1,\n updated_at = NOW()\n WHERE\n number BETWEEN $2 AND $3\n AND eth_commit_tx_id IS NULL\n ", "describe": { "columns": [], "parameters": {
@@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "012bed5d34240ed28c331c8515c381d82925556a4801f678b8786235d525d784" + "hash": "f3c651a3ecd2aefabef802f32c18779128de288484abea33d338c3304dd66e08" }

diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs
index 467e5437c1fa..3e805e92f5f1 100644
--- a/core/lib/dal/src/blocks_dal.rs
+++ b/core/lib/dal/src/blocks_dal.rs
@@ -406,7 +406,11 @@ impl BlocksDal<'_, '_> { ) -> DalResult<()> { match aggregation_type { AggregatedActionType::Commit => { - sqlx::query!( + let instrumentation = Instrumented::new("set_eth_tx_id#commit") + .with_arg("number_range", &number_range) + .with_arg("eth_tx_id", &eth_tx_id); + + let query = sqlx::query!( r#" UPDATE l1_batches SET
@@ -414,19 +418,30 @@ impl BlocksDal<'_, '_> { updated_at = NOW() WHERE number BETWEEN $2 AND $3 + AND eth_commit_tx_id IS NULL "#, eth_tx_id as i32, i64::from(number_range.start().0), i64::from(number_range.end().0) - ) - .instrument("set_eth_tx_id#commit") - .with_arg("number_range", &number_range) - .with_arg("eth_tx_id", &eth_tx_id) - .execute(self.storage) - .await?; + ); + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Update eth_commit_tx_id that is not null is not allowed" + )); + return Err(err); + } } AggregatedActionType::PublishProofOnchain => { - sqlx::query!( + let instrumentation = Instrumented::new("set_eth_tx_id#prove") + .with_arg("number_range", &number_range) + .with_arg("eth_tx_id", &eth_tx_id); + let query = sqlx::query!( r#" UPDATE l1_batches SET
@@ -434,19 +449,32 @@ impl BlocksDal<'_, '_> { updated_at = NOW() WHERE number BETWEEN $2
AND $3 + AND eth_prove_tx_id IS NULL "#, eth_tx_id as i32, i64::from(number_range.start().0), i64::from(number_range.end().0) - ) - .instrument("set_eth_tx_id#prove") - .with_arg("number_range", &number_range) - .with_arg("eth_tx_id", &eth_tx_id) - .execute(self.storage) - .await?; + ); + + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Update eth_prove_tx_id that is not null is not allowed" + )); + return Err(err); + } } AggregatedActionType::Execute => { - sqlx::query!( + let instrumentation = Instrumented::new("set_eth_tx_id#execute") + .with_arg("number_range", &number_range) + .with_arg("eth_tx_id", &eth_tx_id); + + let query = sqlx::query!( r#" UPDATE l1_batches SET
@@ -454,16 +482,25 @@ impl BlocksDal<'_, '_> { updated_at = NOW() WHERE number BETWEEN $2 AND $3 + AND eth_execute_tx_id IS NULL "#, eth_tx_id as i32, i64::from(number_range.start().0), i64::from(number_range.end().0) - ) - .instrument("set_eth_tx_id#execute") - .with_arg("number_range", &number_range) - .with_arg("eth_tx_id", &eth_tx_id) - .execute(self.storage) - .await?; + ); + + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Update eth_execute_tx_id that is not null is not allowed" + )); + return Err(err); + } } } Ok(())
@@ -2233,15 +2270,14 @@ mod tests { use super::*; use crate::{ConnectionPool, Core, CoreDal}; - #[tokio::test] - async fn loading_l1_batch_header() { - let pool = ConnectionPool::<Core>::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - conn.protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) + async fn save_mock_eth_tx(action_type: AggregatedActionType, conn: &mut Connection<'_, Core>) { + conn.eth_sender_dal() + .save_eth_tx(1, vec![], action_type, Address::default(), 1, None, None) .await .unwrap(); + } + fn mock_l1_batch_header() -> L1BatchHeader { let mut header = L1BatchHeader::new( L1BatchNumber(1), 100,
@@ -2264,6 +2300,100 @@ mod tests { header.l2_to_l1_messages.push(vec![22; 22]); header.l2_to_l1_messages.push(vec![33; 33]); + header + } + + #[tokio::test] + async fn set_tx_id_works_correctly() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + conn.blocks_dal() + .insert_mock_l1_batch(&mock_l1_batch_header()) + .await + .unwrap(); + + save_mock_eth_tx(AggregatedActionType::Commit, &mut conn).await; + save_mock_eth_tx(AggregatedActionType::PublishProofOnchain, &mut conn).await; + save_mock_eth_tx(AggregatedActionType::Execute, &mut conn).await; + + assert!(conn + .blocks_dal() + .set_eth_tx_id( + L1BatchNumber(1)..=L1BatchNumber(1), + 1, + AggregatedActionType::Commit, + ) + .await + .is_ok()); + + assert!(conn + .blocks_dal() + .set_eth_tx_id( + L1BatchNumber(1)..=L1BatchNumber(1), + 2, + AggregatedActionType::Commit, + ) + .await + .is_err()); + + assert!(conn + .blocks_dal() + .set_eth_tx_id( + L1BatchNumber(1)..=L1BatchNumber(1), + 1, + AggregatedActionType::PublishProofOnchain, + ) + .await + .is_ok()); + + assert!(conn + .blocks_dal() + .set_eth_tx_id( + L1BatchNumber(1)..=L1BatchNumber(1), + 2, + AggregatedActionType::PublishProofOnchain, + ) + .await + .is_err()); + + assert!(conn
+ .blocks_dal() + .set_eth_tx_id( + L1BatchNumber(1)..=L1BatchNumber(1), + 1, + AggregatedActionType::Execute, + ) + .await + .is_ok()); + + assert!(conn + .blocks_dal() + .set_eth_tx_id( + L1BatchNumber(1)..=L1BatchNumber(1), + 2, + AggregatedActionType::Execute, + ) + .await + .is_err()); + } + + #[tokio::test] + async fn loading_l1_batch_header() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let header = mock_l1_batch_header(); + conn.blocks_dal() .insert_mock_l1_batch(&header) .await

diff --git a/core/lib/db_connection/src/instrument.rs b/core/lib/db_connection/src/instrument.rs
index c61fad25b1ed..e0728ce22b85 100644
--- a/core/lib/db_connection/src/instrument.rs
+++ b/core/lib/db_connection/src/instrument.rs
@@ -31,7 +31,7 @@ use crate::{ type ThreadSafeDebug<'a> = dyn fmt::Debug + Send + Sync + 'a; /// Logged arguments for an SQL query. -#[derive(Debug, Default)] +#[derive(Debug, Clone, Default)] struct QueryArgs<'a> { inner: Vec<(&'static str, &'a ThreadSafeDebug<'a>)>, }
@@ -180,7 +180,7 @@ impl ActiveCopy<'_> { } } -#[derive(Debug)] +#[derive(Debug, Clone)] struct InstrumentedData<'a> { name: &'static str, location: &'static Location<'static>,
@@ -278,7 +278,7 @@ impl<'a> InstrumentedData<'a> { /// included in the case of a slow query, plus the error info. /// - Slow and erroneous queries are also reported using metrics (`dal.request.slow` and `dal.request.error`, /// respectively). The query name is included as a metric label; args are not included for obvious reasons. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Instrumented<'a, Q> { query: Q, data: InstrumentedData<'a>,

diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs
index aa776311554f..a1ca544f8fe4 100644
--- a/core/node/eth_sender/src/tests.rs
+++ b/core/node/eth_sender/src/tests.rs
@@ -7,6 +7,7 @@ use zksync_config::{ configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig}, ContractsConfig, EthConfig, GasAdjusterConfig, }; +use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{clients::MockEthereum, EthInterface}; use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches};
@@ -20,6 +21,7 @@ use zksync_types::{ }, ethabi::Token, helpers::unix_timestamp_ms, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, pubdata_da::PubdataDA, web3::contract::Error, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, H256,
@@ -43,11 +45,47 @@ static DUMMY_OPERATION: Lazy<AggregatedOperation> = Lazy::new(|| { }) }); +fn get_dummy_operation(number: u32) -> AggregatedOperation { + AggregatedOperation::Execute(ExecuteBatches { + l1_batches: vec![L1BatchWithMetadata { + header: create_l1_batch(number), + metadata: default_l1_batch_metadata(), + raw_published_factory_deps: Vec::new(), + }], + }) +} + const COMMITMENT_MODES: [L1BatchCommitmentMode; 2] = [ L1BatchCommitmentMode::Rollup, L1BatchCommitmentMode::Validium, ]; +fn mock_l1_batch_header(number: u32) -> L1BatchHeader { + let mut header = L1BatchHeader::new( + L1BatchNumber(number), + 100, + BaseSystemContractsHashes { + bootloader: H256::repeat_byte(1), + default_aa: H256::repeat_byte(42), + }, + ProtocolVersionId::latest(), + ); + header.l1_tx_count = 3; + header.l2_tx_count = 5; + header.l2_to_l1_logs.push(UserL2ToL1Log(L2ToL1Log { + shard_id: 0, + is_service: false,
+ tx_number_in_block: 2, + sender: Address::repeat_byte(2), + key: H256::repeat_byte(3), + value: H256::zero(), + })); + header.l2_to_l1_messages.push(vec![22; 22]); + header.l2_to_l1_messages.push(vec![33; 33]); + + header +} + fn mock_multicall_response() -> Token { Token::Array(vec![ Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]),
@@ -228,7 +266,7 @@ async fn confirm_many( ) -> anyhow::Result<()> { let connection_pool = ConnectionPool::<Core>::test_pool().await; let mut tester = EthSenderTester::new( - connection_pool, + connection_pool.clone(), vec![10; 100], false, aggregator_operate_4844_mode, ) .await; let mut hashes = vec![]; - for _ in 0..5 { + connection_pool + .clone() + .connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + for number in 0..5 { + connection_pool + .clone() + .connection() + .await + .unwrap() + .blocks_dal() + .insert_mock_l1_batch(&mock_l1_batch_header(number + 1)) + .await + .unwrap(); let tx = tester .aggregator .save_eth_tx( &mut tester.conn.connection().await.unwrap(), - &DUMMY_OPERATION, + &get_dummy_operation(number + 1), false, ) .await?;
@@ -310,8 +367,9 @@ async fn confirm_many( #[test_casing(2, COMMITMENT_MODES)] #[tokio::test] async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Result<()> { + let connection_pool = ConnectionPool::<Core>::test_pool().await; let mut tester = EthSenderTester::new( - ConnectionPool::<Core>::test_pool().await, + connection_pool.clone(), vec![7, 6, 5, 5, 5, 2, 1], false, false,
@@ -323,12 +381,33 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re tester.gateway.advance_block_number(3); tester.gas_adjuster.keep_updated().await?; + connection_pool + .clone() + .connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + connection_pool + .clone() + .connection() + .await + .unwrap() + .blocks_dal() + .insert_mock_l1_batch(&mock_l1_batch_header(1)) + .await + .unwrap(); + let block = tester.get_block_numbers().await.latest; + let tx = tester .aggregator .save_eth_tx( &mut tester.conn.connection().await.unwrap(), - &DUMMY_OPERATION, + &get_dummy_operation(1), false, ) .await?;
@@ -422,8 +501,9 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re #[test_casing(2, COMMITMENT_MODES)] #[tokio::test] async fn dont_resend_already_mined(commitment_mode: L1BatchCommitmentMode) -> anyhow::Result<()> { + let connection_pool = ConnectionPool::<Core>::test_pool().await; let mut tester = EthSenderTester::new( - ConnectionPool::<Core>::test_pool().await, + connection_pool.clone(), vec![100; 100], false, false, ) .await; + connection_pool + .clone() + .connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + connection_pool + .clone() + .connection() + .await + .unwrap() + .blocks_dal() + .insert_mock_l1_batch(&mock_l1_batch_header(1)) + .await + .unwrap(); + let tx = tester .aggregator .save_eth_tx(
@@ -501,8 +601,9 @@ async fn dont_resend_already_mined(commitment_mode: L1BatchCommitmentMode) -> an #[test_casing(2, COMMITMENT_MODES)] #[tokio::test] async fn three_scenarios(commitment_mode: L1BatchCommitmentMode) -> anyhow::Resu let
connection_pool = ConnectionPool::<Core>::test_pool().await; let mut tester = EthSenderTester::new( - ConnectionPool::<Core>::test_pool().await, + connection_pool.clone(), vec![100; 100], false, false,
@@ -512,12 +613,31 @@ async fn three_scenarios(commitment_mode: L1BatchCommitmentMode) -> anyhow::Resu let mut hashes = vec![]; - for _ in 0..3 { + connection_pool + .clone() + .connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + for number in 0..3 { + connection_pool + .clone() + .connection() + .await + .unwrap() + .blocks_dal() + .insert_mock_l1_batch(&mock_l1_batch_header(number + 1)) + .await + .unwrap(); let tx = tester .aggregator .save_eth_tx( &mut tester.conn.connection().await.unwrap(), - &DUMMY_OPERATION, + &get_dummy_operation(number + 1), false, ) .await .unwrap();
@@ -581,8 +701,9 @@ async fn three_scenarios(commitment_mode: L1BatchCommitmentMode) -> anyhow::Resu #[test_casing(2, COMMITMENT_MODES)] #[tokio::test] async fn failed_eth_tx(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::<Core>::test_pool().await; let mut tester = EthSenderTester::new( - ConnectionPool::<Core>::test_pool().await, + connection_pool.clone(), vec![100; 100], false, false,
@@ -590,6 +711,26 @@ async fn failed_eth_tx(commitment_mode: L1BatchCommitmentMode) { ) .await; + connection_pool + .clone() + .connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + connection_pool + .clone() + .connection() + .await + .unwrap() + .blocks_dal() + .insert_mock_l1_batch(&mock_l1_batch_header(1)) + .await + .unwrap(); + let tx = tester .aggregator .save_eth_tx(

From 7b0df3b22f04f1fdead308ec30572f565b34dd5c Mon Sep 17 00:00:00 2001
From: Joaquin Carletti <56092489+ColoCarletti@users.noreply.github.com>
Date: Mon, 20 May 2024 11:37:08 -0300
Subject: [PATCH 2/4] feat(prover_cli): add general status for batch command (#1953)

This PR adds a check to the batch status command that reports when a batch does not exist or when its proving process has already finished.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `zk spellcheck`.
---
 prover/prover_cli/src/commands/status/batch.rs | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/prover/prover_cli/src/commands/status/batch.rs b/prover/prover_cli/src/commands/status/batch.rs
index 389437f17ac7..6f52170444a5 100644
--- a/prover/prover_cli/src/commands/status/batch.rs
+++ b/prover/prover_cli/src/commands/status/batch.rs
@@ -35,6 +35,22 @@ pub(crate) async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<( "== {} ==", format!("Batch {} Status", batch_data.batch_number).bold() ); + + if let Status::Custom(msg) = batch_data.compressor.witness_generator_jobs_status() { + if msg.contains("Sent to server") { + println!("> Proof sent to server ✅"); + return Ok(()); + } + } + + let basic_witness_generator_status = batch_data + .basic_witness_generator + .witness_generator_jobs_status(); + if matches!(basic_witness_generator_status, Status::JobsNotFound) { + println!("> No batch found.
🚫"); + return Ok(()); + } + if !args.verbose { display_batch_status(batch_data); } else { From a2db264de86253703049faa8926a0512c0f7a6ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Mon, 20 May 2024 16:54:28 +0200 Subject: [PATCH 3/4] fix(ci): Ignore errors in 'show sccache logs' for case when zk service wasn't started (#1990) Signed-off-by: tomg10 --- .../workflows/build-contract-verifier-template.yml | 4 ++-- .github/workflows/build-core-template.yml | 4 ++-- .github/workflows/build-prover-template.yml | 4 ++-- .github/workflows/ci-core-reusable.yml | 12 ++++++------ 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index fab6a6f18a58..07185f77e477 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -143,8 +143,8 @@ jobs: - name: Show sccache stats if: always() run: | - ci_run sccache --show-stats - ci_run cat /tmp/sccache_log.txt + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true create_manifest: name: Create release manifest diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 29b66d991f01..eb8faf5a0ba6 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -152,8 +152,8 @@ jobs: - name: Show sccache stats if: always() run: | - ci_run sccache --show-stats - ci_run cat /tmp/sccache_log.txt + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true create_manifest: name: Create release manifest diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 4da79fccb40a..068118f4ab97 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -145,8 +145,8 @@ jobs: - name: Show sccache stats if: always() run: | - ci_run sccache --show-stats - ci_run cat /tmp/sccache_log.txt + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true copy-images: name: Copy images between docker registries diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 39b389ef94ed..a50c39f62ae8 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -120,8 +120,8 @@ jobs: - name: Show sccache logs if: always() run: | - ci_run sccache --show-stats - ci_run cat /tmp/sccache_log.txt + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true integration: name: Integration (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, deployment_mode=${{ matrix.deployment_mode }}) @@ -268,8 +268,8 @@ jobs: - name: Show sccache logs if: always() run: | - ci_run sccache --show-stats - ci_run cat /tmp/sccache_log.txt + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true external-node: name: External node (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, deployment_mode=${{ matrix.deployment_mode }}) @@ -389,5 +389,5 @@ jobs: - name: Show sccache logs if: always() run: | - ci_run sccache --show-stats - ci_run cat /tmp/sccache_log.txt + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true From e55b11fb2a438cd97d2b5523d0d55d03bc0f0071 Mon Sep 17 00:00:00 2001 From: Agustin Aon <21188659+aon@users.noreply.github.com> Date: Mon, 20 May 2024 
17:34:33 -0300 Subject: [PATCH 4/4] feat: add foundry installation to zk-environment Dockerfile (#1995) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Adds foundry installation to zk-environment Dockerfile ## Why ❔ - This is necessary for adding CI to zk_toolbox ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. --- docker/zk-environment/Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 6690d317d2a3..1ed60f4b95f1 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -125,6 +125,10 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ cd valgrind-3.20.0 && ./configure && make && make install && \ cd ../ && rm -rf valgrind-3.20.0.tar.bz2 && rm -rf valgrind-3.20.0 +# Install foundry +RUN cargo install --git https://github.com/foundry-rs/foundry \ + --profile local --locked forge cast + # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync ENV PATH="${ZKSYNC_HOME}/bin:${PATH}"
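A quick way to sanity-check the new toolchain layer (an illustrative addition, not part of the PR; `forge` and `cast` are the binaries the `cargo install` line above builds):

```dockerfile
# Hypothetical verification step: fail the image build early if the
# foundry binaries installed above are missing or not on PATH.
RUN forge --version && cast --version
```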