diff --git a/.gitignore b/.gitignore
index 3f6af5e8..15eb71df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,6 @@ node_modules
 xtask-end-poll/target
 **/target
 rustc-ice-*
+
+**/borsh_encoding_message_limits_report.csv
+**/abi_encoding_message_limits_report.csv
diff --git a/solana/Cargo.lock b/solana/Cargo.lock
index 6ccdd12e..06b6d706 100644
--- a/solana/Cargo.lock
+++ b/solana/Cargo.lock
@@ -2062,6 +2062,31 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "csv-async"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d37fe5b0d07f4a8260ce1e9a81413e88f459af0f2dfc55c15e96868a2f99c0f0"
+dependencies = [
+ "cfg-if 1.0.0",
+ "csv-core",
+ "futures 0.3.30",
+ "itoa",
+ "ryu",
+ "serde",
+ "tokio",
+ "tokio-stream",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "ctr"
 version = "0.9.2"
@@ -2280,6 +2305,37 @@ dependencies = [
  "syn 1.0.109",
 ]
 
+[[package]]
+name = "derive_builder"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7"
+dependencies = [
+ "derive_builder_macro",
+]
+
+[[package]]
+name = "derive_builder_core"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d"
+dependencies = [
+ "darling",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.70",
+]
+
+[[package]]
+name = "derive_builder_macro"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b"
+dependencies = [
+ "derive_builder_core",
+ "syn 2.0.70",
+]
+
 [[package]]
 name = "derive_more"
 version = "0.99.18"
@@ -11325,6 +11381,8 @@ dependencies = [
  "const_format",
  "cosmrs",
  "cosmwasm-schema 2.0.4",
+ "csv-async",
+ "derive_builder",
  "ethers",
  "evm-contracts-test-suite",
  "eyre",
@@ -11340,6 +11398,7 @@ dependencies = [
  "multisig",
  "proc-macro2",
  "quote",
+ "rand 0.8.5",
  "reqwest 0.12.5",
  "router-api",
  "rust_decimal",
@@ -11349,6 +11408,7 @@ dependencies = [
  "solana-cli-config",
  "solana-client",
  "solana-program",
+ "solana-rpc",
  "solana-rpc-client-api",
  "solana-sdk",
  "solana-streamer",
@@ -11357,6 +11417,7 @@ dependencies = [
  "tar",
  "tempfile",
  "test-fixtures",
+ "thiserror",
  "tokio",
  "toml 0.8.14",
  "tracing",
diff --git a/solana/xtask/Cargo.toml b/solana/xtask/Cargo.toml
index 1d7b301e..62b8cf35 100644
--- a/solana/xtask/Cargo.toml
+++ b/solana/xtask/Cargo.toml
@@ -35,6 +35,7 @@ ethers.workspace = true
 evm-contracts-test-suite.workspace = true
 axelar-solana-memo-program.workspace = true
 solana-cli-config = "2.0.1"
+solana-rpc = "2.0.1"
 solana-rpc-client-api = "2.0.1"
 solana-test-validator = "2.0.5"
 solana-transaction-status = "2.0.1"
@@ -63,6 +64,10 @@ router-api = { path = "../../axelar-amplifier/packages/router-api" }
 axelar-rkyv-encoding.workspace = true
 axelar-executable.workspace = true
 itertools.workspace = true
+rand.workspace = true
+thiserror.workspace = true
+derive_builder = "0.20"
+csv-async = { version = "1.3.0", features = ["tokio"] }
 
 [dev-dependencies]
 solana-program.workspace = true
diff --git a/solana/xtask/src/cli.rs b/solana/xtask/src/cli.rs
index a547921a..6ece0387 100644
--- a/solana/xtask/src/cli.rs
+++ b/solana/xtask/src/cli.rs
@@ -2,6 +2,7 @@
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Duration;
+use axelar_message_primitives::EncodingScheme;
 use clap::{Parser, Subcommand};
 use cmd::solana::SolanaContract;
 use ethers::core::k256::ecdsa::SigningKey;
@@ -219,6 +220,11 @@ pub(crate) enum Solana {
     MessageLimitsReport {
         /// Where to output the report
         output_dir: PathBuf,
+
+        /// Enable the ABI encoding scheme. When omitted, Borsh
+        /// encoding is used.
+        #[arg(short, long)]
+        abi_encoding: bool,
     },
     Init {
         #[command(subcommand)]
@@ -448,8 +454,17 @@ async fn handle_solana(command: Solana) -> eyre::Result<()> {
                 ws_url.as_ref(),
             )?;
         }
-        Solana::MessageLimitsReport { output_dir } => {
-            cmd::solana::generate_message_limits_report(&output_dir).await?;
+        Solana::MessageLimitsReport {
+            output_dir,
+            abi_encoding,
+        } => {
+            let encoding = if abi_encoding {
+                EncodingScheme::AbiEncoding
+            } else {
+                EncodingScheme::Borsh
+            };
+
+            cmd::solana::generate_message_limits_report(&output_dir, encoding).await?;
         }
         Solana::Init { contract } => match contract {
             SolanaInitSubcommand::GmpGateway {
diff --git a/solana/xtask/src/cli/cmd/solana/message_limits.rs b/solana/xtask/src/cli/cmd/solana/message_limits.rs
index 47235b6e..16b4a515 100644
--- a/solana/xtask/src/cli/cmd/solana/message_limits.rs
+++ b/solana/xtask/src/cli/cmd/solana/message_limits.rs
@@ -8,27 +8,34 @@
 use axelar_message_primitives::command::U256;
 use axelar_message_primitives::{DataPayload, EncodingScheme};
 use axelar_rkyv_encoding::test_fixtures::random_weight;
 use axelar_rkyv_encoding::types::{HasheableMessageVec, Message, Payload};
+use derive_builder::Builder;
 use gmp_gateway::axelar_auth_weighted::RotationDelaySecs;
 use gmp_gateway::commands::OwnedCommand;
 use gmp_gateway::instructions::{InitializeConfig, VerifierSetWraper};
 use gmp_gateway::state::{GatewayApprovedCommand, GatewayConfig, GatewayExecuteData};
 use itertools::izip;
+use rand::distributions::Alphanumeric;
+use rand::Rng;
+use serde::Serialize;
 use solana_client::client_error::ClientErrorKind;
 use solana_client::nonblocking::rpc_client::RpcClient;
+use solana_client::rpc_config::RpcTransactionConfig;
 use solana_client::rpc_request::RpcError;
+use solana_rpc::rpc::JsonRpcConfig;
 use solana_rpc_client_api::client_error::Error as RpcClientError;
+use solana_sdk::commitment_config::CommitmentConfig;
 use solana_sdk::compute_budget::ComputeBudgetInstruction;
 use solana_sdk::instruction::{AccountMeta, Instruction};
 use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::{Keypair, Signature};
+use solana_sdk::signature::Keypair;
 use solana_sdk::signer::Signer;
 use solana_sdk::transaction::Transaction;
 use solana_test_validator::{TestValidator, TestValidatorGenesis, UpgradeableProgramInfo};
+use solana_transaction_status::{Encodable, UiTransactionEncoding};
 use test_fixtures::axelar_message::custom_message;
 use test_fixtures::execute_data::prepare_execute_data;
 use test_fixtures::test_setup::{self, SigningVerifierSet};
 use tokio::fs::File;
-use tokio::io::{AsyncWriteExt, BufWriter};
 use tokio::sync::Mutex;
 use tracing::level_filters::LevelFilter;
@@ -41,15 +48,24 @@ const DEFAULT_PREVIOUS_SIGNERS_RETENTION: U256 = U256::from_u64(4);
 const DOMAIN_SEPARATOR: [u8; 32] = [0u8; 32];
 const LEDGER_PATH: &str = "/tmp/ledger";
 const MAX_CONCURRENT_ITERATIONS: usize = 200;
-const MESSAGES_PER_BATCH_RANGE: std::ops::Range<usize> = 1..usize::MAX;
-const MESSAGE_SIZE_RANGE: std::ops::Range<usize> = 0..usize::MAX;
-const NONCE: u64 = 55;
-const SIGNERS_AMOUNT_RANGE: std::ops::Range<usize> = 1..usize::MAX;
 const MESSAGE_SIZE_EXCEEDED_ERROR: i64 = -32602;
+const NONCE: u64 = 55;
+const OUTPUT_FILENAME: &str = "message_limits_report.csv";
+
+const MIN_MESSAGE_SIZE: usize = 0;
+const MIN_MESSAGES_PER_BATCH: usize = 1;
+const MIN_SIGNERS_AMOUNT: usize = 1;
+const MIN_ACCOUNTS_AMOUNT: usize = 0;
+
+const MESSAGES_PER_BATCH_RANGE: std::ops::Range<usize> = MIN_MESSAGES_PER_BATCH..usize::MAX;
+const MESSAGE_SIZE_RANGE: std::ops::Range<usize> = MIN_MESSAGE_SIZE..usize::MAX;
+const SIGNERS_AMOUNT_RANGE: std::ops::Range<usize> = MIN_SIGNERS_AMOUNT..usize::MAX;
+const ACCOUNTS_AMOUNT_RANGE: std::ops::Range<usize> = MIN_ACCOUNTS_AMOUNT..usize::MAX;
 
-static BREAK_MESSAGE_SIZE: AtomicBool = AtomicBool::new(false);
 static BREAK_BATCH_SIZE: AtomicBool = AtomicBool::new(false);
+static BREAK_MESSAGE_SIZE: AtomicBool = AtomicBool::new(false);
 static BREAK_SIGNERS_SIZE: AtomicBool = AtomicBool::new(false);
+static BREAK_ACCOUNTS_SIZE: AtomicBool = AtomicBool::new(false);
 
 /// This function spins up a test validator with gateway and memo program
 /// deployed and performs the flow for incoming gateway messages,
@@ -59,32 +75,35 @@ static BREAK_SIGNERS_SIZE: AtomicBool = AtomicBool::new(false);
 /// - 3. initialize pending commands
 /// - 4. approve messages
 /// - 5. execute messages
-/// - 6. validate message execution
+/// - 6. validate message execution (this is a CPI from the memo program to the
+/// gateway)
 ///
 /// This is done with different combinations of message size, number of messages
 /// per transaction and number of signers. The combinations that end up in
 /// successful iterations are recorded in a csv file saved in the directory
 /// passed as argument to the function.
-pub(crate) async fn generate_message_limits_report(output_dir: &Path) -> eyre::Result<()> {
+pub(crate) async fn generate_message_limits_report(
+    output_dir: &Path,
+    encoding: EncodingScheme,
+) -> eyre::Result<()> {
     setup_panic_hook();
     change_log_level(LevelFilter::ERROR);
     build_contracts(Some(&[path::gateway_manifest(), path::memo_manifest()]))?;
 
-    let mut report_file = File::create(output_dir.join("message_limits_report.csv")).await?;
-    report_file
-        .write_all(b"number_of_signers,messages_per_batch,message_size(bytes)\n")
-        .await?;
-    let writer = Arc::new(Mutex::new(BufWriter::new(report_file)));
+    let file_name = get_filename(encoding);
+    let file_path = output_dir.join(file_name);
+    let writer = Arc::new(Mutex::new(csv_async::AsyncSerializer::from_writer(
+        File::create(file_path).await?,
+    )));
 
-    'n_signers: for n_signers in SIGNERS_AMOUNT_RANGE {
+    'signers: for num_signers in SIGNERS_AMOUNT_RANGE {
         let (validator, keypair) = clean_ledger_setup_validator().await;
         let initial_signers = test_setup::make_signers(
-            &(0..n_signers)
+            &(0..num_signers)
                 .map(|_| random_weight().into())
                 .collect::<Vec<_>>(),
             NONCE,
         );
-
         let keypair = Arc::new(keypair);
         let validator_rpc_client = Arc::new(validator.get_async_rpc_client());
         let (gateway_config_pda, counter) = initialize_programs(
@@ -97,105 +116,139 @@ pub(crate) async fn generate_message_limits_report(output_dir: &Path) -> eyre::R
         let initial_signers = Arc::new(initial_signers);
         let counter = Arc::new(counter.0);
 
-        'n_messages: for n_messages in MESSAGES_PER_BATCH_RANGE {
-            let mut iterations = Vec::with_capacity(MAX_CONCURRENT_ITERATIONS);
+        'batch: for batch_size in MESSAGES_PER_BATCH_RANGE {
+            'accounts: for num_additional_accounts in ACCOUNTS_AMOUNT_RANGE {
+                let mut iterations = Vec::with_capacity(MAX_CONCURRENT_ITERATIONS);
 
-            for message_size in MESSAGE_SIZE_RANGE {
-                let writer = writer.clone();
-                let counter = counter.clone();
-                let keypair = keypair.clone();
-                let gateway_config_pda = gateway_config_pda.clone();
-                let initial_signers = initial_signers.clone();
-                let validator_rpc_client = validator_rpc_client.clone();
-
-                iterations.push(async move {
-                    match try_iteration_with_params(
+                for message_size in MESSAGE_SIZE_RANGE {
+                    let inputs = IterationInputs {
+                        num_signers,
+                        batch_size,
                         message_size,
-                        n_messages,
-                        keypair.clone(),
-                        gateway_config_pda.clone(),
-                        initial_signers.clone(),
-                        counter.clone(),
-                        validator_rpc_client.clone(),
-                    )
-                    .await
-                    {
-                        Err(RpcClientError {
-                            request: _,
-                            kind:
-                                ClientErrorKind::RpcError(RpcError::RpcResponseError {
-                                    code: MESSAGE_SIZE_EXCEEDED_ERROR,
-                                    ..
-                                }),
-                        }) => {
-                            match (n_messages, message_size) {
-                                // In case we are at the first iteration of both inner loops
-                                // and we fail, we reached the overall limit and should stop
-                                // running.
-                                (1, 0) => BREAK_SIGNERS_SIZE.store(true, Ordering::Relaxed),
-
-                                // In case we fail within the first iteration of the innermost
-                                // loop but not the first iteration of the middle loop, it
-                                // means we've stressed out the possible combination of batch
-                                // size and message size for the current number of signers, so
-                                // we break the loop and move to the next number of signers.
-                                (_, 0) => BREAK_BATCH_SIZE.store(true, Ordering::Relaxed),
-
-                                // In case we fail and it's not first iteration, just break the
-                                // innermost loop and try the next batch size.
-                                (_, _) => BREAK_MESSAGE_SIZE.store(true, Ordering::Relaxed),
-                            };
-                        }
-                        Ok(()) => {
-                            writer
-                                .lock()
-                                .await
-                                .write_all(
-                                    format!("{n_signers},{n_messages},{message_size}\n",)
-                                        .as_bytes(),
-                                )
-                                .await
-                                .expect("Failed to write to report file");
-                        }
-                        Err(err) => {
-                            panic!("Unexpected error occurred: {err}")
-                        }
+                        num_additional_accounts,
+                        keypair: keypair.clone(),
+                        gateway_config_pda: gateway_config_pda.clone(),
+                        signers: initial_signers.clone(),
+                        counter_pda: counter.clone(),
+                        validator_rpc_client: validator_rpc_client.clone(),
+                        encoding,
+                    };
+                    let writer = writer.clone();
+
+                    iterations.push(async move {
+                        let iteration_output = try_iteration_with_params(inputs).await;
+                        evaluate_iteration_with_side_effects(
+                            iteration_output,
+                            writer,
+                            batch_size,
+                            num_additional_accounts,
+                            message_size,
+                        )
+                        .await;
+                    });
+
+                    if iterations.len() == MAX_CONCURRENT_ITERATIONS {
+                        futures::future::join_all(iterations).await;
+                        iterations = Vec::with_capacity(MAX_CONCURRENT_ITERATIONS);
+                    }
+                    if BREAK_MESSAGE_SIZE.swap(false, Ordering::Relaxed) {
+                        break;
+                    }
+                    if BREAK_ACCOUNTS_SIZE.swap(false, Ordering::Relaxed) {
+                        break 'accounts;
+                    }
+                    if BREAK_BATCH_SIZE.swap(false, Ordering::Relaxed) {
+                        break 'batch;
+                    }
+                    if BREAK_SIGNERS_SIZE.swap(false, Ordering::Relaxed) {
+                        break 'signers;
                     }
-                });
-
-                if iterations.len() == MAX_CONCURRENT_ITERATIONS {
-                    futures::future::join_all(iterations).await;
-                    iterations = Vec::with_capacity(MAX_CONCURRENT_ITERATIONS);
-                }
-
-                if BREAK_MESSAGE_SIZE.swap(false, Ordering::Relaxed) {
-                    break;
-                }
-
-                if BREAK_BATCH_SIZE.swap(false, Ordering::Relaxed) {
-                    break 'n_messages;
-                }
-
-                if BREAK_SIGNERS_SIZE.swap(false, Ordering::Relaxed) {
-                    break 'n_signers;
                 }
             }
         }
     }
 
-    writer.lock().await.shutdown().await?;
+    writer.lock().await.flush().await?;
 
     Ok(())
 }
 
+#[derive(Debug, thiserror::Error)]
+enum Error {
+    #[error("Transaction missing metadata information")]
+    TransactionMissingMetadata,
+
+    #[error("RPC client error: {0}")]
+    RpcClient(#[from] RpcClientError),
+
+    #[error("Solana program error: {0}")]
+    Program(#[from] solana_sdk::program_error::ProgramError),
+
+    #[error("Gateway error: {0}")]
+    Gateway(#[from] gmp_gateway::error::GatewayError),
+
+    #[error("Error building the csv row: {0}")]
+    RowBuilder(#[from] RowBuilderError),
+
+    #[error("Error encoding data payload: {0}")]
+    Payload(#[from] axelar_message_primitives::PayloadError),
+
+    #[error("Unexpected error: {0}")]
+    Unexpected(#[from] Box),
+}
+
+#[derive(Debug, Builder, Serialize)]
+struct Row {
+    #[serde(rename = "number_of_signers")]
+    num_signers: usize,
+
+    #[serde(rename = "number_of_messages_per_batch")]
+    batch_size: usize,
+
+    #[serde(rename = "message_size(bytes)")]
+    message_size: usize,
+
+    #[serde(rename = "number_of_accounts")]
+    num_accounts: usize,
+
+    #[serde(rename = "execute_data_size(bytes)")]
+    execute_data_size: usize,
+
+    #[serde(rename = "init_approve_messages_pda_tx_size(bytes)")]
+    init_approve_messages_pda_tx_size: usize,
+
+    #[serde(rename = "init_pending_cmd_tx_size(bytes)")]
+    init_pending_cmd_tx_size: usize,
+
+    #[serde(rename = "approve_messages_tx_size(bytes)")]
+    approve_messages_tx_size: usize,
+
+    #[serde(rename = "memo_call_tx_size(bytes)")]
+    memo_call_tx_size: usize,
+
+    total_compute_units: u64,
+}
+
+struct IterationInputs {
+    num_signers: usize,
+    batch_size: usize,
+    message_size: usize,
+    num_additional_accounts: usize,
+    keypair: Arc<Keypair>,
+    gateway_config_pda: Arc<Pubkey>,
+    signers: Arc<SigningVerifierSet>,
+    counter_pda: Arc<Pubkey>,
+    validator_rpc_client: Arc<RpcClient>,
+    encoding: EncodingScheme,
+}
+
 async fn initialize_programs(
     initial_signers: &SigningVerifierSet,
     keypair: Arc<Keypair>,
     validator_rpc_client: Arc<RpcClient>,
-) -> eyre::Result<(Pubkey, (Pubkey, u8))> {
+) -> Result<(Pubkey, (Pubkey, u8)), Error> {
     let (gateway_config_pda, _) = GatewayConfig::pda();
-    let verifier_set = VerifierSetWraper::new_from_verifier_set(initial_signers.verifier_set())
-        .expect("Failed to create verifier set");
+    let verifier_set = VerifierSetWraper::new_from_verifier_set(initial_signers.verifier_set())?;
     let initialize_config = InitializeConfig {
         domain_separator: DOMAIN_SEPARATOR,
         initial_signer_sets: vec![verifier_set],
@@ -203,121 +256,300 @@ async fn initialize_programs(
         operator: Pubkey::new_unique(),
         previous_signers_retention: DEFAULT_PREVIOUS_SIGNERS_RETENTION,
     };
-
-    let ix = gmp_gateway::instructions::initialize_config(
+    let instruction = gmp_gateway::instructions::initialize_config(
         keypair.pubkey(),
         initialize_config,
         gateway_config_pda,
     )?;
-    submit_transaction(validator_rpc_client.clone(), keypair.clone(), &[ix]).await?;
+    submit_transaction(
+        validator_rpc_client.clone(),
+        keypair.clone(),
+        &[instruction],
+        false,
+    )
+    .await?;
 
     let counter = axelar_solana_memo_program::get_counter_pda(&gateway_config_pda);
-    let ix = axelar_solana_memo_program::instruction::initialize(
+    let instruction = axelar_solana_memo_program::instruction::initialize(
        &keypair.pubkey(),
        &gateway_config_pda,
        &counter,
    )?;
-    submit_transaction(validator_rpc_client.clone(), keypair.clone(), &[ix]).await?;
+    submit_transaction(
+        validator_rpc_client.clone(),
+        keypair.clone(),
+        &[instruction],
+        false,
+    )
+    .await?;
 
     Ok((gateway_config_pda, counter))
 }
 
-async fn try_iteration_with_params(
-    message_size: usize,
-    n_messages: usize,
-    keypair: Arc<Keypair>,
-    gateway_config_pda: Arc<Pubkey>,
-    signers: Arc<SigningVerifierSet>,
-    counter_pda: Arc<Pubkey>,
-    validator_rpc_client: Arc<RpcClient>,
-) -> Result<(), RpcClientError> {
-    let payload_data = vec![0xF; message_size];
-    let (messages, data_payloads): (Vec<Message>, Vec<DataPayload<'_>>) = (0..n_messages)
-        .map(|_| make_message_with_payload_data(&payload_data, *counter_pda))
-        .unzip();
-    let (payload, commands) = payload_and_commands(&messages);
-    let (raw_execute_data, _) = prepare_execute_data(payload, signers.as_ref(), &DOMAIN_SEPARATOR);
+async fn do_init_approve_messages_execute_data(
+    inputs: &IterationInputs,
+    payload: Payload,
+    row_builder: &mut RowBuilder,
+) -> Result<(Pubkey, u64), Error> {
+    let (raw_execute_data, _) =
+        prepare_execute_data(payload, inputs.signers.as_ref(), &DOMAIN_SEPARATOR);
     let execute_data = GatewayExecuteData::::new(
         &raw_execute_data,
-        gateway_config_pda.as_ref(),
+        inputs.gateway_config_pda.as_ref(),
         &DOMAIN_SEPARATOR,
-    )
-    .expect("Failed to create execute data");
+    )?;
     let (execute_data_pda, _) = gmp_gateway::get_execute_data_pda(
-        gateway_config_pda.as_ref(),
+        inputs.gateway_config_pda.as_ref(),
         &execute_data.hash_decoded_contents(),
     );
-
-    let (ix, _) = gmp_gateway::instructions::initialize_approve_messages_execute_data(
-        keypair.pubkey(),
-        *gateway_config_pda,
+    let (instruction, _) = gmp_gateway::instructions::initialize_approve_messages_execute_data(
+        inputs.keypair.pubkey(),
+        *inputs.gateway_config_pda,
         &DOMAIN_SEPARATOR,
         &raw_execute_data,
+    )?;
+    let (init_approve_messages_pda_tx_size, compute_units) = submit_transaction(
+        inputs.validator_rpc_client.clone(),
+        inputs.keypair.clone(),
+        &[instruction],
+        true,
     )
-    .expect("Failed to create execute data instruction");
-    submit_transaction(validator_rpc_client.clone(), keypair.clone(), &[ix]).await?;
+    .await?;
 
-    let pubkey = keypair.pubkey();
-    let (gateway_approved_command_pdas, ixs): (Vec<_>, Vec<_>) = commands
+    row_builder.init_approve_messages_pda_tx_size(init_approve_messages_pda_tx_size);
+    row_builder.execute_data_size(raw_execute_data.len());
+
+    Ok((execute_data_pda, compute_units.unwrap()))
+}
+
+async fn do_init_pending_commands(
+    inputs: &IterationInputs,
+    commands: Vec<OwnedCommand>,
+    row_builder: &mut RowBuilder,
+) -> Result<(Vec<Pubkey>, u64), Error> {
+    let pubkey = inputs.keypair.pubkey();
+    let (gateway_approved_command_pdas, instructions): (Vec<_>, Vec<_>) = commands
         .iter()
         .map(|command| {
             let (gateway_approved_message_pda, _bump, _seeds) =
-                GatewayApprovedCommand::pda(gateway_config_pda.as_ref(), command);
-            let ix = gmp_gateway::instructions::initialize_pending_command(
-                gateway_config_pda.as_ref(),
+                GatewayApprovedCommand::pda(inputs.gateway_config_pda.as_ref(), command);
+            let instruction = gmp_gateway::instructions::initialize_pending_command(
+                inputs.gateway_config_pda.as_ref(),
                 &pubkey,
                 command.clone(),
             )
             .unwrap();
-            (gateway_approved_message_pda, ix)
+            (gateway_approved_message_pda, instruction)
         })
         .unzip();
-    submit_transaction(validator_rpc_client.clone(), keypair.clone(), &ixs).await?;
+    let (init_pending_cmd_tx_size, cus) = submit_transaction(
+        inputs.validator_rpc_client.clone(),
+        inputs.keypair.clone(),
+        &instructions,
+        true,
+    )
+    .await?;
 
-    let approve_messages_ix = gmp_gateway::instructions::approve_messages(
+    row_builder.init_pending_cmd_tx_size(init_pending_cmd_tx_size);
+
+    Ok((gateway_approved_command_pdas, cus.unwrap()))
+}
+
+async fn do_approve_messages(
+    inputs: &IterationInputs,
+    execute_data_pda: Pubkey,
+    gateway_approved_command_pdas: &[Pubkey],
+    row_builder: &mut RowBuilder,
+) -> Result<u64, Error> {
+    let approve_messages_instruction = gmp_gateway::instructions::approve_messages(
         execute_data_pda,
-        *gateway_config_pda,
-        &gateway_approved_command_pdas,
-        signers.verifier_set_tracker(),
-    )
-    .expect("Failed to create approve messages instruction");
-    let bump_budget = ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX);
-    submit_transaction(
-        validator_rpc_client.clone(),
-        keypair.clone(),
-        &[bump_budget.clone(), approve_messages_ix],
+        *inputs.gateway_config_pda,
+        gateway_approved_command_pdas,
+        inputs.signers.verifier_set_tracker(),
+    )?;
+
+    let (approve_messages_tx_size, cus) = submit_transaction(
+        inputs.validator_rpc_client.clone(),
+        inputs.keypair.clone(),
+        &[
+            ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX),
+            approve_messages_instruction,
+        ],
+        true,
     )
     .await?;
 
-    for (m, data_payload, gateway_approved_command_pda) in
+    row_builder.approve_messages_tx_size(approve_messages_tx_size);
+
+    Ok(cus.unwrap())
+}
+
+async fn do_memo_program_calls(
+    inputs: &IterationInputs,
+    messages: Vec<Message>,
+    data_payloads: &[DataPayload<'_>],
+    gateway_approved_command_pdas: Vec<Pubkey>,
+    row_builder: &mut RowBuilder,
+) -> Result<u64, Error> {
+    let mut memo_call_tx_size = 0;
+    let mut total_compute_units = 0;
+
+    for (message, data_payload, gateway_approved_command_pda) in
         izip!(messages, data_payloads, gateway_approved_command_pdas)
     {
-        let ix = axelar_executable::construct_axelar_executable_ix(
-            m,
-            data_payload.encode().unwrap(),
+        let instruction = axelar_executable::construct_axelar_executable_ix(
+            message,
+            data_payload.encode()?,
             gateway_approved_command_pda,
-            *gateway_config_pda,
-        )
-        .unwrap();
-
-        submit_transaction(
-            validator_rpc_client.clone(),
-            keypair.clone(),
-            &[bump_budget.clone(), ix],
+            *inputs.gateway_config_pda,
+        )?;
+
+        let (transaction_size, compute_units) = submit_transaction(
+            inputs.validator_rpc_client.clone(),
+            inputs.keypair.clone(),
+            &[
+                ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX),
+                instruction,
+            ],
+            true,
        )
        .await?;
+
+        // They should all be the same size
+        memo_call_tx_size = transaction_size.max(memo_call_tx_size);
+        total_compute_units += compute_units.unwrap();
     }
 
-    Ok(())
+    row_builder.memo_call_tx_size(memo_call_tx_size);
+
+    Ok(total_compute_units)
+}
+
+async fn try_iteration_with_params(inputs: IterationInputs) -> Result<Row, Error> {
+    let mut total_compute_units = 0;
+    let mut csv_row_builder = RowBuilder::default();
+    csv_row_builder
+        .num_signers(inputs.num_signers)
+        .batch_size(inputs.batch_size)
+        .num_accounts(inputs.num_additional_accounts + 1)
+        .message_size(inputs.message_size);
+    let payload_data: String = rand::thread_rng()
+        .sample_iter(&Alphanumeric)
+        .take(inputs.message_size)
+        .map(char::from)
+        .collect();
+    let (messages, data_payloads): (Vec<Message>, Vec<DataPayload<'_>>) = (0..inputs.batch_size)
+        .map(|_| {
+            make_message_with_payload_data(
+                payload_data.as_bytes(),
+                *inputs.counter_pda,
+                inputs.num_additional_accounts,
+                inputs.encoding,
+            )
+        })
+        .unzip();
+    let (payload, commands) = payload_and_commands(&messages);
+    let (execute_data_pda, compute_units) =
+        do_init_approve_messages_execute_data(&inputs, payload, &mut csv_row_builder).await?;
+
+    total_compute_units += compute_units;
+
+    let (gateway_approved_command_pdas, compute_units) =
+        do_init_pending_commands(&inputs, commands, &mut csv_row_builder).await?;
+
+    total_compute_units += compute_units;
+
+    total_compute_units += do_approve_messages(
+        &inputs,
+        execute_data_pda,
+        &gateway_approved_command_pdas,
+        &mut csv_row_builder,
+    )
+    .await?;
+
+    total_compute_units += do_memo_program_calls(
+        &inputs,
+        messages,
+        &data_payloads,
+        gateway_approved_command_pdas,
+        &mut csv_row_builder,
+    )
+    .await?;
+
+    csv_row_builder.total_compute_units(total_compute_units);
+
+    Ok(csv_row_builder.build()?)
+}
+
+async fn evaluate_iteration_with_side_effects(
+    result: Result<Row, Error>,
+    writer: Arc<Mutex<csv_async::AsyncSerializer<File>>>,
+    batch_size: usize,
+    num_additional_accounts: usize,
+    message_size: usize,
+) {
+    match result {
+        Err(Error::RpcClient(RpcClientError {
+            request: _,
+            kind:
+                ClientErrorKind::RpcError(RpcError::RpcResponseError {
+                    code: MESSAGE_SIZE_EXCEEDED_ERROR,
+                    message,
+                    ..
+                }),
+        })) => {
+            tracing::error!("{message}");
+
+            match (batch_size, num_additional_accounts, message_size) {
+                // In case we are at the first iteration of all inner loops and we fail, we
+                // reached the overall limit and should stop running.
+                (MIN_MESSAGES_PER_BATCH, MIN_ACCOUNTS_AMOUNT, MIN_MESSAGE_SIZE) => {
+                    BREAK_SIGNERS_SIZE.store(true, Ordering::Relaxed);
+                }
+
+                // In case we fail within the first iteration of the two innermost loops but not
+                // the first iteration of the batch_size loop, it means we've stressed out the
+                // possible combination of batch size, number of accounts and message size for the
+                // current number of signers, so we break the loop and move to the next number of
+                // signers.
+                (_, MIN_ACCOUNTS_AMOUNT, MIN_MESSAGE_SIZE) => {
+                    BREAK_BATCH_SIZE.store(true, Ordering::Relaxed);
+                }
+
+                // In case we fail within the first iteration of the innermost loop but not the
+                // first iteration of the other loops, it means we've stressed out the possible
+                // combination of message size and number of accounts for the
+                // current batch size and number of signers, so we break the loop
+                // and move to the next batch size.
+                (_, _, MIN_MESSAGE_SIZE) => BREAK_ACCOUNTS_SIZE.store(true, Ordering::Relaxed),
+
+                // In case we fail and it's not the first iteration, just break the
+                // innermost loop and try the next number of accounts.
+                (_, _, _) => BREAK_MESSAGE_SIZE.store(true, Ordering::Relaxed),
+            };
+        }
+        Ok(csv_row) => {
+            writer
+                .lock()
+                .await
+                .serialize(csv_row)
+                .await
+                .expect("Failed to write csv row");
+        }
+        Err(error) => {
+            panic!("Unexpected error occurred: {error}")
+        }
+    };
 }
 
 async fn submit_transaction(
     rpc_client: Arc<RpcClient>,
     wallet_signer: Arc<Keypair>,
     instructions: &[Instruction],
-) -> Result<Signature, RpcClientError> {
+    get_compute_units: bool,
+) -> Result<(usize, Option<u64>), Error> {
     let recent_blockhash = rpc_client.get_latest_blockhash().await?;
     let transaction = Transaction::new_signed_with_payer(
         instructions,
@@ -325,18 +557,68 @@ async fn submit_transaction(
         &[&wallet_signer],
         recent_blockhash,
     );
-    rpc_client.send_and_confirm_transaction(&transaction).await
+    let transaction_encoded = transaction.encode(UiTransactionEncoding::Base64);
+    let tx_size = match transaction_encoded {
+        solana_transaction_status::EncodedTransaction::LegacyBinary(b)
+        | solana_transaction_status::EncodedTransaction::Binary(b, _) => b.len(),
+        _ => 0,
+    };
+
+    let signature = rpc_client
+        .send_and_confirm_transaction(&transaction)
+        .await?;
+
+    let compute_units = if get_compute_units {
+        // Loop until we get the confirmed transaction metadata.
+        let transaction = loop {
+            match rpc_client
+                .get_transaction_with_config(
+                    &signature,
+                    RpcTransactionConfig {
+                        encoding: Some(UiTransactionEncoding::Json),
+                        commitment: Some(CommitmentConfig::confirmed()),
+                        max_supported_transaction_version: None,
+                    },
+                )
+                .await
+            {
+                Ok(confirmed_tx) => break confirmed_tx.transaction,
+                Err(e) => tracing::error!(
+                    "Error trying to fetch transaction information: {e}\nRetrying..."
+                ),
+            }
+        };
+
+        Some(
+            transaction
+                .meta
+                .and_then(|meta| Option::from(meta.compute_units_consumed))
+                .ok_or(Error::TransactionMissingMetadata)?,
+        )
+    } else {
+        None
+    };
+
+    Ok((tx_size, compute_units))
 }
 
 async fn clean_ledger_setup_validator() -> (TestValidator, Keypair) {
     if PathBuf::from_str(LEDGER_PATH).unwrap().exists() {
-        std::fs::remove_dir_all(LEDGER_PATH).unwrap();
+        let _ = std::fs::remove_dir_all(LEDGER_PATH).inspect_err(|e| {
+            tracing::warn!("Failed to remove ledger directory: {e}");
+        });
     }
 
     setup_validator().await
 }
 
 async fn setup_validator() -> (TestValidator, Keypair) {
     let mut seed_validator = TestValidatorGenesis::default();
+    let mut rpc_config = JsonRpcConfig::default_for_test();
+
+    rpc_config.enable_rpc_transaction_history = true;
+
+    seed_validator.rpc_config(rpc_config);
+
     let gateway_program_id = gmp_gateway::id();
     let gateway_program_path =
         super::path::contracts_artifact_dir().join(SolanaContract::GmpGateway.file());
@@ -364,12 +646,29 @@ async fn setup_validator() -> (TestValidator, Keypair) {
         .await
 }
 
-fn make_message_with_payload_data(data: &[u8], counter_pda: Pubkey) -> (Message, DataPayload<'_>) {
-    let payload = DataPayload::new(
-        data,
-        &[AccountMeta::new(counter_pda, false)],
-        EncodingScheme::Borsh,
+fn get_filename(encoding: EncodingScheme) -> String {
+    match encoding {
+        EncodingScheme::AbiEncoding => format!("abi_encoding_{OUTPUT_FILENAME}"),
+        EncodingScheme::Borsh => format!("borsh_encoding_{OUTPUT_FILENAME}"),
+        _ => OUTPUT_FILENAME.to_string(),
+    }
+}
+
+fn make_message_with_payload_data(
+    data: &[u8],
+    counter_pda: Pubkey,
+    num_additional_accounts: usize,
+    encoding: EncodingScheme,
+) -> (Message, DataPayload<'_>) {
+    let accounts = (0..num_additional_accounts).fold(
+        vec![AccountMeta::new(counter_pda, false)],
+        |mut acc, _| {
+            acc.push(AccountMeta::new(Pubkey::new_unique(), false));
+            acc
+        },
    );
+
+    let payload = DataPayload::new(data, &accounts, encoding);
     let message = custom_message(axelar_solana_memo_program::id(), &payload);
     (message, payload)
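
For reference, the report-writing pattern introduced by this patch reduces to a small csv-async sketch. This is an editor's illustrative example, not part of the patch: it assumes csv-async 1.3 with the `tokio` feature (as declared in the Cargo.toml hunk above) and serde with the derive feature; the struct, field and file names below are made up for illustration, while `AsyncSerializer::from_writer`, `serialize` and `flush` are the same calls the patch uses.

use serde::Serialize;
use tokio::fs::File;

// Illustrative row type; the real `Row` in message_limits.rs carries more columns.
#[derive(Serialize)]
struct SketchRow {
    #[serde(rename = "number_of_signers")]
    num_signers: usize,
    #[serde(rename = "message_size(bytes)")]
    message_size: usize,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The CSV header row is derived from the serde renames of the first serialized record.
    let file = File::create("sketch_report.csv").await?;
    let mut writer = csv_async::AsyncSerializer::from_writer(file);
    writer
        .serialize(SketchRow {
            num_signers: 1,
            message_size: 128,
        })
        .await?;
    // Flush before dropping the serializer so buffered rows reach the file.
    writer.flush().await?;
    Ok(())
}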