diff --git a/Cargo.lock b/Cargo.lock index 0154b689416e5..3b429c2e148c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1375,12 +1375,13 @@ dependencies = [ "aptos-metrics-core", "derive_more", "once_cell", + "rayon", "threadpool", ] [[package]] name = "aptos-dynamic-transaction-composer" -version = "0.1.0" +version = "0.1.1" dependencies = [ "anyhow", "aptos-types", @@ -1395,7 +1396,6 @@ dependencies = [ "serde", "serde_bytes", "serde_json", - "tsify-next", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -1600,6 +1600,7 @@ dependencies = [ "aptos-crypto", "aptos-drop-helper", "aptos-infallible", + "aptos-metrics-core", "aptos-scratchpad", "aptos-secure-net", "aptos-storage-interface", @@ -3496,6 +3497,7 @@ dependencies = [ "aptos-build-info", "aptos-crypto", "aptos-framework", + "aptos-gas-profiling", "aptos-gas-schedule", "aptos-gas-schedule-updator", "aptos-genesis", @@ -8800,19 +8802,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "gloo-utils" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5555354113b18c547c1d3a98fbf7fb32a9ff4f6fa112ce823a21641a0ba3aa" -dependencies = [ - "js-sys", - "serde", - "serde_json", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "goldenfile" version = "1.6.0" @@ -15274,17 +15263,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "serde_derive_internals" -version = "0.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", -] - [[package]] name = "serde_json" version = "1.0.114" @@ -17227,31 +17205,6 @@ dependencies = [ "termcolor", ] -[[package]] -name = "tsify-next" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4a645dca4ee0800f5ab60ce166deba2db6a0315de795a2691e138a3d55d756" -dependencies = [ - "gloo-utils", - "serde", - "serde_json", - "tsify-next-macros", - "wasm-bindgen", -] - -[[package]] -name = "tsify-next-macros" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d5c06f8a51d759bb58129e30b2631739e7e1e4579fad1f30ac09a6c88e488a6" -dependencies = [ - "proc-macro2", - "quote", - "serde_derive_internals", - "syn 2.0.87", -] - [[package]] name = "tui" version = "0.19.0" diff --git a/aptos-move/aptos-gas-calibration/src/measurements.rs b/aptos-move/aptos-gas-calibration/src/measurements.rs index fa0257ce29a0c..df62baca470cf 100644 --- a/aptos-move/aptos-gas-calibration/src/measurements.rs +++ b/aptos-move/aptos-gas-calibration/src/measurements.rs @@ -162,15 +162,17 @@ fn compile_and_run_samples_ir( .equation_names .push(format!("{}::{}", &identifier, func_identifier.0)); - let elapsed = executor.exec_func_record_running_time( - &module_id, - &func_identifier.0, - vec![], - func_identifier.1.clone(), - iterations, - ExecFuncTimerDynamicArgs::NoArgs, - GasMeterType::UnmeteredGasMeter, - ); + let elapsed = executor + .exec_func_record_running_time( + &module_id, + &func_identifier.0, + vec![], + func_identifier.1.clone(), + iterations, + ExecFuncTimerDynamicArgs::NoArgs, + GasMeterType::UnmeteredGasMeter, + ) + .elapsed_micros(); gas_measurement.regular_meter.push(elapsed); // record with abstract gas meter diff --git a/aptos-move/aptos-gas-calibration/src/measurements_helpers.rs b/aptos-move/aptos-gas-calibration/src/measurements_helpers.rs index 3f242b4457d4e..987cf91799d4a 100644 --- 
a/aptos-move/aptos-gas-calibration/src/measurements_helpers.rs +++ b/aptos-move/aptos-gas-calibration/src/measurements_helpers.rs @@ -94,15 +94,17 @@ pub fn execute_user_txn( iterations: u64, args: Vec>, ) -> u128 { - let elapsed = executor.exec_func_record_running_time( - module_name, - function_name, - vec![], - args, - iterations, - ExecFuncTimerDynamicArgs::NoArgs, - GasMeterType::UnmeteredGasMeter, - ); + let elapsed = executor + .exec_func_record_running_time( + module_name, + function_name, + vec![], + args, + iterations, + ExecFuncTimerDynamicArgs::NoArgs, + GasMeterType::UnmeteredGasMeter, + ) + .elapsed_micros(); println!("running time (microseconds): {}", elapsed); elapsed } diff --git a/aptos-move/aptos-release-builder/Cargo.toml b/aptos-move/aptos-release-builder/Cargo.toml index 42f02244848ab..604335b629dfd 100644 --- a/aptos-move/aptos-release-builder/Cargo.toml +++ b/aptos-move/aptos-release-builder/Cargo.toml @@ -18,6 +18,7 @@ aptos = { workspace = true, features = [ "no-upload-proposal" ] } aptos-build-info = { workspace = true } aptos-crypto = { workspace = true } aptos-framework = { workspace = true } +aptos-gas-profiling = { workspace = true } aptos-gas-schedule = { workspace = true } aptos-gas-schedule-updator = { workspace = true } aptos-genesis = { workspace = true } diff --git a/aptos-move/aptos-release-builder/src/components/feature_flags.rs b/aptos-move/aptos-release-builder/src/components/feature_flags.rs index cf161ac8c8947..b980063aa1fd7 100644 --- a/aptos-move/aptos-release-builder/src/components/feature_flags.rs +++ b/aptos-move/aptos-release-builder/src/components/feature_flags.rs @@ -282,7 +282,7 @@ impl From for AptosFeatureFlag { }, FeatureFlag::Bn254Structures => AptosFeatureFlag::BN254_STRUCTURES, FeatureFlag::WebAuthnSignature => AptosFeatureFlag::WEBAUTHN_SIGNATURE, - FeatureFlag::ReconfigureWithDkg => AptosFeatureFlag::RECONFIGURE_WITH_DKG, + FeatureFlag::ReconfigureWithDkg => AptosFeatureFlag::_DEPRECATED_RECONFIGURE_WITH_DKG, FeatureFlag::KeylessAccounts => AptosFeatureFlag::KEYLESS_ACCOUNTS, FeatureFlag::KeylessButZklessAccounts => AptosFeatureFlag::KEYLESS_BUT_ZKLESS_ACCOUNTS, FeatureFlag::RemoveDetailedError => AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH, @@ -427,7 +427,7 @@ impl From for FeatureFlag { }, AptosFeatureFlag::BN254_STRUCTURES => FeatureFlag::Bn254Structures, AptosFeatureFlag::WEBAUTHN_SIGNATURE => FeatureFlag::WebAuthnSignature, - AptosFeatureFlag::RECONFIGURE_WITH_DKG => FeatureFlag::ReconfigureWithDkg, + AptosFeatureFlag::_DEPRECATED_RECONFIGURE_WITH_DKG => FeatureFlag::ReconfigureWithDkg, AptosFeatureFlag::KEYLESS_ACCOUNTS => FeatureFlag::KeylessAccounts, AptosFeatureFlag::KEYLESS_BUT_ZKLESS_ACCOUNTS => FeatureFlag::KeylessButZklessAccounts, AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH => FeatureFlag::RemoveDetailedError, diff --git a/aptos-move/aptos-release-builder/src/main.rs b/aptos-move/aptos-release-builder/src/main.rs index 8352800e81cd7..57cde7cee78bb 100644 --- a/aptos-move/aptos-release-builder/src/main.rs +++ b/aptos-move/aptos-release-builder/src/main.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use anyhow::Context; +use anyhow::{bail, Context}; use aptos_crypto::{ed25519::Ed25519PrivateKey, ValidCryptoMaterialStringExt}; use aptos_framework::natives::code::PackageRegistry; use aptos_gas_schedule::LATEST_GAS_FEATURE_VERSION; @@ -69,13 +69,22 @@ impl NetworkSelection { pub enum Commands { /// Generate sets of governance proposals based on the release_config 
file passed in GenerateProposals { + /// Path to the release config. #[clap(short, long)] release_config: PathBuf, + + /// Output directory to store the generated artifacts. #[clap(short, long)] output_dir: PathBuf, + /// If set, simulate the governance proposals after generation. #[clap(long)] simulate: Option, + + /// Set this flag to enable the gas profiler. + /// Can only be used in combination with `--simulate`. + #[clap(long)] + profile_gas: Option, }, /// Simulate a multi-step proposal on the specified network, using its current states. /// The simulation will execute the governance scripts, as if the proposal is already @@ -91,6 +100,10 @@ pub enum Commands { /// Possible values: devnet, testnet, mainnet, #[clap(long)] network: NetworkSelection, + + /// Set this flag to enable the gas profiler + #[clap(long, default_value_t = false)] + profile_gas: bool, }, /// Generate sets of governance proposals with default release config. WriteDefault { @@ -184,6 +197,7 @@ async fn main() -> anyhow::Result<()> { release_config, output_dir, simulate, + profile_gas, } => { aptos_release_builder::ReleaseConfig::load_config(release_config.as_path()) .with_context(|| "Failed to load release config".to_string())? @@ -191,15 +205,28 @@ async fn main() -> anyhow::Result<()> { .await .with_context(|| "Failed to generate release proposal scripts".to_string())?; - if let Some(network) = simulate { - let remote_endpoint = network.to_url()?; - simulate_all_proposals(remote_endpoint, output_dir.as_path()).await?; + match simulate { + Some(network) => { + let profile_gas = profile_gas.unwrap_or(false); + let remote_endpoint = network.to_url()?; + simulate_all_proposals(remote_endpoint, output_dir.as_path(), profile_gas) + .await?; + }, + None => { + if profile_gas.is_some() { + bail!("--profile-gas can only be set in combination with --simulate") + } + }, } Ok(()) }, - Commands::Simulate { network, path } => { - simulate_all_proposals(network.to_url()?, &path).await?; + Commands::Simulate { + network, + path, + profile_gas, + } => { + simulate_all_proposals(network.to_url()?, &path, profile_gas).await?; Ok(()) }, Commands::WriteDefault { output_path } => { diff --git a/aptos-move/aptos-release-builder/src/simulate.rs b/aptos-move/aptos-release-builder/src/simulate.rs index f37c8edf49c0d..dcfdbb7d6d2fc 100644 --- a/aptos-move/aptos-release-builder/src/simulate.rs +++ b/aptos-move/aptos-release-builder/src/simulate.rs @@ -26,6 +26,7 @@ use aptos::{ common::types::PromptOptions, governance::compile_in_temp_dir, move_tool::FrameworkPackageArgs, }; use aptos_crypto::HashValue; +use aptos_gas_profiling::GasProfiler; use aptos_gas_schedule::{AptosGasParameters, FromOnChainGasSchedule}; use aptos_language_e2e_tests::account::AccountData; use aptos_move_debugger::aptos_debugger::AptosDebugger; @@ -510,6 +511,7 @@ pub async fn simulate_multistep_proposal( remote_url: Url, proposal_dir: &Path, proposal_scripts: &[PathBuf], + profile_gas: bool, ) -> Result<()> { println!("Simulating proposal at {}", proposal_dir.display()); @@ -626,28 +628,51 @@ pub async fn simulate_multistep_proposal( let resolver = state_view.as_move_resolver(); let code_storage = state_view.as_aptos_code_storage(env); - let (_vm_status, vm_output) = vm.execute_user_transaction( - &resolver, - &code_storage, - &account - .account() - .transaction() - .script(Script::new(script_blob, vec![], vec![ - TransactionArgument::U64(DUMMY_PROPOSAL_ID), // dummy proposal id, ignored by the patched function - ])) - .chain_id(chain_id.chain_id()) - 
.sequence_number(script_idx as u64) - .gas_unit_price(gas_params.vm.txn.min_price_per_gas_unit.into()) - .max_gas_amount(100000) - .ttl(u64::MAX) - .sign(), - &log_context, - ); + let txn = account + .account() + .transaction() + .script(Script::new(script_blob, vec![], vec![ + TransactionArgument::U64(DUMMY_PROPOSAL_ID), // dummy proposal id, ignored by the patched function + ])) + .chain_id(chain_id.chain_id()) + .sequence_number(script_idx as u64) + .gas_unit_price(gas_params.vm.txn.min_price_per_gas_unit.into()) + .max_gas_amount(100000) + .ttl(u64::MAX) + .sign(); + + let vm_output = if !profile_gas { + let (_vm_status, vm_output) = + vm.execute_user_transaction(&resolver, &code_storage, &txn, &log_context); + vm_output + } else { + let (_vm_status, vm_output, gas_profiler) = vm + .execute_user_transaction_with_modified_gas_meter( + &resolver, + &code_storage, + &txn, + &log_context, + GasProfiler::new_script, + )?; + + let gas_log = gas_profiler.finish(); + let report_path = proposal_dir + .join("gas-profiling") + .join(script_path.file_stem().unwrap()); + gas_log.generate_html_report(&report_path, format!("Gas Report - {}", script_name))?; + + println!(" Gas report saved to {}", report_path.display()); + + vm_output + }; // TODO: ensure all scripts trigger reconfiguration. let txn_output = vm_output .try_materialize_into_transaction_output(&resolver) .context("failed to materialize transaction output")?; + + println!(" Gas used: {}", txn_output.gas_used()); + let txn_status = txn_output.status(); match txn_status { TransactionStatus::Keep(ExecutionStatus::Success) => { @@ -710,7 +735,11 @@ pub fn collect_proposals(root_dir: &Path) -> Result)> Ok(result) } -pub async fn simulate_all_proposals(remote_url: Url, output_dir: &Path) -> Result<()> { +pub async fn simulate_all_proposals( + remote_url: Url, + output_dir: &Path, + profile_gas: bool, +) -> Result<()> { let proposals = collect_proposals(output_dir).context("failed to collect proposals for simulation")?; @@ -735,11 +764,14 @@ pub async fn simulate_all_proposals(remote_url: Url, output_dir: &Path) -> Resul } for (proposal_dir, proposal_scripts) in &proposals { - simulate_multistep_proposal(remote_url.clone(), proposal_dir, proposal_scripts) - .await - .with_context(|| { - format!("failed to simulate proposal at {}", proposal_dir.display()) - })?; + simulate_multistep_proposal( + remote_url.clone(), + proposal_dir, + proposal_scripts, + profile_gas, + ) + .await + .with_context(|| format!("failed to simulate proposal at {}", proposal_dir.display()))?; } println!("All proposals succeeded!"); diff --git a/aptos-move/aptos-vm-benchmarks/src/helper.rs b/aptos-move/aptos-vm-benchmarks/src/helper.rs index aea964f3e20e2..de4f8d8c06f6d 100644 --- a/aptos-move/aptos-vm-benchmarks/src/helper.rs +++ b/aptos-move/aptos-vm-benchmarks/src/helper.rs @@ -64,15 +64,17 @@ pub fn execute_module_txn( // sign user transaction and only records the body of the transaction pub fn execute_user_txn(executor: &mut FakeExecutor, module_name: &ModuleId, function_name: &str) { - let elapsed = executor.exec_func_record_running_time( - module_name, - function_name, - vec![], - vec![], - 10, - ExecFuncTimerDynamicArgs::NoArgs, - GasMeterType::UnmeteredGasMeter, - ); + let elapsed = executor + .exec_func_record_running_time( + module_name, + function_name, + vec![], + vec![], + 10, + ExecFuncTimerDynamicArgs::NoArgs, + GasMeterType::UnmeteredGasMeter, + ) + .elapsed_micros(); println!("running time (microseconds): {}", elapsed); } diff --git 
a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs
index d79ae3db81895..cc94c805c21aa 100644
--- a/aptos-move/aptos-vm/src/aptos_vm.rs
+++ b/aptos-move/aptos-vm/src/aptos_vm.rs
@@ -1654,7 +1654,12 @@ impl AptosVM {
         let check_friend_linking = !self
             .features()
             .is_enabled(FeatureFlag::TREAT_FRIEND_AS_PRIVATE);
-        let compatability_checks = Compatibility::new(check_struct_layout, check_friend_linking);
+        let compatibility_checks = Compatibility::new(
+            check_struct_layout,
+            check_friend_linking,
+            self.timed_features()
+                .is_enabled(TimedFeatureFlag::EntryCompatibility),
+        );
 
         if self.features().is_loader_v2_enabled() {
             session.finish_with_module_publishing_and_initialization(
@@ -1667,7 +1672,7 @@ impl AptosVM {
                 destination,
                 bundle,
                 modules,
-                compatability_checks,
+                compatibility_checks,
             )
         } else {
             // Check what modules exist before publishing.
@@ -1691,7 +1696,7 @@ impl AptosVM {
                     bundle.into_inner(),
                     destination,
                     gas_meter,
-                    compatability_checks,
+                    compatibility_checks,
                 )
             })?;
 
@@ -3080,13 +3085,44 @@ pub(crate) fn fetch_module_metadata_for_struct_tag(
     }
 }
 
-#[test]
-fn vm_thread_safe() {
-    fn assert_send<T: Send>() {}
-    fn assert_sync<T: Sync>() {}
+#[cfg(test)]
+mod tests {
+    use crate::{move_vm_ext::MoveVmExt, AptosVM};
+    use aptos_types::{
+        account_address::AccountAddress,
+        account_config::{NEW_EPOCH_EVENT_MOVE_TYPE_TAG, NEW_EPOCH_EVENT_V2_MOVE_TYPE_TAG},
+        contract_event::ContractEvent,
+        event::EventKey,
+    };
+
+    #[test]
+    fn vm_thread_safe() {
+        fn assert_send<T: Send>() {}
+        fn assert_sync<T: Sync>() {}
+
+        assert_send::<AptosVM>();
+        assert_sync::<AptosVM>();
+        assert_send::<MoveVmExt>();
+        assert_sync::<MoveVmExt>();
+    }
 
-    assert_send::<AptosVM>();
-    assert_sync::<AptosVM>();
-    assert_send::<MoveVmExt>();
-    assert_sync::<MoveVmExt>();
+    #[test]
+    fn should_restart_execution_on_new_epoch() {
+        let new_epoch_event = ContractEvent::new_v1(
+            EventKey::new(0, AccountAddress::ONE),
+            0,
+            NEW_EPOCH_EVENT_MOVE_TYPE_TAG.clone(),
+            vec![],
+        );
+        let new_epoch_event_v2 =
+            ContractEvent::new_v2(NEW_EPOCH_EVENT_V2_MOVE_TYPE_TAG.clone(), vec![]);
+        assert!(AptosVM::should_restart_execution(&[(
+            new_epoch_event,
+            None
+        )]));
+        assert!(AptosVM::should_restart_execution(&[(
+            new_epoch_event_v2,
+            None
+        )]));
+    }
 }
diff --git a/aptos-move/e2e-benchmark/src/main.rs b/aptos-move/e2e-benchmark/src/main.rs
index 76e98fc61d63f..51b2d00e293fb 100644
--- a/aptos-move/e2e-benchmark/src/main.rs
+++ b/aptos-move/e2e-benchmark/src/main.rs
@@ -3,7 +3,7 @@
 use aptos_language_e2e_tests::{
     account::Account,
-    executor::{ExecFuncTimerDynamicArgs, FakeExecutor, GasMeterType},
+    executor::{ExecFuncTimerDynamicArgs, FakeExecutor, GasMeterType, Measurement},
 };
 use aptos_transaction_generator_lib::{
     publishing::{
@@ -46,7 +46,7 @@ fn execute_and_time_entry_point(
     publisher_address: &AccountAddress,
     executor: &mut FakeExecutor,
     iterations: u64,
-) -> u128 {
+) -> Measurement {
     let mut rng = StdRng::seed_from_u64(14);
     let entry_fun = entry_point
         .create_payload(
@@ -77,9 +77,9 @@ fn execute_and_time_entry_point(
     )
 }
 
-const ALLOWED_REGRESSION: f32 = 0.15;
-const ALLOWED_IMPROVEMENT: f32 = 0.15;
-const ABSOLUTE_BUFFER_US: f32 = 2.0;
+const ALLOWED_REGRESSION: f64 = 0.15;
+const ALLOWED_IMPROVEMENT: f64 = 0.15;
+const ABSOLUTE_BUFFER_US: f64 = 2.0;
 
 const CALIBRATION_VALUES: &str = "
 Loop { loop_count: Some(100000), loop_type: NoOp } 6 0.988 1.039 41212.4
 CreateObjects { num_objects: 10, object_payload_size: 0 } 6 0.940 1.026 152.1
 CreateObjects { num_objects: 10, object_payload_size: 10240 } 6 0.934 1.051 9731.3
 CreateObjects { num_objects: 100, object_payload_size: 0 } 6 0.966 1.051
1458.3 CreateObjects { num_objects: 100, object_payload_size: 10240 } 6 0.969 1.077 11196.4 -InitializeVectorPicture { length: 40 } 6 0.973 1.066 75.0 -VectorPicture { length: 40 } 6 0.955 1.092 22.0 -VectorPictureRead { length: 40 } 6 0.952 1.047 21.0 +InitializeVectorPicture { length: 128 } 6 0.973 1.066 170.3 +VectorPicture { length: 128 } 6 0.955 1.092 46.2 +VectorPictureRead { length: 128 } 6 0.952 1.047 45.1 InitializeVectorPicture { length: 30720 } 6 0.969 1.071 27295.8 VectorPicture { length: 30720 } 6 0.957 1.066 6560.2 VectorPictureRead { length: 30720 } 6 0.948 1.053 6642.8 @@ -103,11 +103,16 @@ TokenV1MintAndTransferNFTSequential 6 0.991 1.067 543.7 TokenV2AmbassadorMint { numbered: true } 6 0.987 1.052 474.4 LiquidityPoolSwap { is_stable: true } 6 0.970 1.042 555.4 LiquidityPoolSwap { is_stable: false } 6 0.925 1.001 535.3 +CoinInitAndMint 6 0.925 1.001 197.1 +FungibleAssetMint 6 0.925 1.001 231.6 +IncGlobalMilestoneAggV2 { milestone_every: 1 } 6 0.925 1.001 33.3 +IncGlobalMilestoneAggV2 { milestone_every: 2 } 6 0.925 1.001 19.1 +EmitEvents { count: 1000 } 6 0.925 1.001 8493.7 "; struct CalibrationInfo { // count: usize, - expected_time: f32, + expected_time_micros: f64, } fn get_parsed_calibration_values() -> HashMap { @@ -118,7 +123,7 @@ fn get_parsed_calibration_values() -> HashMap { let parts = line.split('\t').collect::>(); (parts[0].to_string(), CalibrationInfo { // count: parts[1].parse().unwrap(), - expected_time: parts[parts.len() - 1].parse().unwrap(), + expected_time_micros: parts[parts.len() - 1].parse().unwrap(), }) }) .collect() @@ -146,7 +151,7 @@ fn main() { loop_type: LoopType::Arithmetic, }, // This is a cheap bcs (serializing vec), so not representative of what BCS native call should cost. - // (, EntryPoints::Loop { loop_count: Some(1000), loop_type: LoopType::BCS { len: 1024 }}), + // (, EntryPoints::Loop { loop_count: Some(1000), loop_type: LoopType::BcsToBytes { len: 1024 }}), EntryPoints::CreateObjects { num_objects: 10, object_payload_size: 0, @@ -163,9 +168,9 @@ fn main() { num_objects: 100, object_payload_size: 10 * 1024, }, - EntryPoints::InitializeVectorPicture { length: 40 }, - EntryPoints::VectorPicture { length: 40 }, - EntryPoints::VectorPictureRead { length: 40 }, + EntryPoints::InitializeVectorPicture { length: 128 }, + EntryPoints::VectorPicture { length: 128 }, + EntryPoints::VectorPictureRead { length: 128 }, EntryPoints::InitializeVectorPicture { length: 30 * 1024 }, EntryPoints::VectorPicture { length: 30 * 1024 }, EntryPoints::VectorPictureRead { length: 30 * 1024 }, @@ -188,22 +193,27 @@ fn main() { EntryPoints::TokenV2AmbassadorMint { numbered: true }, EntryPoints::LiquidityPoolSwap { is_stable: true }, EntryPoints::LiquidityPoolSwap { is_stable: false }, + EntryPoints::CoinInitAndMint, + EntryPoints::FungibleAssetMint, + EntryPoints::IncGlobalMilestoneAggV2 { milestone_every: 1 }, + EntryPoints::IncGlobalMilestoneAggV2 { milestone_every: 2 }, + EntryPoints::EmitEvents { count: 1000 }, ]; let mut failures = Vec::new(); let mut json_lines = Vec::new(); println!( - "{:>15} {:>15} {:>15} entry point", - "wall time (us)", "expected (us)", "diff(- is impr)" + "{:>13} {:>13} {:>13}{:>13} {:>13} {:>13} entry point", + "walltime(us)", "expected(us)", "dif(- is impr)", "gas/s", "exe gas", "io gas", ); for (index, entry_point) in entry_points.into_iter().enumerate() { let entry_point_name = format!("{:?}", entry_point); - let expected_time = calibration_values + let expected_time_micros = calibration_values .get(&entry_point_name) - .unwrap() - 
.expected_time; + .expect(&entry_point_name) + .expected_time_micros; let publisher = executor.new_account_at(AccountAddress::random()); let mut package_handler = PackageHandler::new(entry_point.package_name()); @@ -215,7 +225,6 @@ fn main() { 0, package.publish_transaction_payload(), ); - println!("Published package: {:?}", entry_point.package_name()); if let Some(init_entry_point) = entry_point.initialize_entry_point() { execute_txn( &mut executor, @@ -228,52 +237,59 @@ fn main() { Some(publisher.address()), ), ); - println!( - "Executed init entry point: {:?}", - entry_point.initialize_entry_point() - ); } - let elapsed_micros = execute_and_time_entry_point( + let measurement = execute_and_time_entry_point( &entry_point, &package, publisher.address(), &mut executor, - if expected_time > 10000.0 { + if expected_time_micros > 10000.0 { 6 - } else if expected_time > 1000.0 { + } else if expected_time_micros > 1000.0 { 10 } else { 100 }, ); - let diff = (elapsed_micros as f32 - expected_time) / expected_time * 100.0; + let elapsed_micros = measurement.elapsed_micros_f64(); + let diff = (elapsed_micros - expected_time_micros) / expected_time_micros * 100.0; + let execution_gas_units = measurement.execution_gas_units(); + let io_gas_units = measurement.io_gas_units(); + let gps = (execution_gas_units + io_gas_units) / measurement.elapsed_secs_f64(); println!( - "{:15} {:15.1} {:14.1}% {:?}", - elapsed_micros, expected_time, diff, entry_point + "{:13.1} {:13.1} {:12.1}% {:13.0} {:13.2} {:13.2} {:?}", + elapsed_micros, + expected_time_micros, + diff, + gps, + execution_gas_units, + io_gas_units, + entry_point ); json_lines.push(json!({ "grep": "grep_json_aptos_move_vm_perf", "transaction_type": entry_point_name, "wall_time_us": elapsed_micros, - "expected_wall_time_us": expected_time, + "gas_units_per_second": gps, + "execution_gas_units": execution_gas_units, + "io_gas_units": io_gas_units, + "expected_wall_time_us": expected_time_micros, "test_index": index, })); - if elapsed_micros as f32 - > expected_time as f32 * (1.0 + ALLOWED_REGRESSION) + ABSOLUTE_BUFFER_US - { + if elapsed_micros > expected_time_micros * (1.0 + ALLOWED_REGRESSION) + ABSOLUTE_BUFFER_US { failures.push(format!( - "Performance regression detected: {}us, expected: {}us, diff: {}%, for {:?}", - elapsed_micros, expected_time, diff, entry_point + "Performance regression detected: {:.1}us, expected: {:.1}us, diff: {}%, for {:?}", + elapsed_micros, expected_time_micros, diff, entry_point )); - } else if elapsed_micros as f32 + ABSOLUTE_BUFFER_US - < expected_time as f32 * (1.0 - ALLOWED_IMPROVEMENT) + } else if elapsed_micros + ABSOLUTE_BUFFER_US + < expected_time_micros * (1.0 - ALLOWED_IMPROVEMENT) { failures.push(format!( - "Performance improvement detected: {}us, expected {}us, diff: {}%, for {:?}. You need to adjust expected time!", - elapsed_micros, expected_time, diff, entry_point + "Performance improvement detected: {:.1}us, expected {:.1}us, diff: {}%, for {:?}. 
You need to adjust expected time!", + elapsed_micros, expected_time_micros, diff, entry_point )); } } diff --git a/aptos-move/e2e-move-tests/src/lib.rs b/aptos-move/e2e-move-tests/src/lib.rs index 1b2bc1bc6a5fe..e20ad9972c038 100644 --- a/aptos-move/e2e-move-tests/src/lib.rs +++ b/aptos-move/e2e-move-tests/src/lib.rs @@ -59,6 +59,7 @@ pub(crate) fn build_package_with_compiler_version( compiler_version: CompilerVersion, ) -> anyhow::Result { let mut options = options; + options.language_version = Some(compiler_version.infer_stable_language_version()); options.compiler_version = Some(compiler_version); BuiltPackage::build(package_path.to_owned(), options) } diff --git a/aptos-move/e2e-move-tests/src/tests/mod.rs b/aptos-move/e2e-move-tests/src/tests/mod.rs index f9ab23563390c..63a5e1ff70eb5 100644 --- a/aptos-move/e2e-move-tests/src/tests/mod.rs +++ b/aptos-move/e2e-move-tests/src/tests/mod.rs @@ -54,6 +54,7 @@ mod token_event_store; mod token_objects; mod transaction_context; mod type_too_large; +mod upgrade_compatibility; mod vector_numeric_address; mod vm; mod vote; diff --git a/aptos-move/e2e-move-tests/src/tests/upgrade_compatibility.rs b/aptos-move/e2e-move-tests/src/tests/upgrade_compatibility.rs new file mode 100644 index 0000000000000..6bfadbdc289d7 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/upgrade_compatibility.rs @@ -0,0 +1,164 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Tests for upgrade compatibility +//! +//! TODO: currently this contains only tests for friend entry functions, this should be extended +//! to test all compatibility rules in one place. + +// Note: this module uses parameterized tests via the +// [`rstest` crate](https://crates.io/crates/rstest) +// to test for multiple feature combinations. 
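+// Each test body below runs once per `case(...)` value of its `use_new_checker`
+// parameter.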
+// +// Currently, we are testing both the old and the new compatibility checker + +use crate::{assert_success, assert_vm_status, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_package_builder::PackageBuilder; +use aptos_types::{ + account_address::AccountAddress, on_chain_config::FeatureFlag, transaction::TransactionStatus, +}; +use move_core_types::vm_status::StatusCode; +use rstest::rstest; + +#[rstest(use_new_checker, case(false), case(true))] +fn private_non_entry(use_new_checker: bool) { + let result = check_upgrade("fun f(){}", "fun f(u: u16){}", use_new_checker); + assert_success!(result) +} + +#[rstest(use_new_checker, case(false), case(true))] +fn remove_function(use_new_checker: bool) { + let result = check_upgrade("fun f(){}", "", use_new_checker); + assert_success!(result); + + let result = check_upgrade("public fun f(){}", "", use_new_checker); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + let result = check_upgrade("public(friend) fun f(){}", "", use_new_checker); + assert_success!(result); + + let result = check_upgrade("entry fun f(){}", "", use_new_checker); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + let result = check_upgrade("public entry fun f(){}", "", use_new_checker); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + let result = check_upgrade("public(friend) entry fun f(){}", "", use_new_checker); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); +} + +#[rstest(use_new_checker, case(false), case(true))] +fn change_function_signature(use_new_checker: bool) { + let result = check_upgrade("fun f(){}", "fun f(u: u16){}", use_new_checker); + assert_success!(result); + + let result = check_upgrade( + "public fun f(){}", + "public fun f(u: u16){}", + use_new_checker, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + let result = check_upgrade( + "public(friend) fun f(){}", + "public(friend) fun f(u: u16){}", + use_new_checker, + ); + assert_success!(result); + + let result = check_upgrade("entry fun f(){}", "entry fun f(u: u16){}", use_new_checker); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + let result = check_upgrade( + "public entry fun f(){}", + "public entry fun f(u: u16){}", + use_new_checker, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + let result = check_upgrade( + "public(friend) entry fun f(){}", + "public(friend) entry fun f(u: u16){}", + use_new_checker, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); +} + +#[rstest(use_new_checker, case(false), case(true))] +fn friend_add_entry(use_new_checker: bool) { + let result = check_upgrade( + "public(friend) fun f(){}", + "public(friend) entry fun f(){}", + use_new_checker, + ); + assert_success!(result) +} + +#[rstest(use_new_checker, case(false), case(true))] +fn friend_remove_entry_failure(use_new_checker: bool) { + let result = check_upgrade( + "public(friend) entry fun f(){}", + "public(friend) fun f(){}", + use_new_checker, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE) +} + +#[rstest(use_new_checker, case(false), case(true))] +fn friend_remove_failure(use_new_checker: bool) { + let result = check_upgrade("public(friend) entry fun f(){}", "", use_new_checker); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE) +} + +#[rstest(use_new_checker, case(false), 
case(true))] +fn friend_entry_change_sig_failure(use_new_checker: bool) { + let result = check_upgrade( + "public(friend) entry fun f(){}", + "public(friend) entry fun f(_s: &signer){}", + use_new_checker, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE) +} + +fn check_upgrade(old_decls: &str, new_decls: &str, use_new_checker: bool) -> TransactionStatus { + let (enabled, disabled) = if use_new_checker { + (vec![FeatureFlag::USE_COMPATIBILITY_CHECKER_V2], vec![]) + } else { + (vec![], vec![FeatureFlag::USE_COMPATIBILITY_CHECKER_V2]) + }; + let mut builder = PackageBuilder::new("Package"); + let mut h = MoveHarness::new_with_features(enabled, disabled); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x815").unwrap()); + + // Publish for first time + builder.add_source( + "m.move", + &format!( + r#" + module 0x815::m {{ + {} + }} + "#, + old_decls + ), + ); + let path = builder.write_to_temp().unwrap(); + assert_success!(h.publish_package_with_options(&acc, path.path(), BuildOptions::move_2())); + + // Now upgrade + let mut builder = PackageBuilder::new("Package"); + builder.add_source( + "m.move", + &format!( + r#" + module 0x815::m {{ + {} + }} + "#, + new_decls + ), + ); + let path = builder.write_to_temp().unwrap(); + h.publish_package_with_options(&acc, path.path(), BuildOptions::move_2()) +} diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index ab1d4b7839513..9c9023c9c4b7d 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -21,7 +21,7 @@ use aptos_block_executor::{ use aptos_crypto::HashValue; use aptos_framework::ReleaseBundle; use aptos_gas_algebra::DynamicExpression; -use aptos_gas_meter::{StandardGasAlgebra, StandardGasMeter}; +use aptos_gas_meter::{AptosGasMeter, GasAlgebra, StandardGasAlgebra, StandardGasMeter}; use aptos_gas_profiling::{GasProfiler, TransactionGasLog}; use aptos_keygen::KeyGen; use aptos_types::{ @@ -89,7 +89,7 @@ use std::{ path::{Path, PathBuf}, str::FromStr, sync::{Arc, Mutex}, - time::Instant, + time::{Duration, Instant}, }; static RNG_SEED: [u8; 32] = [9u8; 32]; @@ -144,6 +144,39 @@ pub enum GasMeterType { UnmeteredGasMeter, } +#[derive(Clone)] +pub struct Measurement { + elapsed: Duration, + /// In internal gas units + execution_gas: u64, + /// In internal gas units + io_gas: u64, +} + +const GAS_SCALING_FACTOR: f64 = 1_000_000.0; + +impl Measurement { + pub fn elapsed_micros(&self) -> u128 { + self.elapsed.as_micros() + } + + pub fn elapsed_secs_f64(&self) -> f64 { + self.elapsed.as_secs_f64() + } + + pub fn elapsed_micros_f64(&self) -> f64 { + self.elapsed.as_secs_f64() * 1_000_000.0 + } + + pub fn execution_gas_units(&self) -> f64 { + self.execution_gas as f64 / GAS_SCALING_FACTOR + } + + pub fn io_gas_units(&self) -> f64 { + self.io_gas as f64 / GAS_SCALING_FACTOR + } +} + pub enum ExecFuncTimerDynamicArgs { NoArgs, DistinctSigners, @@ -973,7 +1006,7 @@ impl FakeExecutor { iterations: u64, dynamic_args: ExecFuncTimerDynamicArgs, gas_meter_type: GasMeterType, - ) -> u128 { + ) -> Measurement { let mut extra_accounts = match &dynamic_args { ExecFuncTimerDynamicArgs::DistinctSigners | ExecFuncTimerDynamicArgs::DistinctSignersAndFixed(_) => (0..iterations) @@ -993,7 +1026,7 @@ impl FakeExecutor { // start measuring here to reduce measurement errors (i.e., the time taken to load vm, module, etc.) 
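+        // Each iteration below yields a `Measurement`: elapsed wall time plus the
+        // execution/io gas read back from the regular gas meter; the median across
+        // iterations is taken at the end to keep the result robust to outliers.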
let mut i = 0; - let mut times = Vec::new(); + let mut measurements = Vec::new(); while i < iterations { let mut session = vm.new_session(&resolver, SessionId::void(), None); @@ -1062,24 +1095,41 @@ impl FakeExecutor { let elapsed = start.elapsed(); if let Err(err) = result { if !should_error { - println!("Shouldn't error, but ignoring for now... {}", err); + println!( + "Entry function under measurement failed with an error. Continuing, but measurements are probably not what is expected. Error: {}", + err + ); } } - times.push(elapsed.as_micros()); + measurements.push(Measurement { + elapsed, + execution_gas: regular + .as_ref() + .map_or(0, |gas| gas.algebra().execution_gas_used().into()), + io_gas: regular + .as_ref() + .map_or(0, |gas| gas.algebra().io_gas_used().into()), + }); i += 1; } // take median of all running time iterations as a more robust measurement - times.sort(); - let length = times.len(); + measurements.sort_by_key(|v| v.elapsed); + let length = measurements.len(); let mid = length / 2; - let mut running_time = times[mid]; + let mut measurement = measurements[mid].clone(); if length % 2 == 0 { - running_time = (times[mid - 1] + times[mid]) / 2; + measurement = Measurement { + elapsed: (measurements[mid - 1].elapsed + measurements[mid].elapsed) / 2, + execution_gas: (measurements[mid - 1].execution_gas + + measurements[mid].execution_gas) + / 2, + io_gas: (measurements[mid - 1].io_gas + measurements[mid].io_gas) / 2, + }; } - running_time + measurement } /// record abstract usage using a modified gas meter diff --git a/aptos-move/framework/aptos-framework/doc/stake.md b/aptos-move/framework/aptos-framework/doc/stake.md index a1b918e9a8984..53d815ae1ed8a 100644 --- a/aptos-move/framework/aptos-framework/doc/stake.md +++ b/aptos-move/framework/aptos-framework/doc/stake.md @@ -1752,7 +1752,7 @@ Owner capability does not exist at the provided account. -Validator set change temporarily disabled because of in-progress reconfiguration. +Validator set change temporarily disabled because of in-progress reconfiguration. Please retry after 1 minute.
const ERECONFIGURATION_IN_PROGRESS: u64 = 20;
@@ -4566,6 +4566,39 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
+
+
+
+
fun spec_validator_index_upper_bound(): u64 {
+   len(global<ValidatorPerformance>(@aptos_framework).validators)
+}
+
+ + + + + + + +
fun spec_has_stake_pool(a: address): bool {
+   exists<StakePool>(a)
+}
+
+ + + + + + + +
fun spec_has_validator_config(a: address): bool {
+   exists<ValidatorConfig>(a)
+}
+
+ + + + @@ -5611,39 +5644,6 @@ Returns validator's next epoch voting power, including pending_active, active, a - - - - -
fun spec_validator_index_upper_bound(): u64 {
-   len(global<ValidatorPerformance>(@aptos_framework).validators)
-}
-
- - - - - - - -
fun spec_has_stake_pool(a: address): bool {
-   exists<StakePool>(a)
-}
-
- - - - - - - -
fun spec_has_validator_config(a: address): bool {
-   exists<ValidatorConfig>(a)
-}
-
- - - ### Function `update_stake_pool` diff --git a/aptos-move/framework/aptos-framework/sources/stake.move b/aptos-move/framework/aptos-framework/sources/stake.move index 9a4ae53bf4aa5..9dd84dee90352 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.move +++ b/aptos-move/framework/aptos-framework/sources/stake.move @@ -79,7 +79,7 @@ module aptos_framework::stake { const EINVALID_LOCKUP: u64 = 18; /// Table to store collected transaction fees for each validator already exists. const EFEES_TABLE_ALREADY_EXISTS: u64 = 19; - /// Validator set change temporarily disabled because of in-progress reconfiguration. + /// Validator set change temporarily disabled because of in-progress reconfiguration. Please retry after 1 minute. const ERECONFIGURATION_IN_PROGRESS: u64 = 20; /// Validator status enum. We can switch to proper enum later once Move supports it. diff --git a/aptos-move/script-composer/Cargo.toml b/aptos-move/script-composer/Cargo.toml index 4176089fa6a49..12b1a3a541c03 100644 --- a/aptos-move/script-composer/Cargo.toml +++ b/aptos-move/script-composer/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "aptos-dynamic-transaction-composer" description = "Generating Move Script from a batched Move calls" -version = "0.1.0" +version = "0.1.1" # Workspace inherited keys authors = { workspace = true } @@ -27,7 +27,6 @@ reqwest = { workspace = true, features = ["blocking"] } serde = { workspace = true } serde_bytes = { workspace = true } serde_json = { workspace = true } -tsify-next = { workspace = true } wasm-bindgen = { workspace = true } wasm-bindgen-futures = { workspace = true } diff --git a/aptos-move/script-composer/src/builder.rs b/aptos-move/script-composer/src/builder.rs index 4f93194d1924d..07c2442a0bc89 100644 --- a/aptos-move/script-composer/src/builder.rs +++ b/aptos-move/script-composer/src/builder.rs @@ -35,11 +35,10 @@ use move_core_types::{ use serde::{Deserialize, Serialize}; use serde_json::Value; use std::{collections::BTreeMap, str::FromStr}; -use tsify_next::Tsify; use wasm_bindgen::prelude::*; #[wasm_bindgen] -#[derive(Tsify, Clone, Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct AllocatedLocal { op_type: ArgumentOperation, is_parameter: bool, @@ -169,10 +168,16 @@ impl TransactionComposer { module: String, function: String, ty_args: Vec, - args: Vec, - ) -> Result, JsValue> { - self.add_batched_call(module, function, ty_args, args) - .map_err(|err| JsValue::from(format!("{:?}", err))) + args: Vec, + ) -> Result, JsValue> { + self.add_batched_call( + module, + function, + ty_args, + args.into_iter().map(|a| a.into()).collect(), + ) + .map_err(|err| JsValue::from(format!("{:?}", err))) + .map(|results| results.into_iter().map(|a| a.into()).collect()) } } @@ -554,3 +559,86 @@ impl AllocatedLocal { }) } } + +#[derive(Clone, Debug)] +pub enum ArgumentType { + Signer, + Raw, + PreviousResult, +} + +/// WASM Representation of CallArgument. This is because wasm_bindgen can only support c-style enum. 
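+/// The enum is therefore flattened into a tagged struct: an `ArgumentType`
+/// discriminant plus one optional payload field per variant, with the two
+/// `From` impls below converting in both directions.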
+#[wasm_bindgen(js_name = "CallArgument")]
+#[derive(Clone, Debug)]
+pub struct CallArgumentWasm {
+    ty: ArgumentType,
+    signer: Option<u16>,
+    raw: Option<Vec<u8>>,
+    previous_result: Option<PreviousResult>,
+}
+
+impl From<CallArgument> for CallArgumentWasm {
+    fn from(value: CallArgument) -> Self {
+        match value {
+            CallArgument::PreviousResult(r) => CallArgumentWasm {
+                ty: ArgumentType::PreviousResult,
+                signer: None,
+                raw: None,
+                previous_result: Some(r),
+            },
+            CallArgument::Raw(b) => CallArgumentWasm {
+                ty: ArgumentType::Raw,
+                signer: None,
+                raw: Some(b),
+                previous_result: None,
+            },
+            CallArgument::Signer(i) => CallArgumentWasm {
+                ty: ArgumentType::Signer,
+                signer: Some(i),
+                raw: None,
+                previous_result: None,
+            },
+        }
+    }
+}
+
+impl From<CallArgumentWasm> for CallArgument {
+    fn from(value: CallArgumentWasm) -> Self {
+        match value.ty {
+            ArgumentType::PreviousResult => {
+                CallArgument::PreviousResult(value.previous_result.unwrap())
+            },
+            ArgumentType::Raw => CallArgument::Raw(value.raw.unwrap()),
+            ArgumentType::Signer => CallArgument::Signer(value.signer.unwrap()),
+        }
+    }
+}
+
+#[wasm_bindgen(js_class = "CallArgument")]
+impl CallArgumentWasm {
+    pub fn new_bytes(bytes: Vec<u8>) -> Self {
+        CallArgument::Raw(bytes).into()
+    }
+
+    pub fn new_signer(signer_idx: u16) -> Self {
+        CallArgument::Signer(signer_idx).into()
+    }
+
+    pub fn borrow(&self) -> Result<CallArgumentWasm, JsValue> {
+        self.change_op_type(ArgumentOperation::Borrow)
+    }
+
+    pub fn borrow_mut(&self) -> Result<CallArgumentWasm, JsValue> {
+        self.change_op_type(ArgumentOperation::BorrowMut)
+    }
+
+    pub fn copy(&self) -> Result<CallArgumentWasm, JsValue> {
+        self.change_op_type(ArgumentOperation::Copy)
+    }
+
+    fn change_op_type(&self, operation_type: ArgumentOperation) -> Result<CallArgumentWasm, JsValue> {
+        Ok(CallArgument::from(self.clone())
+            .change_op_type(operation_type)?
+            .into())
+    }
+}
diff --git a/aptos-move/script-composer/src/lib.rs b/aptos-move/script-composer/src/lib.rs
index cdd212f4d953a..fc34d2e489dea 100644
--- a/aptos-move/script-composer/src/lib.rs
+++ b/aptos-move/script-composer/src/lib.rs
@@ -11,7 +11,6 @@ use move_core_types::{
     language_storage::{ModuleId, TypeTag},
 };
 use serde::{Deserialize, Serialize};
-use tsify_next::Tsify;
 use wasm_bindgen::prelude::wasm_bindgen;
 
 mod builder;
@@ -26,6 +25,7 @@ pub mod tests;
 pub static APTOS_SCRIPT_COMPOSER_KEY: &[u8] = "aptos::script_composer".as_bytes();
 
 /// Representing a returned value from a previous list of `MoveFunctionCall`s.
+#[wasm_bindgen]
 #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
 pub struct PreviousResult {
     /// Referring to the return value in the `call_idx`th call.
@@ -38,8 +38,7 @@ pub struct PreviousResult {
 }
 
 /// Arguments to the `MoveFunctionCall`.
-#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, Tsify)]
-#[tsify(into_wasm_abi, from_wasm_abi)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
 pub enum CallArgument {
     /// Passing raw bytes to the function. The bytes must follow the existing constraints for
     /// transaction arguments.
@@ -92,7 +91,6 @@ impl MoveFunctionCall { } } -#[wasm_bindgen] impl CallArgument { pub fn new_bytes(bytes: Vec) -> Self { CallArgument::Raw(bytes) @@ -114,7 +112,10 @@ impl CallArgument { self.change_op_type(ArgumentOperation::Copy) } - fn change_op_type(&self, operation_type: ArgumentOperation) -> Result { + pub(crate) fn change_op_type( + &self, + operation_type: ArgumentOperation, + ) -> Result { match &self { CallArgument::PreviousResult(r) => { let mut result = r.clone(); diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index 23ad6125e7d07..239713079c616 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -22,7 +22,8 @@ use aptos_types::{ contract_event::{ContractEvent, ContractEventV1}, executable::ModulePath, jwks::{ - patch::{PatchJWKMoveStruct, PatchUpsertJWK}, + jwk::{JWKMoveStruct, JWK}, + patch::{IssuerJWK, PatchJWKMoveStruct, PatchUpsertJWK}, secure_test_rsa_jwk, }, keyless::{ @@ -109,6 +110,8 @@ pub struct GenesisConfiguration { pub initial_features_override: Option, pub randomness_config_override: Option, pub jwk_consensus_config_override: Option, + pub initial_jwks: Vec, + pub keyless_groth16_vk_override: Option, } pub static GENESIS_KEYPAIR: Lazy<(Ed25519PrivateKey, Ed25519PublicKey)> = Lazy::new(|| { @@ -304,7 +307,13 @@ pub fn encode_genesis_change_set( .unwrap_or_else(OnChainJWKConsensusConfig::default_for_genesis); initialize_jwk_consensus_config(&mut session, &module_storage, &jwk_consensus_config); initialize_jwks_resources(&mut session, &module_storage); - initialize_keyless_accounts(&mut session, &module_storage, chain_id); + initialize_keyless_accounts( + &mut session, + &module_storage, + chain_id, + genesis_config.initial_jwks.clone(), + genesis_config.keyless_groth16_vk_override.clone(), + ); set_genesis_end(&mut session, &module_storage); // Reconfiguration should happen after all on-chain invocations. 
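To make the new knobs concrete: a test genesis could seed an extra issuer through `initial_jwks`, roughly as sketched below. This is illustrative only; `base_test_genesis_config()` is a hypothetical helper standing in for the remaining `GenesisConfiguration` fields, while the other names come from this diff.

    // Sketch: seed one JWK at genesis via the new `initial_jwks` field.
    let genesis_config = GenesisConfiguration {
        initial_jwks: vec![IssuerJWK {
            issuer: get_sample_iss(),             // sample issuer, as used above
            jwk: JWK::RSA(secure_test_rsa_jwk()), // test RSA JWK, as used above
        }],
        keyless_groth16_vk_override: None, // fall back to the devnet VK on non-mainnet
        ..base_test_genesis_config()       // hypothetical helper for the other fields
    };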
@@ -676,6 +685,8 @@ fn initialize_keyless_accounts( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, chain_id: ChainId, + mut initial_jwks: Vec, + vk_override: Option, ) { let config = keyless::Configuration::new_for_devnet(); exec_function( @@ -690,7 +701,8 @@ fn initialize_keyless_accounts( ]), ); if !chain_id.is_mainnet() { - let vk = Groth16VerificationKey::from(&*DEVNET_VERIFICATION_KEY); + let vk = + vk_override.unwrap_or_else(|| Groth16VerificationKey::from(&*DEVNET_VERIFICATION_KEY)); exec_function( session, module_storage, @@ -703,11 +715,24 @@ fn initialize_keyless_accounts( ]), ); - let patch: PatchJWKMoveStruct = PatchUpsertJWK { + let additional_jwk_patch = IssuerJWK { issuer: get_sample_iss(), - jwk: secure_test_rsa_jwk().into(), - } - .into(); + jwk: JWK::RSA(secure_test_rsa_jwk()), + }; + initial_jwks.insert(0, additional_jwk_patch); + + let jwk_patches: Vec = initial_jwks + .into_iter() + .map(|issuer_jwk| { + let IssuerJWK { issuer, jwk } = issuer_jwk; + let upsert_patch = PatchUpsertJWK { + issuer, + jwk: JWKMoveStruct::from(jwk), + }; + PatchJWKMoveStruct::from(upsert_patch) + }) + .collect(); + exec_function( session, module_storage, @@ -716,7 +741,7 @@ fn initialize_keyless_accounts( vec![], serialize_values(&vec![ MoveValue::Signer(CORE_CODE_ADDRESS), - MoveValue::Vector(vec![patch.as_move_value()]), + jwk_patches.as_move_value(), ]), ); } @@ -1229,6 +1254,8 @@ pub fn generate_test_genesis( initial_features_override: None, randomness_config_override: None, jwk_consensus_config_override: None, + initial_jwks: vec![], + keyless_groth16_vk_override: None, }, &OnChainConsensusConfig::default_for_genesis(), &OnChainExecutionConfig::default_for_genesis(), @@ -1279,6 +1306,8 @@ fn mainnet_genesis_config() -> GenesisConfiguration { initial_features_override: None, randomness_config_override: None, jwk_consensus_config_override: None, + initial_jwks: vec![], + keyless_groth16_vk_override: None, } } diff --git a/consensus/src/quorum_store/batch_proof_queue.rs b/consensus/src/quorum_store/batch_proof_queue.rs index 2890e021867f2..80e56bb13bd55 100644 --- a/consensus/src/quorum_store/batch_proof_queue.rs +++ b/consensus/src/quorum_store/batch_proof_queue.rs @@ -715,11 +715,11 @@ impl BatchProofQueue { } pub(crate) fn handle_updated_block_timestamp(&mut self, block_timestamp: u64) { + // tolerate asynchronous notification + if self.latest_block_timestamp > block_timestamp { + return; + } let start = Instant::now(); - assert!( - self.latest_block_timestamp <= block_timestamp, - "Decreasing block timestamp" - ); self.latest_block_timestamp = block_timestamp; if let Some(time_lag) = aptos_infallible::duration_since_epoch() .checked_sub(Duration::from_micros(block_timestamp)) diff --git a/consensus/src/transaction_shuffler/use_case_aware/mod.rs b/consensus/src/transaction_shuffler/use_case_aware/mod.rs index e97cee3f39c40..5b04b96952cb3 100644 --- a/consensus/src/transaction_shuffler/use_case_aware/mod.rs +++ b/consensus/src/transaction_shuffler/use_case_aware/mod.rs @@ -2,10 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::transaction_shuffler::TransactionShuffler; -use aptos_types::transaction::{ - use_case::{UseCaseAwareTransaction, UseCaseKey}, - SignedTransaction, -}; +use aptos_types::transaction::{use_case::UseCaseKey, SignedTransaction}; use iterator::ShuffledTransactionIterator; use std::fmt::Debug; @@ -45,7 +42,9 @@ pub struct UseCaseAwareShuffler { #[cfg(any(test, feature = "fuzzing"))] impl UseCaseAwareShuffler { - pub fn shuffle_generic( + pub 
fn shuffle_generic< + Txn: aptos_types::transaction::use_case::UseCaseAwareTransaction + Debug, + >( &self, txns: Vec, ) -> Vec { diff --git a/crates/aptos-drop-helper/Cargo.toml b/crates/aptos-drop-helper/Cargo.toml index 936ef297eeebc..3fbe732b66565 100644 --- a/crates/aptos-drop-helper/Cargo.toml +++ b/crates/aptos-drop-helper/Cargo.toml @@ -18,3 +18,6 @@ aptos-metrics-core = { workspace = true } derive_more = { workspace = true } once_cell = { workspace = true } threadpool = { workspace = true } + +[dev-dependencies] +rayon = { workspace = true } diff --git a/crates/aptos-drop-helper/src/async_concurrent_dropper.rs b/crates/aptos-drop-helper/src/async_concurrent_dropper.rs index 6fcb7ffc2e2f0..15231062e52ca 100644 --- a/crates/aptos-drop-helper/src/async_concurrent_dropper.rs +++ b/crates/aptos-drop-helper/src/async_concurrent_dropper.rs @@ -1,7 +1,10 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::metrics::{GAUGE, TIMER}; +use crate::{ + metrics::{GAUGE, TIMER}, + IN_ANY_DROP_POOL, +}; use aptos_infallible::Mutex; use aptos_metrics_core::{IntGaugeHelper, TimerHelper}; use std::sync::{ @@ -42,12 +45,25 @@ impl AsyncConcurrentDropper { rx } + pub fn max_tasks(&self) -> usize { + self.num_tasks_tracker.max_tasks + } + + pub fn num_threads(&self) -> usize { + self.thread_pool.max_count() + } + pub fn wait_for_backlog_drop(&self, no_more_than: usize) { let _timer = TIMER.timer_with(&[self.name, "wait_for_backlog_drop"]); self.num_tasks_tracker.wait_for_backlog_drop(no_more_than); } fn schedule_drop_impl(&self, v: V, notif_sender_opt: Option>) { + if IN_ANY_DROP_POOL.get() { + Self::do_drop(v, notif_sender_opt); + return; + } + let _timer = TIMER.timer_with(&[self.name, "enqueue_drop"]); self.num_tasks_tracker.inc(); @@ -57,15 +73,23 @@ impl AsyncConcurrentDropper { self.thread_pool.execute(move || { let _timer = TIMER.timer_with(&[name, "real_drop"]); - drop(v); + IN_ANY_DROP_POOL.with(|flag| { + flag.set(true); + }); - if let Some(sender) = notif_sender_opt { - sender.send(()).ok(); - } + Self::do_drop(v, notif_sender_opt); num_tasks_tracker.dec(); }) } + + fn do_drop(v: V, notif_sender_opt: Option>) { + drop(v); + + if let Some(sender) = notif_sender_opt { + sender.send(()).ok(); + } + } } struct NumTasksTracker { @@ -111,10 +135,12 @@ impl NumTasksTracker { #[cfg(test)] mod tests { - use crate::AsyncConcurrentDropper; + use crate::{AsyncConcurrentDropper, DropHelper, DEFAULT_DROPPER}; + use rayon::prelude::*; use std::{sync::Arc, thread::sleep, time::Duration}; use threadpool::ThreadPool; + #[derive(Clone, Default)] struct SlowDropper; impl Drop for SlowDropper { @@ -197,4 +223,25 @@ mod tests { s.wait_for_backlog_drop(0); assert!(now.elapsed() < Duration::from_millis(600)); } + + #[test] + fn test_nested_drops() { + #[derive(Clone, Default)] + struct Nested { + _inner: DropHelper, + } + + // pump 2 x max_tasks to the drop queue + let num_items = DEFAULT_DROPPER.max_tasks() * 2; + let items = vec![DropHelper::new(Nested::default()); num_items]; + let drop_thread = std::thread::spawn(move || { + items.into_par_iter().for_each(drop); + }); + + // expect no deadlock and the whole thing to be dropped in full concurrency (with some leeway) + sleep(Duration::from_millis( + 200 + 200 * num_items as u64 / DEFAULT_DROPPER.num_threads() as u64, + )); + assert!(drop_thread.is_finished(), "Drop queue deadlocked."); + } } diff --git a/crates/aptos-drop-helper/src/lib.rs b/crates/aptos-drop-helper/src/lib.rs index 169aae9c41fe3..e80b3008f23dc 100644 --- 
a/crates/aptos-drop-helper/src/lib.rs +++ b/crates/aptos-drop-helper/src/lib.rs @@ -4,12 +4,16 @@ use crate::async_concurrent_dropper::AsyncConcurrentDropper; use derive_more::{Deref, DerefMut}; use once_cell::sync::Lazy; -use std::mem::ManuallyDrop; +use std::{cell::Cell, mem::ManuallyDrop}; pub mod async_concurrent_dropper; pub mod async_drop_queue; mod metrics; +thread_local! { + static IN_ANY_DROP_POOL: Cell = const { Cell::new(false) }; +} + pub static DEFAULT_DROPPER: Lazy = Lazy::new(|| AsyncConcurrentDropper::new("default", 32, 8)); diff --git a/crates/aptos-genesis/src/builder.rs b/crates/aptos-genesis/src/builder.rs index 4117f9a4f2fe5..01921473e7c9b 100644 --- a/crates/aptos-genesis/src/builder.rs +++ b/crates/aptos-genesis/src/builder.rs @@ -27,6 +27,8 @@ use aptos_keygen::KeyGen; use aptos_logger::prelude::*; use aptos_types::{ chain_id::ChainId, + jwks::patch::IssuerJWK, + keyless::Groth16VerificationKey, on_chain_config::{ Features, GasScheduleV2, OnChainConsensusConfig, OnChainExecutionConfig, OnChainJWKConsensusConfig, OnChainRandomnessConfig, @@ -441,6 +443,8 @@ pub struct GenesisConfiguration { pub initial_features_override: Option, pub randomness_config_override: Option, pub jwk_consensus_config_override: Option, + pub initial_jwks: Vec, + pub keyless_groth16_vk_override: Option, } pub type InitConfigFn = Arc; @@ -662,6 +666,8 @@ impl Builder { initial_features_override: None, randomness_config_override: None, jwk_consensus_config_override: None, + initial_jwks: vec![], + keyless_groth16_vk_override: None, }; if let Some(init_genesis_config) = &self.init_genesis_config { (init_genesis_config)(&mut genesis_config); diff --git a/crates/aptos-genesis/src/config.rs b/crates/aptos-genesis/src/config.rs index 5e39ce08c17ac..bc6019f9cd661 100644 --- a/crates/aptos-genesis/src/config.rs +++ b/crates/aptos-genesis/src/config.rs @@ -6,6 +6,8 @@ use aptos_crypto::{bls12381, ed25519::Ed25519PublicKey, x25519}; use aptos_types::{ account_address::{AccountAddress, AccountAddressWithChecks}, chain_id::ChainId, + jwks::patch::IssuerJWK, + keyless::Groth16VerificationKey, network_address::{DnsName, NetworkAddress, Protocol}, on_chain_config::{OnChainConsensusConfig, OnChainExecutionConfig, OnChainJWKConsensusConfig}, transaction::authenticator::AuthenticationKey, @@ -75,7 +77,16 @@ pub struct Layout { pub on_chain_execution_config: OnChainExecutionConfig, /// An optional JWK consensus config to use, instead of `default_for_genesis()`. + #[serde(default)] pub jwk_consensus_config_override: Option, + + /// JWKs to patch in genesis. + #[serde(default)] + pub initial_jwks: Vec, + + /// Keyless Groth16 verification key to install in genesis. 
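+    /// If unset, non-mainnet chains fall back to the devnet verification key
+    /// (see the `vm-genesis` change above).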
+ #[serde(default)] + pub keyless_groth16_vk_override: Option, } impl Layout { @@ -116,6 +127,8 @@ impl Default for Layout { on_chain_consensus_config: OnChainConsensusConfig::default(), on_chain_execution_config: OnChainExecutionConfig::default_for_genesis(), jwk_consensus_config_override: None, + initial_jwks: vec![], + keyless_groth16_vk_override: None, } } } diff --git a/crates/aptos-genesis/src/lib.rs b/crates/aptos-genesis/src/lib.rs index 8a2c20aad3686..8fed37e85cfa6 100644 --- a/crates/aptos-genesis/src/lib.rs +++ b/crates/aptos-genesis/src/lib.rs @@ -23,6 +23,8 @@ use aptos_storage_interface::DbReaderWriter; use aptos_temppath::TempPath; use aptos_types::{ chain_id::ChainId, + jwks::patch::IssuerJWK, + keyless::Groth16VerificationKey, on_chain_config::{ Features, GasScheduleV2, OnChainConsensusConfig, OnChainExecutionConfig, OnChainJWKConsensusConfig, OnChainRandomnessConfig, @@ -76,6 +78,8 @@ pub struct GenesisInfo { pub initial_features_override: Option, pub randomness_config_override: Option, pub jwk_consensus_config_override: Option, + pub initial_jwks: Vec, + pub keyless_groth16_vk_override: Option, } impl GenesisInfo { @@ -115,6 +119,8 @@ impl GenesisInfo { initial_features_override: genesis_config.initial_features_override.clone(), randomness_config_override: genesis_config.randomness_config_override.clone(), jwk_consensus_config_override: genesis_config.jwk_consensus_config_override.clone(), + initial_jwks: genesis_config.initial_jwks.clone(), + keyless_groth16_vk_override: genesis_config.keyless_groth16_vk_override.clone(), }) } @@ -150,6 +156,8 @@ impl GenesisInfo { initial_features_override: self.initial_features_override.clone(), randomness_config_override: self.randomness_config_override.clone(), jwk_consensus_config_override: self.jwk_consensus_config_override.clone(), + initial_jwks: self.initial_jwks.clone(), + keyless_groth16_vk_override: self.keyless_groth16_vk_override.clone(), }, &self.consensus_config, &self.execution_config, diff --git a/crates/aptos-genesis/src/mainnet.rs b/crates/aptos-genesis/src/mainnet.rs index 0bc92acc5add0..37ece1845d719 100644 --- a/crates/aptos-genesis/src/mainnet.rs +++ b/crates/aptos-genesis/src/mainnet.rs @@ -143,6 +143,8 @@ impl MainnetGenesisInfo { initial_features_override: self.initial_features_override.clone(), randomness_config_override: self.randomness_config_override.clone(), jwk_consensus_config_override: self.jwk_consensus_config_override.clone(), + initial_jwks: vec![], + keyless_groth16_vk_override: None, }, ) } diff --git a/crates/aptos-rest-client/src/lib.rs b/crates/aptos-rest-client/src/lib.rs index 0a2e76274f8bc..c58dd35283552 100644 --- a/crates/aptos-rest-client/src/lib.rs +++ b/crates/aptos-rest-client/src/lib.rs @@ -765,7 +765,7 @@ impl Client { if let Some(state) = aptos_error_response.state { if expiration_timestamp_secs <= state.timestamp_usecs / 1_000_000 { if reached_mempool { - return Err(anyhow!("Transaction expired. It is guaranteed it will not be committed on chain.").into()); + return Err(anyhow!("Used to be pending and now not found. Transaction expired. It is guaranteed it will not be committed on chain.").into()); } else { // We want to know whether we ever got Pending state from the mempool, // to warn in case we didn't. diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index a0efff726a5d8..af13bddb60bfb 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -2,11 +2,13 @@ All notable changes to the Aptos CLI will be captured in this file. 
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). -## Unreleased - ## [4.5.0] - 2024/11/15 - Determine network from URL to make explorer links better for legacy users - Add support for AIP-80 compliant strings when importing using the CLI arguments or manual input. +- Add option `--print-metadata-only` to `aptos move decompile` and `aptos move disassemble` to print out the metadata attached to the bytecode. +- Add `--existing-hasura-url` flag to localnet to tell it to use an existing Hasura instance instead of running Hasura itself. See https://github.com/aptos-labs/aptos-core/pull/15313. +- Add `--skip-metadata-apply` flag to localnet, in which case we won't try to apply the Hasura metadata. +- Upgrade the Hasura image we use from 2.40.2 to 2.44.0. ## [4.4.0] - 2024/11/06 - Fix typos in `aptos move compile` help text. diff --git a/crates/aptos/src/genesis/mod.rs index 7dabbc9c609e5..001f9ae1a93ca 100644 --- a/crates/aptos/src/genesis/mod.rs +++ b/crates/aptos/src/genesis/mod.rs @@ -260,6 +260,8 @@ pub fn fetch_mainnet_genesis_info(git_options: GitOptions) -> CliTypedResult CliTypedResult for Decompile { } } +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +struct BytecodeMetadata { + aptos_metadata: Option<RuntimeModuleMetadataV1>, + bytecode_version: u32, + compilation_metadata: CompilationMetadata, +} + impl BytecodeCommand { async fn execute(self, command_type: BytecodeCommandType) -> CliTypedResult<String> { let inputs = if let Some(path) = self.input.bytecode_path.clone() { @@ -141,6 +160,10 @@ impl BytecodeCommand { unreachable!("arguments required by clap") }; + if self.print_metadata_only && self.input.bytecode_path.is_some() { + return self.print_metadata(&inputs[0]); + } + let mut report = vec![]; let mut last_out_dir = String::new(); for bytecode_path in inputs { @@ -201,6 +224,45 @@ impl BytecodeCommand { }) } + fn print_metadata(&self, bytecode_path: &Path) -> Result<String, CliError> { + let bytecode_bytes = read_from_file(bytecode_path)?; + + let v1_metadata = CompilationMetadata { + unstable: false, + compiler_version: CompilerVersion::V1.to_string(), + language_version: LanguageVersion::V1.to_string(), + }; + let metadata = if self.is_script { + let script = CompiledScript::deserialize(&bytecode_bytes) + .context("Script blob can't be deserialized")?; + BytecodeMetadata { + aptos_metadata: get_metadata_from_compiled_script(&script), + bytecode_version: script.version, + compilation_metadata: get_compilation_metadata_from_compiled_script(&script) + .unwrap_or(v1_metadata), + } + } else { + let module = CompiledModule::deserialize(&bytecode_bytes) + .context("Module blob can't be deserialized")?; + BytecodeMetadata { + aptos_metadata: get_metadata_from_compiled_module(&module), + bytecode_version: module.version, + compilation_metadata: get_compilation_metadata_from_compiled_module(&module) + .unwrap_or(v1_metadata), + } + }; + println!( + "Metadata: {}", + serde_json::to_string_pretty(&metadata).expect("expect metadata") + ); + Ok("ok".to_string()) + } + fn disassemble(&self, bytecode_path: &Path) -> Result<String, CliError> { let bytecode_bytes = read_from_file(bytecode_path)?; let move_path = bytecode_path.with_extension(MOVE_EXTENSION);
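For a quick read of what `--print-metadata-only` emits, here is a minimal, self-contained sketch of the JSON shape outside the diff. The structs are simplified stand-ins (notably, `aptos_metadata` is really the framework's metadata struct rather than a string), and it assumes `serde` (with derive) and `serde_json` as dependencies:

use serde::{Deserialize, Serialize};

// Simplified stand-in for the compiler's CompilationMetadata.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct CompilationMetadata {
    unstable: bool,
    compiler_version: String,
    language_version: String,
}

// Simplified stand-in for the CLI's BytecodeMetadata; `aptos_metadata` is
// flattened to a string here for illustration only.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct BytecodeMetadata {
    aptos_metadata: Option<String>,
    bytecode_version: u32,
    compilation_metadata: CompilationMetadata,
}

fn main() {
    // A module with no compilation metadata chunk falls back to the
    // synthesized v1 entry, mirroring the unwrap_or(v1_metadata) above.
    // The version strings are placeholders; the real values come from
    // CompilerVersion::V1.to_string() / LanguageVersion::V1.to_string().
    let metadata = BytecodeMetadata {
        aptos_metadata: None,
        bytecode_version: 6,
        compilation_metadata: CompilationMetadata {
            unstable: false,
            compiler_version: "1".to_string(),
            language_version: "1".to_string(),
        },
    };
    println!(
        "Metadata: {}",
        serde_json::to_string_pretty(&metadata).expect("expect metadata")
    );
}

Running it prints a `Metadata: { ... }` blob analogous to what the command prints for a module carrying no attached Aptos metadata.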
diff --git a/crates/aptos/src/node/local_testnet/indexer_api.rs b/crates/aptos/src/node/local_testnet/indexer_api.rs index 1967b1ceb2540..8ff415a879605 100644 --- a/crates/aptos/src/node/local_testnet/indexer_api.rs +++ b/crates/aptos/src/node/local_testnet/indexer_api.rs @@ -20,11 +20,11 @@ use clap::Parser; use futures::TryStreamExt; use maplit::{hashmap, hashset}; use reqwest::Url; -use std::{collections::HashSet, path::PathBuf}; +use std::{collections::HashSet, path::PathBuf, time::Duration}; use tracing::{info, warn}; const INDEXER_API_CONTAINER_NAME: &str = "local-testnet-indexer-api"; -const HASURA_IMAGE: &str = "hasura/graphql-engine:v2.40.2-ce"; +const HASURA_IMAGE: &str = "hasura/graphql-engine:v2.44.0-ce"; /// This Hasura metadata originates from the aptos-indexer-processors repo. /// @@ -47,22 +47,42 @@ const HASURA_METADATA: &str = include_str!("hasura_metadata.json"); /// Args related to running an indexer API for the localnet. #[derive(Debug, Parser)] pub struct IndexerApiArgs { - /// If set, we will run a postgres DB using Docker (unless - /// --use-host-postgres is set), run the standard set of indexer processors (see - /// --processors), and configure them to write to this DB, and run an API that lets - /// you access the data they write to storage. This is opt in because it requires - /// Docker to be installed on the host system. + /// If set, we will run a postgres DB using Docker (unless --use-host-postgres is + /// set), run the standard set of indexer processors (see --processors), and + /// configure them to write to this DB, and run an API that lets you access the data + /// they write to storage. This is opt in because it requires Docker to be installed + /// on the host system. #[clap(long, conflicts_with = "no_txn_stream")] pub with_indexer_api: bool, /// The port at which to run the indexer API. #[clap(long, default_value_t = 8090)] pub indexer_api_port: u16, + + /// If set we will assume a Hasura instance is running at the given URL rather than + /// running our own. + /// + /// If set, we will not run the indexer API, and will instead assume that a Hasura + /// instance is running at the given URL. We will wait for it to become healthy by + /// waiting for / to return 200 and then apply the Hasura metadata. The URL should + /// look something like this: http://127.0.0.1:8090, assuming the Hasura instance is + /// running at port 8090. When the localnet shuts down, we will not attempt to stop + /// the Hasura instance, this is up to you to handle. If you're using this, you + /// should probably use `--use-host-postgres` as well, otherwise you won't be able + /// to start your Hasura instance because the DB we create won't exist yet. + #[clap(long)] + pub existing_hasura_url: Option, + + /// If set, we will not try to apply the Hasura metadata. 
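+ /// Mostly useful together with `--existing-hasura-url`, e.g. when the metadata has already been applied to that instance out of band.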
+ #[clap(long)] + pub skip_metadata_apply: bool, } #[derive(Clone, Debug)] pub struct IndexerApiManager { indexer_api_port: u16, + existing_hasura_url: Option<Url>, + skip_metadata_apply: bool, prerequisite_health_checkers: HashSet<HealthChecker>, test_dir: PathBuf, postgres_connection_string: String, @@ -77,6 +97,8 @@ impl IndexerApiManager { ) -> Result<Self> { Ok(Self { indexer_api_port: args.indexer_api_args.indexer_api_port, + existing_hasura_url: args.indexer_api_args.existing_hasura_url.clone(), + skip_metadata_apply: args.indexer_api_args.skip_metadata_apply, prerequisite_health_checkers, test_dir, postgres_connection_string, @@ -84,7 +106,10 @@ impl IndexerApiManager { } pub fn get_url(&self) -> Url { - Url::parse(&format!("http://127.0.0.1:{}", self.indexer_api_port)).unwrap() + match &self.existing_hasura_url { + Some(url) => url.clone(), + None => Url::parse(&format!("http://127.0.0.1:{}", self.indexer_api_port)).unwrap(), + } } } @@ -95,6 +120,10 @@ impl ServiceManager for IndexerApiManager { } async fn pre_run(&self) -> Result<()> { + if self.existing_hasura_url.is_some() { + return Ok(()); + } + // Confirm Docker is available. get_docker().await?; @@ -120,12 +149,15 @@ impl ServiceManager for IndexerApiManager { /// In this case we return two HealthCheckers, one for whether the Hasura API /// is up at all and one for whether the metadata is applied. fn get_health_checkers(&self) -> HashSet<HealthChecker> { - hashset! { + let mut checkers = hashset! { // This first one just checks if the API is up at all. HealthChecker::Http(self.get_url(), "Indexer API".to_string()), + }; + if !self.skip_metadata_apply { // This second one checks if the metadata is applied. - HealthChecker::IndexerApiMetadata(self.get_url()), + checkers.insert(HealthChecker::IndexerApiMetadata(self.get_url())); } + checkers } fn get_prerequisite_health_checkers(&self) -> HashSet<&HealthChecker> { @@ -133,6 +165,25 @@ impl ServiceManager for IndexerApiManager { } async fn run_service(self: Box<Self>) -> Result<()> { + // If we're using an existing Hasura instance we just do nothing. If the Hasura + // instance becomes unhealthy we print an error and exit. + if let Some(url) = self.existing_hasura_url { + info!("Using existing Hasura instance at {}", url); + // Periodically check that the Hasura instance is healthy. + let checker = HealthChecker::Http(url.clone(), "Indexer API".to_string()); + loop { + if let Err(e) = checker.wait(None).await { + eprintln!( + "Existing Hasura instance at {} became unhealthy: {}", + url, e + ); + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + return Ok(()); + } + setup_docker_logging(&self.test_dir, "indexer-api", INDEXER_API_CONTAINER_NAME)?; // This is somewhat hard to maintain. If it requires any further maintenance we @@ -239,6 +290,10 @@ impl ServiceManager for IndexerApiManager { } fn get_post_healthy_steps(&self) -> Vec<Box<dyn PostHealthyStep>> { + if self.skip_metadata_apply { + return vec![]; + } + /// There is no good way to apply Hasura metadata (the JSON format, anyway) to /// an instance of Hasura in a container at startup: /// @@ -267,6 +322,10 @@ impl ServiceManager for IndexerApiManager { } fn get_shutdown_steps(&self) -> Vec<Box<dyn ShutdownStep>> { + if self.existing_hasura_url.is_some() { + return vec![]; + } + // Unfortunately the Hasura container does not shut down when the CLI does and // there doesn't seem to be a good way to make it do so. To work around this, // we register a step that will stop the container on shutdown.
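The external-Hasura branch of `run_service` above reduces to a poll-until-unhealthy loop. As a rough standalone sketch of the same pattern, using plain `reqwest` instead of the CLI's `HealthChecker` (the URL and the one-second cadence are assumptions carried over from the hunk; assumes `tokio` with the `full` feature and `reqwest` as dependencies):

use std::time::Duration;

// Watch a service we did not start ourselves: return once it stops
// answering 200, leaving its lifecycle to whoever started it.
async fn watch_external_service(url: reqwest::Url, name: &str) {
    let client = reqwest::Client::new();
    loop {
        let healthy = match client.get(url.clone()).send().await {
            Ok(resp) => resp.status().is_success(),
            Err(_) => false,
        };
        if !healthy {
            eprintln!("Existing {} instance at {} became unhealthy", name, url);
            return;
        }
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}

#[tokio::main]
async fn main() {
    // Assumed: a Hasura instance started out of band on this port.
    let url = reqwest::Url::parse("http://127.0.0.1:8090").unwrap();
    watch_external_service(url, "Hasura").await;
}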
diff --git a/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs b/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs index db6c9383c023e..7168c146a1867 100644 --- a/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs +++ b/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs @@ -6,7 +6,10 @@ use aptos_sdk::types::{ AccountKey, EphemeralKeyPair, EphemeralPrivateKey, KeylessAccount, LocalAccount, }; use aptos_transaction_generator_lib::ReliableTransactionSubmitter; -use aptos_types::keyless::{Claims, OpenIdSig, Pepper, ZeroKnowledgeSig}; +use aptos_types::{ + keyless, + keyless::{Claims, OpenIdSig, Pepper, ZeroKnowledgeSig}, +}; use async_trait::async_trait; use futures::StreamExt; use rand::rngs::StdRng; @@ -36,6 +39,7 @@ pub fn create_keyless_account_generator( epk_expiry_date_secs: u64, jwt: &str, proof_file_path: Option<&str>, + keyless_config: keyless::Configuration, ) -> anyhow::Result> { let parts: Vec<&str> = jwt.split('.').collect(); let header_bytes = base64::decode(parts[0]).unwrap(); @@ -51,6 +55,7 @@ pub fn create_keyless_account_generator( uid_key: "sub".to_owned(), uid_val: claims.oidc_claims.sub, jwt_header_json, + keyless_config, })) } @@ -109,6 +114,9 @@ pub struct KeylessAccountGenerator { uid_key: String, uid_val: String, jwt_header_json: String, + /// We assume the on-chain keyless config won't change and cache it here. + /// Needed by nonce generation. + keyless_config: keyless::Configuration, } #[async_trait] @@ -153,7 +161,8 @@ impl LocalAccountGenerator for KeylessAccountGenerator { &self.uid_key, &self.uid_val, &self.jwt_header_json, - EphemeralKeyPair::new( + EphemeralKeyPair::new_with_keyless_config( + &self.keyless_config, esk, self.epk_expiry_date_secs, vec![0; OpenIdSig::EPK_BLINDER_NUM_BYTES], diff --git a/crates/transaction-emitter-lib/src/emitter/mod.rs b/crates/transaction-emitter-lib/src/emitter/mod.rs index 78a140cc4f997..c6a31e04976f7 100644 --- a/crates/transaction-emitter-lib/src/emitter/mod.rs +++ b/crates/transaction-emitter-lib/src/emitter/mod.rs @@ -717,13 +717,15 @@ impl EmitJob { pub struct TxnEmitter { txn_factory: TransactionFactory, rng: StdRng, + rest_cli: RestClient, } impl TxnEmitter { - pub fn new(transaction_factory: TransactionFactory, rng: StdRng) -> Self { + pub fn new(transaction_factory: TransactionFactory, rng: StdRng, rest_cli: RestClient) -> Self { Self { txn_factory: transaction_factory, rng, + rest_cli, } } @@ -775,6 +777,11 @@ impl TxnEmitter { .expect("keyless_ephem_secret_key to not be None") .as_ref(), )?; + let keyless_config = self + .rest_cli + .get_resource(AccountAddress::ONE, "0x1::keyless_account::Configuration") + .await? + .into_inner(); create_keyless_account_generator( ephem_sk, req.epk_expiry_date_secs @@ -783,6 +790,7 @@ impl TxnEmitter { .as_deref() .expect("keyless_jwt to not be None"), req.proof_file_path.as_deref(), + keyless_config, )? 
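// The on-chain keyless Configuration is fetched once up front and cached in the generator, since nonce derivation needs it for every account.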
}, }; diff --git a/crates/transaction-emitter-lib/src/wrappers.rs b/crates/transaction-emitter-lib/src/wrappers.rs index 2cc2188104b89..98cc96f143d78 100644 --- a/crates/transaction-emitter-lib/src/wrappers.rs +++ b/crates/transaction-emitter-lib/src/wrappers.rs @@ -19,7 +19,7 @@ use anyhow::{bail, Context, Result}; use aptos_logger::{error, info}; use aptos_sdk::transaction_builder::TransactionFactory; use aptos_transaction_generator_lib::{args::TransactionTypeArg, AccountType, WorkflowProgress}; -use aptos_types::keyless::test_utils::get_sample_esk; +use aptos_types::{account_address::AccountAddress, keyless::test_utils::get_sample_esk}; use rand::{rngs::StdRng, SeedableRng}; use std::{ sync::Arc, @@ -87,6 +87,7 @@ pub async fn emit_transactions_with_cluster( .with_transaction_expiration_time(args.txn_expiration_time_secs) .with_gas_unit_price(aptos_global_constants::GAS_UNIT_PRICE), StdRng::from_entropy(), + client, ); let transaction_mix_per_phase = TransactionTypeArg::args_to_transaction_mix_per_phase( @@ -230,12 +231,17 @@ pub async fn create_accounts_command( let account_generator = if let Some(jwt) = &create_accounts_args.keyless_jwt { emit_job_request = emit_job_request.keyless_jwt(jwt); + let keyless_config = client + .get_resource(AccountAddress::ONE, "0x1::keyless_account::Configuration") + .await? + .into_inner(); create_keyless_account_generator( get_sample_esk(), 0, jwt, create_accounts_args.proof_file_path.as_deref(), + keyless_config, )? } else { Box::new(PrivateKeyAccountGenerator) diff --git a/crates/transaction-emitter/src/diag.rs b/crates/transaction-emitter/src/diag.rs index 8b68d41d40e59..e6636d6cdd150 100644 --- a/crates/transaction-emitter/src/diag.rs +++ b/crates/transaction-emitter/src/diag.rs @@ -19,6 +19,7 @@ pub async fn diag(cluster: &Cluster) -> Result<()> { TransactionFactory::new(cluster.chain_id) .with_gas_unit_price(aptos_global_constants::GAS_UNIT_PRICE), StdRng::from_entropy(), + client, ); let coin_source_account_address = coin_source_account.address(); let instances: Vec<_> = cluster.all_instances().collect(); diff --git a/crates/transaction-generator-lib/src/publishing/publish_util.rs b/crates/transaction-generator-lib/src/publishing/publish_util.rs index 4549b85cf327d..a1e059ef7f603 100644 --- a/crates/transaction-generator-lib/src/publishing/publish_util.rs +++ b/crates/transaction-generator-lib/src/publishing/publish_util.rs @@ -132,14 +132,17 @@ impl Package { } pub fn script(publisher: AccountAddress) -> TransactionPayload { + assert_ne!(publisher, AccountAddress::MAX_ADDRESS); + let code = &*raw_module_data::SCRIPT_SIMPLE; let mut script = CompiledScript::deserialize(code).expect("Script must deserialize"); - // Change the constant to the sender's address to change script's hash. - for constant in &mut script.constant_pool { - if constant.type_ == SignatureToken::Address { - constant.data = bcs::to_bytes(&publisher).expect("Address must serialize"); - break; + // Make sure dependencies link to published modules. Compiler V2 adds 0xf..ff so we need to + // skip it. 
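+ // The asserts pin down the expected shape: exactly two address identifiers (the publisher slot plus the 0xf..ff sentinel), and the publisher itself must never collide with the sentinel (hence the assert_ne above).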
+ assert_eq!(script.address_identifiers.len(), 2); + for address in &mut script.address_identifiers { + if address != &AccountAddress::MAX_ADDRESS { + *address = publisher; } } diff --git a/crates/transaction-generator-lib/src/publishing/raw_module_data.rs b/crates/transaction-generator-lib/src/publishing/raw_module_data.rs index bc18986f4005a..195b894efff28 100644 --- a/crates/transaction-generator-lib/src/publishing/raw_module_data.rs +++ b/crates/transaction-generator-lib/src/publishing/raw_module_data.rs @@ -739,10 +739,10 @@ pub static MODULES_COMPLEX: Lazy>> = Lazy::new(|| { vec![ pub static PACKAGE_SIMPLE_METADATA: Lazy> = Lazy::new(|| { vec![ 13, 71, 101, 110, 101, 114, 105, 99, 77, 111, 100, 117, 108, 101, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 64, 69, 70, 50, 54, 66, 50, 69, 50, 55, 66, 69, 54, - 50, 53, 68, 56, 67, 56, 50, 48, 50, 57, 53, 48, 50, 51, 49, 70, 69, 55, - 68, 51, 55, 50, 69, 49, 65, 66, 66, 50, 69, 49, 52, 65, 65, 50, 69, 54, - 66, 66, 51, 54, 55, 68, 67, 49, 69, 67, 53, 57, 54, 53, 48, 68, 132, 1, + 0, 0, 0, 0, 0, 64, 57, 51, 57, 69, 67, 66, 51, 52, 55, 65, 56, 56, + 65, 52, 66, 57, 54, 48, 54, 65, 55, 53, 54, 55, 69, 52, 48, 57, 69, 56, + 53, 53, 65, 70, 69, 68, 51, 55, 56, 57, 68, 70, 70, 70, 65, 57, 51, 54, + 68, 66, 53, 55, 67, 48, 50, 57, 52, 69, 50, 65, 50, 56, 54, 54, 132, 1, 31, 139, 8, 0, 0, 0, 0, 0, 2, 255, 77, 139, 59, 14, 194, 48, 16, 68, 251, 61, 133, 229, 30, 135, 11, 80, 208, 64, 197, 9, 162, 20, 43, 123, 64, 86, 156, 93, 203, 134, 80, 32, 238, 142, 45, 1, 138, 102, 154, 249, 188, 49, 179, 159, @@ -762,16 +762,6 @@ pub static PACKAGE_SIMPLE_METADATA: Lazy> = Lazy::new(|| { ] }); -#[rustfmt::skip] -pub static SCRIPT_SIMPLE: Lazy> = Lazy::new(|| { - vec![ - 161, 28, 235, 11, 7, 0, 0, 10, 2, 5, 0, 4, 6, 4, 34, 1, 6, 12, - 0, 5, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, - 0, 1, 3, 11, 0, 1, 2, - ] -}); - #[rustfmt::skip] pub static MODULE_SIMPLE_SIMPLE: Lazy> = Lazy::new(|| { vec![ @@ -979,6 +969,28 @@ pub static MODULE_SIMPLE_SIMPLE: Lazy> = Lazy::new(|| { ] }); +#[rustfmt::skip] +pub static SCRIPT_SIMPLE: Lazy> = Lazy::new(|| { + vec![ + 161, 28, 235, 11, 7, 0, 0, 10, 6, 1, 0, 2, 3, 2, 18, 5, 20, 16, + 7, 36, 55, 8, 91, 64, 16, 155, 1, 31, 1, 2, 0, 3, 2, 1, 0, 1, + 0, 4, 2, 1, 0, 1, 0, 5, 3, 1, 0, 1, 1, 6, 12, 0, 2, 6, + 12, 3, 3, 6, 12, 3, 3, 2, 3, 3, 8, 60, 83, 69, 76, 70, 62, 95, + 48, 4, 109, 97, 105, 110, 6, 115, 105, 109, 112, 108, 101, 8, 108, 111, 111, 112, + 95, 110, 111, 112, 15, 108, 111, 111, 112, 95, 97, 114, 105, 116, 104, 109, 101, 116, + 105, 99, 8, 108, 111, 111, 112, 95, 98, 99, 115, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 171, 205, 20, 99, 111, 109, 112, 105, 108, 97, 116, 105, 111, 110, 95, 109, 101, + 116, 97, 100, 97, 116, 97, 9, 0, 3, 50, 46, 48, 3, 50, 46, 49, 0, 0, + 4, 20, 5, 17, 10, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 10, + 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 17, 1, 11, 0, 6, 0, 0, 0, + 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 17, 2, 5, 13, + 5, 14, 5, 16, 5, 14, 5, 16, 2, 11, 0, 1, 5, 16, + ] +}); + #[rustfmt::skip] pub static MODULES_SIMPLE: Lazy>> = Lazy::new(|| { vec![ MODULE_SIMPLE_SIMPLE.to_vec(), diff --git a/docker/builder/builder.Dockerfile b/docker/builder/builder.Dockerfile index ce5c2243acbb0..d5c8ecb1571e4 100644 --- 
a/docker/builder/builder.Dockerfile +++ b/docker/builder/builder.Dockerfile @@ -3,21 +3,23 @@ FROM rust as rust-base WORKDIR /aptos + RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ + sed -i 's|http://deb.debian.org/debian|http://cloudfront.debian.net/debian|g' /etc/apt/sources.list && \ apt update && apt-get --no-install-recommends install -y \ - cmake \ - curl \ - clang \ - git \ - pkg-config \ - libssl-dev \ - libpq-dev \ - libdw-dev \ - binutils \ - lld \ - libudev-dev + binutils \ + clang \ + cmake \ + curl \ + git \ + libdw-dev \ + libpq-dev \ + libssl-dev \ + libudev-dev \ + lld \ + pkg-config ### Build Rust code ### FROM rust-base as builder-base diff --git a/docker/builder/debian-base.Dockerfile b/docker/builder/debian-base.Dockerfile index e27cb5c34263d..4bdaa397c68f0 100644 --- a/docker/builder/debian-base.Dockerfile +++ b/docker/builder/debian-base.Dockerfile @@ -6,6 +6,19 @@ ARG TARGETARCH RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + sed -i 's|http://deb.debian.org/debian|http://cloudfront.debian.net/debian|g' /etc/apt/sources.list && \ + apt-get update && apt-get --no-install-recommends --allow-downgrades -y install \ + ca-certificates \ + curl \ + iproute2 \ + libpq-dev \ + libssl1.1 \ + netcat \ + net-tools \ + tcpdump + # Add Tini to make sure the binaries receive proper SIGTERM signals when Docker is shut down ADD --chmod=755 https://github.com/krallin/tini/releases/download/v0.19.0/tini-$TARGETARCH /tini -ENTRYPOINT ["/tini", "--"] \ No newline at end of file +ENTRYPOINT ["/tini", "--"] diff --git a/docker/builder/faucet.Dockerfile b/docker/builder/faucet.Dockerfile index afa7c35c866d7..23ee5fc6f2f66 100644 --- a/docker/builder/faucet.Dockerfile +++ b/docker/builder/faucet.Dockerfile @@ -3,14 +3,8 @@ FROM debian-base AS faucet RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ + sed -i 's|http://security.debian.org/debian-security|https://cloudfront.debian.net/debian-security|g' /etc/apt/sources.list && \ apt-get update && apt-get --no-install-recommends install -y \ - libssl1.1 \ - ca-certificates \ - nano \ - net-tools \ - tcpdump \ - iproute2 \ - netcat \ procps RUN mkdir -p /aptos/client/data/wallet/ diff --git a/docker/builder/forge.Dockerfile b/docker/builder/forge.Dockerfile index b5ee2fc3bd8cd..70b2a62b473c4 100644 --- a/docker/builder/forge.Dockerfile +++ b/docker/builder/forge.Dockerfile @@ -3,16 +3,14 @@ FROM debian-base as forge RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update && apt-get install --no-install-recommends -y \ - libssl1.1 \ - ca-certificates \ - openssh-client \ - wget \ - busybox \ - git \ - unzip \ - awscli + awscli \ + busybox \ + git \ + openssh-client \ + unzip \ + wget WORKDIR /aptos diff --git a/docker/builder/indexer-grpc.Dockerfile b/docker/builder/indexer-grpc.Dockerfile index 2f79181084662..867f30a60b493 100644 --- a/docker/builder/indexer-grpc.Dockerfile +++ b/docker/builder/indexer-grpc.Dockerfile @@ -2,18 +2,6 @@ 
FROM debian-base AS indexer-grpc -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install --no-install-recommends -y \ - libssl1.1 \ - ca-certificates \ - net-tools \ - tcpdump \ - iproute2 \ - netcat \ - libpq-dev \ - curl - COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-cache-worker /usr/local/bin/aptos-indexer-grpc-cache-worker COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-file-store /usr/local/bin/aptos-indexer-grpc-file-store COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-data-service /usr/local/bin/aptos-indexer-grpc-data-service diff --git a/docker/builder/keyless-pepper-service.Dockerfile b/docker/builder/keyless-pepper-service.Dockerfile index fde68bca54f4b..1ecadbce7bf87 100644 --- a/docker/builder/keyless-pepper-service.Dockerfile +++ b/docker/builder/keyless-pepper-service.Dockerfile @@ -1,17 +1,5 @@ FROM debian-base AS keyless-pepper-service -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install --no-install-recommends -y \ - libssl1.1 \ - ca-certificates \ - net-tools \ - tcpdump \ - iproute2 \ - netcat \ - libpq-dev \ - curl - COPY --link --from=tools-builder /aptos/dist/aptos-keyless-pepper-service /usr/local/bin/aptos-keyless-pepper-service EXPOSE 8000 diff --git a/docker/builder/nft-metadata-crawler.Dockerfile b/docker/builder/nft-metadata-crawler.Dockerfile index 1b1d6998740ad..ddb5a1722153f 100644 --- a/docker/builder/nft-metadata-crawler.Dockerfile +++ b/docker/builder/nft-metadata-crawler.Dockerfile @@ -4,18 +4,6 @@ FROM indexer-builder FROM debian-base AS nft-metadata-crawler -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install --no-install-recommends -y \ - libssl1.1 \ - ca-certificates \ - net-tools \ - tcpdump \ - iproute2 \ - netcat \ - libpq-dev \ - curl - COPY --link --from=indexer-builder /aptos/dist/aptos-nft-metadata-crawler /usr/local/bin/aptos-nft-metadata-crawler # The health check port diff --git a/docker/builder/node-checker.Dockerfile b/docker/builder/node-checker.Dockerfile index d297f81eb3954..29941eac85c54 100644 --- a/docker/builder/node-checker.Dockerfile +++ b/docker/builder/node-checker.Dockerfile @@ -2,17 +2,6 @@ FROM debian-base AS node-checker -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install --no-install-recommends -y \ - libssl1.1 \ - ca-certificates \ - net-tools \ - tcpdump \ - iproute2 \ - netcat \ - libpq-dev - COPY --link --from=tools-builder /aptos/dist/aptos-node-checker /usr/local/bin/aptos-node-checker ENV RUST_LOG_FORMAT=json @@ -25,4 +14,4 @@ ENV GIT_TAG ${GIT_TAG} ARG GIT_BRANCH ENV GIT_BRANCH ${GIT_BRANCH} ARG GIT_SHA -ENV GIT_SHA ${GIT_SHA} \ No newline at end of file +ENV GIT_SHA ${GIT_SHA} diff --git a/docker/builder/telemetry-service.Dockerfile b/docker/builder/telemetry-service.Dockerfile index 92e589d3c3244..8de9deffa1017 100644 --- a/docker/builder/telemetry-service.Dockerfile +++ b/docker/builder/telemetry-service.Dockerfile @@ -1,17 +1,5 @@ FROM debian-base AS telemetry-service -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install --no-install-recommends 
-y \ - libssl1.1 \ - ca-certificates \ - net-tools \ - tcpdump \ - iproute2 \ - netcat \ - libpq-dev \ - curl - COPY --link --from=tools-builder /aptos/dist/aptos-telemetry-service /usr/local/bin/aptos-telemetry-service EXPOSE 8000 @@ -23,4 +11,4 @@ ENV GIT_TAG ${GIT_TAG} ARG GIT_BRANCH ENV GIT_BRANCH ${GIT_BRANCH} ARG GIT_SHA -ENV GIT_SHA ${GIT_SHA} \ No newline at end of file +ENV GIT_SHA ${GIT_SHA} diff --git a/docker/builder/tools.Dockerfile b/docker/builder/tools.Dockerfile index 9abfa55622247..16f2c1a3f74a3 100644 --- a/docker/builder/tools.Dockerfile +++ b/docker/builder/tools.Dockerfile @@ -1,9 +1,6 @@ ### Tools Image ### FROM debian-base AS tools -RUN echo "deb http://deb.debian.org/debian bullseye main" > /etc/apt/sources.list.d/bullseye.list && \ - echo "Package: *\nPin: release n=bullseye\nPin-Priority: 50" > /etc/apt/preferences.d/bullseye - RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update && apt-get --no-install-recommends --allow-downgrades -y \ @@ -13,9 +10,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ perl-base=5.32.1-4+deb11u4 \ libtinfo6=6.2+20201114-2+deb11u2 \ git \ - libssl1.1 \ - ca-certificates \ - socat \ + socat \ python3-botocore/bullseye \ awscli/bullseye \ gnupg2 \ diff --git a/docker/builder/validator-testing.Dockerfile b/docker/builder/validator-testing.Dockerfile index 0171363e1d44c..cd6b499c7f06c 100644 --- a/docker/builder/validator-testing.Dockerfile +++ b/docker/builder/validator-testing.Dockerfile @@ -5,29 +5,23 @@ FROM debian-base as validator-testing-base RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update && apt-get install -y --no-install-recommends \ - libssl1.1 \ - ca-certificates \ - # Needed to run debugging tools like perf - linux-perf \ - sudo \ - procps \ - gdb \ - curl \ - # postgres client lib required for indexer - libpq-dev \ - # Extra goodies for debugging - less \ - git \ - vim \ - nano \ - libjemalloc-dev \ - binutils \ - graphviz \ - ghostscript \ - strace \ - htop \ - sysstat \ - valgrind + # Needed to run debugging tools like perf + gdb \ + linux-perf \ + procps \ + sudo \ + # Extra goodies for debugging + binutils \ + ghostscript \ + git \ + graphviz \ + htop \ + less \ + libjemalloc-dev \ + strace \ + sysstat \ + valgrind \ + vim FROM node-builder diff --git a/docker/builder/validator.Dockerfile b/docker/builder/validator.Dockerfile index db356cf63ed71..905e0aee3c9eb 100644 --- a/docker/builder/validator.Dockerfile +++ b/docker/builder/validator.Dockerfile @@ -7,18 +7,13 @@ FROM tools-builder FROM debian-base AS validator RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update && apt-get install --no-install-recommends -y \ - libssl1.1 \ - ca-certificates \ # Needed to run debugging tools like perf linux-perf \ sudo \ procps \ - gdb \ - curl \ - # postgres client lib required for indexer - libpq-dev + gdb ### Because build machine perf might not match run machine perf, we have to symlink ### Even if version slightly off, still mostly works diff --git a/execution/executor-benchmark/src/account_generator.rs b/execution/executor-benchmark/src/account_generator.rs index 4e7defd846868..2d08e0a177459 100644 --- a/execution/executor-benchmark/src/account_generator.rs +++ 
b/execution/executor-benchmark/src/account_generator.rs @@ -17,15 +17,15 @@ impl AccountGenerator { const SEED_ACCOUNTS_ROOT_SEED: u64 = u64::max_value(); const USER_ACCOUNTS_ROOT_SEED: u64 = 0; - pub fn new_for_seed_accounts() -> Self { - Self::new(Self::SEED_ACCOUNTS_ROOT_SEED, 0) + pub fn new_for_seed_accounts(is_keyless: bool) -> Self { + Self::new(Self::SEED_ACCOUNTS_ROOT_SEED, 0, is_keyless) } - pub fn new_for_user_accounts(num_to_skip: u64) -> Self { - Self::new(Self::USER_ACCOUNTS_ROOT_SEED, num_to_skip) + pub fn new_for_user_accounts(num_to_skip: u64, is_keyless: bool) -> Self { + Self::new(Self::USER_ACCOUNTS_ROOT_SEED, num_to_skip, is_keyless) } - fn new(root_seed: u64, num_to_skip: u64) -> Self { + fn new(root_seed: u64, num_to_skip: u64, is_keyless: bool) -> Self { let mut root_rng = StdRng::seed_from_u64(root_seed); let num_rngs_to_skip = num_to_skip / Self::MAX_ACCOUNT_GEN_PER_RNG; for _ in 0..num_rngs_to_skip { @@ -35,14 +35,20 @@ impl AccountGenerator { let mut active_rng_quota = Self::MAX_ACCOUNT_GEN_PER_RNG - active_rng_to_skip; let mut active_rng = StdRng::seed_from_u64(root_rng.next_u64()); for _ in 0..active_rng_to_skip { - LocalAccount::generate(&mut active_rng); + LocalAccount::generate_for_testing(&mut active_rng, is_keyless); } let (sender, receiver) = mpsc::sync_channel(100 /* bound */); std::thread::Builder::new() .name("account_generator".to_string()) .spawn(move || { - while sender.send(LocalAccount::generate(&mut active_rng)).is_ok() { + while sender + .send(LocalAccount::generate_for_testing( + &mut active_rng, + is_keyless, + )) + .is_ok() + { active_rng_quota -= 1; if active_rng_quota == 0 { active_rng = StdRng::seed_from_u64(root_rng.next_u64()); diff --git a/execution/executor-benchmark/src/db_generator.rs b/execution/executor-benchmark/src/db_generator.rs index ce2ca5c2630ee..fff814fec00a3 100644 --- a/execution/executor-benchmark/src/db_generator.rs +++ b/execution/executor-benchmark/src/db_generator.rs @@ -13,9 +13,17 @@ use aptos_config::{ use aptos_db::AptosDB; use aptos_executor::db_bootstrapper::{generate_waypoint, maybe_bootstrap}; use aptos_storage_interface::DbReaderWriter; -use aptos_types::on_chain_config::Features; +use aptos_types::{ + jwks::{jwk::JWK, patch::IssuerJWK}, + keyless::{ + circuit_constants::TEST_GROTH16_SETUP, + test_utils::{get_sample_iss, get_sample_jwk}, + Groth16VerificationKey, + }, + on_chain_config::Features, +}; use aptos_vm::{aptos_vm::AptosVMBlockExecutor, VMBlockExecutor}; -use std::{fs, path::Path}; +use std::{fs, path::Path, sync::Arc}; pub fn create_db_with_accounts( num_accounts: usize, @@ -27,6 +35,7 @@ pub fn create_db_with_accounts( enable_storage_sharding: bool, pipeline_config: PipelineConfig, init_features: Features, + is_keyless: bool, ) where V: VMBlockExecutor + 'static, { @@ -56,6 +65,7 @@ pub fn create_db_with_accounts( enable_storage_sharding, pipeline_config, init_features, + is_keyless, ); } @@ -65,7 +75,16 @@ pub(crate) fn bootstrap_with_genesis( init_features: Features, ) { let (config, _genesis_key) = - aptos_genesis::test_utils::test_config_with_custom_features(init_features); + aptos_genesis::test_utils::test_config_with_custom_onchain(Some(Arc::new(move |config| { + config.initial_features_override = Some(init_features.clone()); + config.initial_jwks = vec![IssuerJWK { + issuer: get_sample_iss(), + jwk: JWK::RSA(get_sample_jwk()), + }]; + config.keyless_groth16_vk_override = Some(Groth16VerificationKey::from( + &TEST_GROTH16_SETUP.prepared_vk, + )); + }))); let mut rocksdb_configs = 
RocksdbConfigs::default(); rocksdb_configs.state_merkle_db_config.max_open_files = -1; diff --git a/execution/executor-benchmark/src/lib.rs b/execution/executor-benchmark/src/lib.rs index 7079ce0a9ca97..ef9b88e284c16 100644 --- a/execution/executor-benchmark/src/lib.rs +++ b/execution/executor-benchmark/src/lib.rs @@ -43,7 +43,7 @@ use aptos_transaction_generator_lib::{ create_txn_generator_creator, AlwaysApproveRootAccountHandle, TransactionGeneratorCreator, TransactionType::{self, CoinTransfer}, }; -use aptos_types::on_chain_config::Features; +use aptos_types::on_chain_config::{FeatureFlag, Features}; use aptos_vm::VMBlockExecutor; use db_reliable_submitter::DbReliableTransactionSubmitter; use metrics::TIMER; @@ -57,6 +57,12 @@ use std::{ }; use tokio::runtime::Runtime; +pub fn default_benchmark_features() -> Features { + let mut init_features = Features::default(); + init_features.disable(FeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH); + init_features +} + pub fn init_db_and_executor(config: &NodeConfig) -> (DbReaderWriter, BlockExecutor) where V: VMBlockExecutor, @@ -120,6 +126,7 @@ pub fn run_benchmark( enable_storage_sharding: bool, pipeline_config: PipelineConfig, init_features: Features, + is_keyless: bool, ) where V: VMBlockExecutor + 'static, { @@ -164,6 +171,7 @@ pub fn run_benchmark( db.reader.clone(), num_accounts_to_be_loaded, num_accounts_to_skip, + is_keyless, ); let (main_signer_accounts, burner_accounts) = accounts_cache.split(num_main_signer_accounts); @@ -220,6 +228,7 @@ pub fn run_benchmark( source_dir, Some(num_accounts_to_load), pipeline_config.num_generator_workers, + is_keyless, ); let mut overall_measuring = OverallMeasuring::start(); @@ -341,6 +350,7 @@ pub fn add_accounts( enable_storage_sharding: bool, pipeline_config: PipelineConfig, init_features: Features, + is_keyless: bool, ) where V: VMBlockExecutor + 'static, { @@ -361,6 +371,7 @@ pub fn add_accounts( enable_storage_sharding, pipeline_config, init_features, + is_keyless, ); } @@ -375,6 +386,7 @@ fn add_accounts_impl( enable_storage_sharding: bool, pipeline_config: PipelineConfig, init_features: Features, + is_keyless: bool, ) where V: VMBlockExecutor + 'static, { @@ -401,6 +413,7 @@ fn add_accounts_impl( &source_dir, None, pipeline_config.num_generator_workers, + is_keyless, ); let start_time = Instant::now(); @@ -410,6 +423,7 @@ fn add_accounts_impl( num_new_accounts, init_account_balance, block_size, + is_keyless, ); generator.drop_sender(); pipeline.start_pipeline_processing(); @@ -770,7 +784,7 @@ fn log_total_supply(db_reader: &Arc) { #[cfg(test)] mod tests { use crate::{ - db_generator::bootstrap_with_genesis, init_db_and_executor, + db_generator::bootstrap_with_genesis, default_benchmark_features, init_db_and_executor, native::native_config::NativeConfig, pipeline::PipelineConfig, transaction_executor::BENCHMARKS_BLOCK_EXECUTOR_ONCHAIN_CONFIG, transaction_generator::TransactionGenerator, BenchmarkWorkload, @@ -781,10 +795,7 @@ mod tests { use aptos_sdk::{transaction_builder::aptos_stdlib, types::LocalAccount}; use aptos_temppath::TempPath; use aptos_transaction_generator_lib::{args::TransactionTypeArg, WorkflowProgress}; - use aptos_types::{ - on_chain_config::{FeatureFlag, Features}, - transaction::Transaction, - }; + use aptos_types::{on_chain_config::FeatureFlag, transaction::Transaction}; use aptos_vm::{aptos_vm::AptosVMBlockExecutor, AptosVM, VMBlockExecutor}; use rand::thread_rng; use std::fs; @@ -797,7 +808,7 @@ mod tests { fs::create_dir_all(db_dir.as_ref()).unwrap(); - let mut init_features 
= Features::default(); + let mut init_features = default_benchmark_features(); init_features.enable(FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE); init_features.enable(FeatureFlag::OPERATIONS_DEFAULT_TO_FA_APT_STORE); @@ -890,7 +901,7 @@ mod tests { println!("db_generator::create_db_with_accounts"); - let mut features = Features::default(); + let mut features = default_benchmark_features(); features.enable(FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE); features.enable(FeatureFlag::OPERATIONS_DEFAULT_TO_FA_APT_STORE); @@ -905,6 +916,7 @@ mod tests { false, PipelineConfig::default(), features.clone(), + false, ); println!("run_benchmark"); @@ -935,6 +947,7 @@ mod tests { false, PipelineConfig::default(), features, + false, ); } diff --git a/execution/executor-benchmark/src/main.rs b/execution/executor-benchmark/src/main.rs index 293413abf8be0..90e479caa932b 100644 --- a/execution/executor-benchmark/src/main.rs +++ b/execution/executor-benchmark/src/main.rs @@ -14,8 +14,8 @@ use aptos_config::config::{ EpochSnapshotPrunerConfig, LedgerPrunerConfig, PrunerConfig, StateMerklePrunerConfig, }; use aptos_executor_benchmark::{ - native::native_config::NativeConfig, native_executor::NativeExecutor, pipeline::PipelineConfig, - BenchmarkWorkload, + default_benchmark_features, native::native_config::NativeConfig, + native_executor::NativeExecutor, pipeline::PipelineConfig, BenchmarkWorkload, }; use aptos_executor_service::remote_executor_client; use aptos_experimental_ptx_executor::PtxBlockExecutor; @@ -243,6 +243,9 @@ enum BlockExecutorTypeOpt { #[derive(Parser, Debug)] struct Opt { + #[clap(long)] + use_keyless_accounts: bool, + #[clap(long, default_value_t = 10000)] block_size: usize, @@ -410,7 +413,7 @@ fn get_init_features( "Enable and disable feature flags cannot overlap." 
); - let mut init_features = Features::default(); + let mut init_features = default_benchmark_features(); for feature in enable_feature.iter() { init_features.enable(*feature); } @@ -442,6 +445,7 @@ where opt.enable_storage_sharding, opt.pipeline_opt.pipeline_config(), get_init_features(enable_feature, disable_feature), + opt.use_keyless_accounts, ); }, Command::RunExecutor { @@ -503,6 +507,7 @@ where opt.enable_storage_sharding, opt.pipeline_opt.pipeline_config(), get_init_features(enable_feature, disable_feature), + opt.use_keyless_accounts, ); }, Command::AddAccounts { @@ -522,6 +527,7 @@ where opt.enable_storage_sharding, opt.pipeline_opt.pipeline_config(), Features::default(), + opt.use_keyless_accounts, ); }, } diff --git a/execution/executor-benchmark/src/transaction_generator.rs b/execution/executor-benchmark/src/transaction_generator.rs index dde5b1a40454f..4703fa1a905b0 100644 --- a/execution/executor-benchmark/src/transaction_generator.rs +++ b/execution/executor-benchmark/src/transaction_generator.rs @@ -8,7 +8,10 @@ use crate::{ }; use aptos_crypto::ed25519::Ed25519PrivateKey; use aptos_logger::info; -use aptos_sdk::{transaction_builder::TransactionFactory, types::LocalAccount}; +use aptos_sdk::{ + transaction_builder::{aptos_stdlib, TransactionFactory}, + types::LocalAccount, +}; use aptos_storage_interface::{state_view::LatestDbStateCheckpointView, DbReader, DbReaderWriter}; use aptos_types::{ account_address::AccountAddress, @@ -154,11 +157,12 @@ impl TransactionGenerator { reader: Arc, num_accounts: usize, num_to_skip: usize, + is_keyless: bool, ) -> AccountCache { Self::resync_sequence_numbers( reader, Self::gen_account_cache( - AccountGenerator::new_for_user_accounts(num_to_skip as u64), + AccountGenerator::new_for_user_accounts(num_to_skip as u64, is_keyless), num_accounts, "user", ), @@ -166,11 +170,15 @@ impl TransactionGenerator { ) } - fn gen_seed_account_cache(reader: Arc, num_accounts: usize) -> AccountCache { + fn gen_seed_account_cache( + reader: Arc, + num_accounts: usize, + is_keyless: bool, + ) -> AccountCache { Self::resync_sequence_numbers( reader, Self::gen_account_cache( - AccountGenerator::new_for_seed_accounts(), + AccountGenerator::new_for_seed_accounts(is_keyless), num_accounts, "seed", ), @@ -185,6 +193,7 @@ impl TransactionGenerator { db_dir: P, num_main_signer_accounts: Option, num_workers: usize, + is_keyless: bool, ) -> Self { let num_existing_accounts = TransactionGenerator::read_meta(&db_dir); @@ -194,7 +203,7 @@ impl TransactionGenerator { main_signer_accounts: num_main_signer_accounts.map(|num_main_signer_accounts| { let num_cached_accounts = std::cmp::min(num_existing_accounts, num_main_signer_accounts); - Self::gen_user_account_cache(db.reader.clone(), num_cached_accounts, 0) + Self::gen_user_account_cache(db.reader.clone(), num_cached_accounts, 0, is_keyless) }), num_existing_accounts, block_sender: Some(block_sender), @@ -256,6 +265,7 @@ impl TransactionGenerator { num_new_accounts: usize, init_account_balance: u64, block_size: usize, + is_keyless: bool, ) { assert!(self.block_sender.is_some()); // Ensure that seed accounts have enough balance to transfer money to at least 10000 account with @@ -265,12 +275,14 @@ impl TransactionGenerator { num_new_accounts, block_size, init_account_balance * 10_000, + is_keyless, ); self.create_and_fund_accounts( num_existing_accounts, num_new_accounts, init_account_balance, block_size, + is_keyless, ); } @@ -351,11 +363,13 @@ impl TransactionGenerator { num_new_accounts: usize, block_size: usize, 
seed_account_balance: u64, + is_keyless: bool, ) { // We don't store the # of existing seed accounts now. Thus here we just blindly re-create // and re-mint seed accounts here. let num_seed_accounts = (num_new_accounts / 1000).clamp(1, 100000); - let seed_accounts_cache = Self::gen_seed_account_cache(reader, num_seed_accounts); + let seed_accounts_cache = + Self::gen_seed_account_cache(reader, num_seed_accounts, is_keyless); println!( "[{}] Generating {} seed account creation txns, with {} coins.", @@ -374,13 +388,12 @@ impl TransactionGenerator { let transactions: Vec<_> = chunk .iter() .map(|new_account| { - let txn = self.root_account.sign_with_transaction_builder( - self.transaction_factory - .implicitly_create_user_account_and_transfer( - new_account.public_key(), - seed_account_balance, - ), + let payload = aptos_stdlib::aptos_account_transfer( + new_account.authentication_key().account_address(), + seed_account_balance, ); + let builder = self.transaction_factory.payload(payload); + let txn = self.root_account.sign_with_transaction_builder(builder); Transaction::UserTransaction(txn) }) .collect(); @@ -401,13 +414,15 @@ impl TransactionGenerator { num_new_accounts: usize, init_account_balance: u64, block_size: usize, + is_keyless: bool, ) { println!( "[{}] Generating {} account creation txns.", now_fmt!(), num_new_accounts ); - let mut generator = AccountGenerator::new_for_user_accounts(num_existing_accounts as u64); + let mut generator = + AccountGenerator::new_for_user_accounts(num_existing_accounts as u64, is_keyless); println!("Skipped first {} existing accounts.", num_existing_accounts); let bar = get_progress_bar(num_new_accounts); @@ -431,13 +446,12 @@ impl TransactionGenerator { Arc::new(AtomicUsize::new(0)), |(sender_idx, new_account), account_cache| { let sender = &account_cache.accounts[sender_idx]; - let txn = sender.sign_with_transaction_builder( - self.transaction_factory - .implicitly_create_user_account_and_transfer( - new_account.public_key(), - init_account_balance, - ), + let payload = aptos_stdlib::aptos_account_transfer( + new_account.authentication_key().account_address(), + init_account_balance, ); + let txn = sender + .sign_with_transaction_builder(self.transaction_factory.payload(payload)); Some(Transaction::UserTransaction(txn)) }, |(sender_idx, _)| *sender_idx, diff --git a/execution/executor-types/Cargo.toml b/execution/executor-types/Cargo.toml index 4a4dbaf987799..f10419375c0fb 100644 --- a/execution/executor-types/Cargo.toml +++ b/execution/executor-types/Cargo.toml @@ -17,6 +17,7 @@ anyhow = { workspace = true } aptos-crypto = { workspace = true } aptos-drop-helper = { workspace = true } aptos-infallible = { workspace = true } +aptos-metrics-core = { workspace = true } aptos-scratchpad = { workspace = true } aptos-secure-net = { workspace = true } aptos-storage-interface = { workspace = true } diff --git a/execution/executor-types/src/execution_output.rs b/execution/executor-types/src/execution_output.rs index b9e2ac4806144..fb2d63f7b0897 100644 --- a/execution/executor-types/src/execution_output.rs +++ b/execution/executor-types/src/execution_output.rs @@ -38,11 +38,7 @@ impl ExecutionOutput { ) -> Self { if is_block { // If it's a block, ensure it ends with state checkpoint. 
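+ // (Tightened below: the block must end with a single state checkpoint, or contain none at all when it ends in reconfiguration.)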
- assert!( - next_epoch_state.is_some() - || to_commit.is_empty() // reconfig suffix - || to_commit.transactions.last().unwrap().is_non_reconfig_block_ending() - ); + assert!(to_commit.is_empty() || to_commit.ends_with_sole_checkpoint()); } else { // If it's not, there shouldn't be any transaction to be discarded or retried. assert!(to_discard.is_empty() && to_retry.is_empty()); @@ -168,31 +164,29 @@ impl Inner { let aborts = self .to_commit .iter() .flat_map( - |(txn, output, _is_reconfig)| match output.status().status() { - Ok(execution_status) => { - if execution_status.is_success() { - None - } else { - Some(format!("{:?}: {:?}", txn, output.status())) - } - }, - Err(_) => None, + .flat_map(|(txn, output)| match output.status().status() { + Ok(execution_status) => { + if execution_status.is_success() { + None + } else { + Some(format!("{:?}: {:?}", txn, output.status())) + } }, - ) + Err(_) => None, + }) .collect::<Vec<_>>(); let discards_3 = self .to_discard .iter() .take(3) - .map(|(txn, output, _is_reconfig)| format!("{:?}: {:?}", txn, output.status())) + .map(|(txn, output)| format!("{:?}: {:?}", txn, output.status())) .collect::<Vec<_>>(); let retries_3 = self .to_retry .iter() .take(3) - .map(|(txn, output, _is_reconfig)| format!("{:?}: {:?}", txn, output.status())) + .map(|(txn, output)| format!("{:?}: {:?}", txn, output.status())) .collect::<Vec<_>>(); if !aborts.is_empty() || !discards_3.is_empty() || !retries_3.is_empty() { diff --git a/execution/executor-types/src/lib.rs index 48de1b896fa30..fab455b1c7c77 100644 --- a/execution/executor-types/src/lib.rs +++ b/execution/executor-types/src/lib.rs @@ -36,6 +36,7 @@ use std::{ mod error; pub mod execution_output; mod ledger_update_output; +mod metrics; pub mod planned; pub mod state_checkpoint_output; pub mod state_compute_result; diff --git a/execution/executor-types/src/metrics.rs new file mode 100644 index 0000000000000..d86cfcba3ad31 --- /dev/null +++ b/execution/executor-types/src/metrics.rs @@ -0,0 +1,17 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_metrics_core::{exponential_buckets, register_histogram_vec, HistogramVec}; +use once_cell::sync::Lazy; + +pub static TIMER: Lazy<HistogramVec> = Lazy::new(|| { + register_histogram_vec!( + // metric name + "aptos_executor_types_timer", + // metric description + "The time spent in seconds.", + &["name"], + exponential_buckets(/*start=*/ 1e-3, /*factor=*/ 2.0, /*count=*/ 20).unwrap(), + ) + .unwrap() +}); diff --git a/execution/executor-types/src/planned.rs index e16206ce9b634..54b4145991ae3 100644 --- a/execution/executor-types/src/planned.rs +++ b/execution/executor-types/src/planned.rs @@ -1,7 +1,9 @@ // Copyright (c) Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::metrics::TIMER; use aptos_infallible::Mutex; +use aptos_metrics_core::TimerHelper; use once_cell::sync::OnceCell; use rayon::ThreadPool; use std::{ops::Deref, sync::mpsc::Receiver}; @@ -40,10 +42,12 @@ impl Planned { } } - pub fn get(&self) -> &T { + pub fn get(&self, name_for_timer: Option<&str>) -> &T { if let Some(t) = self.value.get() { t } else { + let _timer = name_for_timer.map(|name| TIMER.timer_with(&[name])); + let rx = self.rx.get().expect("Not planned").lock(); if self.value.get().is_none() { let t = rx.recv().expect("Plan failed."); @@ -58,7 +62,7 @@ impl Deref for Planned { type Target = T; fn deref(&self) -> &Self::Target { - self.get() +
self.get(None) } } diff --git a/execution/executor-types/src/state_compute_result.rs index 390818228b8da..1d5748570d4cc 100644 --- a/execution/executor-types/src/state_compute_result.rs +++ b/execution/executor-types/src/state_compute_result.rs @@ -145,7 +145,11 @@ impl StateComputeResult { pub fn make_chunk_commit_notification(&self) -> ChunkCommitNotification { ChunkCommitNotification { - subscribable_events: self.execution_output.subscribable_events.clone(), + subscribable_events: self + .execution_output + .subscribable_events + .get(Some("wait_for_subscribable_events")) + .clone(), committed_transactions: self.execution_output.to_commit.txns().to_vec(), reconfiguration_occurred: self.execution_output.next_epoch_state.is_some(), } diff --git a/execution/executor-types/src/transactions_with_output.rs index 54204f6608d96..bd157c35a34e5 100644 --- a/execution/executor-types/src/transactions_with_output.rs +++ b/execution/executor-types/src/transactions_with_output.rs @@ -1,6 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::metrics::TIMER; +use aptos_metrics_core::TimerHelper; use aptos_types::transaction::{Transaction, TransactionOutput}; use itertools::izip; @@ -8,21 +10,20 @@ use itertools::izip; pub struct TransactionsWithOutput { pub transactions: Vec<Transaction>, pub transaction_outputs: Vec<TransactionOutput>, - pub epoch_ending_flags: Vec<bool>, + pub is_reconfig: bool, } impl TransactionsWithOutput { pub fn new( transactions: Vec<Transaction>, transaction_outputs: Vec<TransactionOutput>, - epoch_ending_flags: Vec<bool>, + is_reconfig: bool, ) -> Self { assert_eq!(transactions.len(), transaction_outputs.len()); - assert_eq!(transactions.len(), epoch_ending_flags.len()); Self { transactions, transaction_outputs, - epoch_ending_flags, + is_reconfig, } } @@ -32,8 +33,7 @@ impl TransactionsWithOutput { pub fn new_dummy_success(txns: Vec<Transaction>) -> Self { let txn_outputs = vec![TransactionOutput::new_empty_success(); txns.len()]; - let epoch_ending_flags = vec![false; txns.len()]; - Self::new(txns, txn_outputs, epoch_ending_flags) + Self::new(txns, txn_outputs, false) } pub fn push( &mut self, transaction: Transaction, transaction_output: TransactionOutput, is_reconfig: bool, ) { + // can't add more txns after reconfig + assert!(!self.is_reconfig); + self.transactions.push(transaction); self.transaction_outputs.push(transaction_output); - self.epoch_ending_flags.push(is_reconfig); + self.is_reconfig = is_reconfig; } pub fn len(&self) -> usize { @@ -64,31 +67,31 @@ impl TransactionsWithOutput { } pub fn get_last_checkpoint_index(&self) -> Option<usize> { + if self.is_reconfig { + return Some(self.len() - 1); + } + (0..self.len()) .rev() - .find(|&i| Self::need_checkpoint(&self.transactions[i], self.epoch_ending_flags[i])) + .find(|&i| self.transactions[i].is_non_reconfig_block_ending()) } - pub fn need_checkpoint(txn: &Transaction, is_reconfig: bool) -> bool { - if is_reconfig { - return true; - } - match txn { - Transaction::BlockMetadata(_) - | Transaction::BlockMetadataExt(_) - | Transaction::UserTransaction(_) - | Transaction::ValidatorTransaction(_) => false, - Transaction::GenesisTransaction(_) - | Transaction::StateCheckpoint(_) - | Transaction::BlockEpilogue(_) => true, - } + pub fn iter(&self) -> impl Iterator<Item = (&Transaction, &TransactionOutput)> { + izip!(self.transactions.iter(), self.transaction_outputs.iter(),) } - pub fn iter(&self) -> impl Iterator<Item = (&Transaction, &TransactionOutput, bool)> { - izip!( - self.transactions.iter(), - self.transaction_outputs.iter(),
self.epoch_ending_flags.iter().cloned() - ) + pub fn ends_with_sole_checkpoint(&self) -> bool { + let _timer = TIMER.timer_with(&["ends_with_sole_checkpoint"]); + if self.is_reconfig { + !self + .txns() + .iter() + .any(Transaction::is_non_reconfig_block_ending) + } else { + self.txns() + .iter() + .position(Transaction::is_non_reconfig_block_ending) + == Some(self.len() - 1) + } } } diff --git a/execution/executor/src/tests/mock_vm/mod.rs b/execution/executor/src/tests/mock_vm/mod.rs index 23816899fb286..b1ab87984c7a4 100644 --- a/execution/executor/src/tests/mock_vm/mod.rs +++ b/execution/executor/src/tests/mock_vm/mod.rs @@ -57,6 +57,8 @@ pub static KEEP_STATUS: Lazy = pub static DISCARD_STATUS: Lazy = Lazy::new(|| TransactionStatus::Discard(StatusCode::INSUFFICIENT_BALANCE_FOR_TRANSACTION_FEE)); +pub static RETRY_STATUS: Lazy = Lazy::new(|| TransactionStatus::Retry); + pub struct MockVM; impl VMBlockExecutor for MockVM { @@ -76,7 +78,19 @@ impl VMBlockExecutor for MockVM { let mut output_cache = HashMap::new(); let mut outputs = vec![]; + let mut skip_rest = false; for idx in 0..txn_provider.num_txns() { + if skip_rest { + outputs.push(TransactionOutput::new( + WriteSet::default(), + vec![], + 0, + RETRY_STATUS.clone(), + TransactionAuxiliaryData::default(), + )); + continue; + } + let txn = txn_provider.get_txn(idx as u32).expect_valid(); if matches!(txn, Transaction::StateCheckpoint(_)) { outputs.push(TransactionOutput::new( @@ -110,6 +124,7 @@ impl VMBlockExecutor for MockVM { KEEP_STATUS.clone(), TransactionAuxiliaryData::default(), )); + skip_rest = true; continue; } diff --git a/execution/executor/src/types/in_memory_state_calculator_v2.rs b/execution/executor/src/types/in_memory_state_calculator_v2.rs index 31c19716fd7a2..1462bbb705aa6 100644 --- a/execution/executor/src/types/in_memory_state_calculator_v2.rs +++ b/execution/executor/src/types/in_memory_state_calculator_v2.rs @@ -49,7 +49,10 @@ impl InMemoryStateCalculatorV2 { // If there are multiple checkpoints in the chunk, we only calculate the SMT (and its root // hash) for the last one. - let last_checkpoint_index = execution_output.to_commit.get_last_checkpoint_index(); + let last_checkpoint_index = { + let _timer = OTHER_TIMERS.timer_with(&["get_last_checkpoint_index"]); + execution_output.to_commit.get_last_checkpoint_index() + }; Self::calculate_impl( parent_state, @@ -371,14 +374,6 @@ impl InMemoryStateCalculatorV2 { "Base state is corrupted, updates_since_base is not empty at a checkpoint." ); - for (i, (txn, _txn_out, is_reconfig)) in to_commit.iter().enumerate() { - ensure!( - TransactionsWithOutput::need_checkpoint(txn, is_reconfig) ^ (i != num_txns - 1), - "Checkpoint is allowed iff it's the last txn in the block. 
index: {i}, num_txns: {num_txns}, is_last: {}, txn: {txn:?}, is_reconfig: {}", - i == num_txns - 1, - is_reconfig, - ); - } Ok(()) } } diff --git a/execution/executor/src/workflow/do_get_execution_output.rs b/execution/executor/src/workflow/do_get_execution_output.rs index 1c4d0ee8663d0..2a7d45795a6ca 100644 --- a/execution/executor/src/workflow/do_get_execution_output.rs +++ b/execution/executor/src/workflow/do_get_execution_output.rs @@ -45,7 +45,7 @@ use aptos_types::{ }; use aptos_vm::VMBlockExecutor; use itertools::Itertools; -use std::{iter, sync::Arc}; +use std::sync::Arc; pub struct DoGetExecutionOutput; @@ -288,51 +288,31 @@ impl Parser { append_state_checkpoint_to_block: Option, ) -> Result { let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output"]); + let is_block = append_state_checkpoint_to_block.is_some(); - // Parse all outputs. - let mut epoch_ending_flags = { - let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__epoch_ending_flags"]); + // Collect all statuses. + let statuses_for_input_txns = { + let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__all_statuses"]); transaction_outputs .iter() - .map(TransactionOutput::has_new_epoch_event) + .map(|t| t.status()) + .cloned() .collect_vec() }; // Isolate retries. - let (to_retry, has_reconfig) = { - let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__retries"]); - Self::extract_retries( - &mut transactions, - &mut transaction_outputs, - &mut epoch_ending_flags, - ) - }; - - // Collect all statuses. - let statuses_for_input_txns = { - let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__all_statuses"]); - let keeps_and_discards = transaction_outputs.iter().map(|t| t.status()).cloned(); - // Forcibly overwriting statuses for retries, since VM can output otherwise. - let retries = iter::repeat(TransactionStatus::Retry).take(to_retry.len()); - keeps_and_discards.chain(retries).collect() - }; + let (to_retry, has_reconfig) = + Self::extract_retries(&mut transactions, &mut transaction_outputs); // Isolate discards. - let to_discard = { - let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__discards"]); - Self::extract_discards( - &mut transactions, - &mut transaction_outputs, - &mut epoch_ending_flags, - ) - }; + let to_discard = Self::extract_discards(&mut transactions, &mut transaction_outputs); // The rest is to be committed, attach block epilogue as needed and optionally get next EpochState. let to_commit = { let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__to_commit"]); let to_commit = - TransactionsWithOutput::new(transactions, transaction_outputs, epoch_ending_flags); + TransactionsWithOutput::new(transactions, transaction_outputs, has_reconfig); Self::maybe_add_block_epilogue( to_commit, has_reconfig, @@ -380,48 +360,34 @@ impl Parser { fn extract_retries( transactions: &mut Vec, transaction_outputs: &mut Vec, - epoch_ending_flags: &mut Vec, ) -> (TransactionsWithOutput, bool) { - // N.B. off-by-1 intentionally, for exclusive index - let new_epoch_marker = epoch_ending_flags - .iter() - .rposition(|f| *f) - .map(|idx| idx + 1); + let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__retries"]); - let block_gas_limit_marker = transaction_outputs + let last_non_retry = transaction_outputs .iter() - .position(|o| matches!(o.status(), TransactionStatus::Retry)); - - // Transactions after the epoch ending txn are all to be retried. - // Transactions after the txn that exceeded per-block gas limit are also to be retried. 
- if let Some(pos) = new_epoch_marker { - ( - TransactionsWithOutput::new( - transactions.drain(pos..).collect(), - transaction_outputs.drain(pos..).collect(), - epoch_ending_flags.drain(pos..).collect(), - ), - true, - ) - } else if let Some(pos) = block_gas_limit_marker { - ( - TransactionsWithOutput::new( - transactions.drain(pos..).collect(), - transaction_outputs.drain(pos..).collect(), - epoch_ending_flags.drain(pos..).collect(), - ), - false, - ) + .rposition(|t| !t.status().is_retry()); + let is_reconfig = if let Some(idx) = last_non_retry { + transaction_outputs[idx].has_new_epoch_event() } else { - (TransactionsWithOutput::new_empty(), false) - } + false + }; + + let first_retry = last_non_retry.map_or(0, |pos| pos + 1); + let to_retry = TransactionsWithOutput::new( + transactions.drain(first_retry..).collect(), + transaction_outputs.drain(first_retry..).collect(), + false, // is_reconfig + ); + + (to_retry, is_reconfig) } fn extract_discards( transactions: &mut Vec, transaction_outputs: &mut Vec, - epoch_ending_flags: &mut Vec, ) -> TransactionsWithOutput { + let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__discards"]); + let to_discard = { let mut res = TransactionsWithOutput::new_empty(); for idx in 0..transactions.len() { @@ -429,25 +395,23 @@ impl Parser { res.push( transactions[idx].clone(), transaction_outputs[idx].clone(), - epoch_ending_flags[idx], + false, ); } else if !res.is_empty() { transactions[idx - res.len()] = transactions[idx].clone(); transaction_outputs[idx - res.len()] = transaction_outputs[idx].clone(); - epoch_ending_flags[idx - res.len()] = epoch_ending_flags[idx]; } } if !res.is_empty() { let remaining = transactions.len() - res.len(); transactions.truncate(remaining); transaction_outputs.truncate(remaining); - epoch_ending_flags.truncate(remaining); } res }; // Sanity check transactions with the Discard status: - to_discard.iter().for_each(|(t, o, _flag)| { + to_discard.iter().for_each(|(t, o)| { // In case a new status other than Retry, Keep and Discard is added: if !matches!(o.status(), TransactionStatus::Discard(_)) { error!("Status other than Retry, Keep or Discard; Transaction discarded."); diff --git a/execution/executor/src/workflow/do_ledger_update.rs b/execution/executor/src/workflow/do_ledger_update.rs index 0ddf54e37c737..0877e619ce75e 100644 --- a/execution/executor/src/workflow/do_ledger_update.rs +++ b/execution/executor/src/workflow/do_ledger_update.rs @@ -93,12 +93,7 @@ impl DoLedgerUpdate { writeset_hashes ) .map( - |( - (txn, txn_out, _is_reconfig), - state_checkpoint_hash, - event_root_hash, - write_set_hash, - )| { + |((txn, txn_out), state_checkpoint_hash, event_root_hash, write_set_hash)| { TransactionInfo::new( txn.hash(), write_set_hash, diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 26e12125ab42b..2bcd8106bca34 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -25,6 +25,7 @@ bcs = { workspace = true } ed25519-dalek-bip32 = { workspace = true } hex = { workspace = true } move-core-types = { workspace = true } +rand = { workspace = true } rand_core = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } diff --git a/sdk/src/types.rs b/sdk/src/types.rs index 941cdf0a4c55e..22084b72bdc24 100644 --- a/sdk/src/types.rs +++ b/sdk/src/types.rs @@ -24,13 +24,14 @@ use aptos_types::{ event::EventKey, keyless::{ Claims, Configuration, EphemeralCertificate, IdCommitment, KeylessPublicKey, - KeylessSignature, OpenIdSig, Pepper, TransactionAndProof, ZeroKnowledgeSig, + KeylessSignature, OpenIdSig, 
Pepper, ZeroKnowledgeSig, }, transaction::authenticator::{AnyPublicKey, EphemeralPublicKey, EphemeralSignature}, }; use bip39::{Language, Mnemonic, Seed}; use ed25519_dalek_bip32::{DerivationPath, ExtendedSecretKey}; use keyless::FederatedKeylessPublicKey; +use rand::Rng; use serde::{Deserialize, Serialize}; use std::{ str::FromStr, @@ -73,7 +74,7 @@ impl LocalAccountAuthenticator { account: &impl CommonKeylessAccount, ) -> KeylessSignature { let proof = account.zk_sig().proof; - let txn_and_zkp = TransactionAndProof { + let txn_and_zkp = keyless::TransactionAndProof { message: txn, proof: Some(proof), }; @@ -182,6 +183,96 @@ impl LocalAccount { Ok(Self::new(address, key, sequence_number)) } + pub fn generate_for_testing(rng: &mut R1, keyless_mode: bool) -> Self + where + R1: Rng + rand_core::CryptoRng, + { + if keyless_mode { + let config = keyless::Configuration::new_for_testing(); + let now_secs = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let esk = EphemeralPrivateKey::Ed25519 { + inner_private_key: Ed25519PrivateKey::generate(rng), + }; + let exp_timestamp_secs = now_secs + 7 * 86400; // + 7 days + let exp_horizon_secs = 100 * 86400; // 100 days + let blinder = vec![0x01; 31]; + let eph_key_pair = EphemeralKeyPair::new_with_keyless_config( + &config, + esk, + exp_timestamp_secs, + blinder, + ) + .unwrap(); + + // Simulation of OIDC provider processing. + let iss = keyless::test_utils::get_sample_iss(); + let jwk = keyless::test_utils::get_sample_jwk(); + let aud = format!("aud_{}", hex::encode(rng.gen::<[u8; 4]>())); + let uid_key = "sub".to_string(); + let uid_val = format!("uid_{}", hex::encode(rng.gen::<[u8; 4]>())); + let jwt_header = keyless::test_utils::get_sample_jwt_header_json(); + let jwt_header_b64 = keyless::base64url_encode_str(&jwt_header); + let jwt_payload = keyless::circuit_testcases::render_jwt_payload_json( + &iss, + &aud, + &uid_key, + &uid_val, + "", + now_secs, + &eph_key_pair.nonce, + now_secs + 86400, + ); + let jwt_payload_b64 = keyless::base64url_encode_str(&jwt_payload); + let jwt_msg = format!("{}.{}", jwt_header_b64, jwt_payload_b64); + let jwt_sig = keyless::test_utils::oidc_provider_sign( + *keyless::circuit_testcases::SAMPLE_JWK_SK, + jwt_msg.as_bytes(), + ); + let jwt_sig_b64 = base64::encode_config(jwt_sig, base64::URL_SAFE_NO_PAD); + let jwt = format!("{}.{}", jwt_msg, jwt_sig_b64); + + let pepper = keyless::test_utils::get_sample_pepper(); + let idc = keyless::IdCommitment::new_from_preimage(&pepper, &aud, &uid_key, &uid_val) + .unwrap(); + let public_inputs = keyless::bn254_circom::hash_public_inputs( + &config, + &eph_key_pair.public_key, + &idc, + exp_timestamp_secs, + exp_horizon_secs, + &iss, + None, + &jwt_header, + &jwk, + None, + ) + .unwrap(); + let groth16_proof = keyless::proof_simulation::Groth16SimulatorBn254::create_random_proof_with_trapdoor(&[public_inputs], &keyless::circuit_constants::TEST_GROTH16_SETUP.simulation_pk, rng).unwrap(); + let zk_sig = ZeroKnowledgeSig { + proof: keyless::ZKP::Groth16(groth16_proof), + exp_horizon_secs, + extra_field: None, + override_aud_val: None, + training_wheels_signature: None, + }; + // zk_sig.verify_groth16_proof(public_inputs, &TEST_GROTH16_KEYS.prepared_vk).unwrap(); + let keyless_account = + KeylessAccount::new_from_jwt(&jwt, eph_key_pair, Some(&uid_key), pepper, zk_sig) + .unwrap(); + + Self::new_keyless( + keyless_account.authentication_key().account_address(), + keyless_account, + 0, + ) + } else { + Self::generate(rng) + } + } + /// Generate a 
new account locally. Note: This function does not actually /// create an account on the Aptos blockchain, it just generates a new /// account locally. @@ -468,7 +559,7 @@ pub struct AccountKey { impl AccountKey { pub fn generate(rng: &mut R) -> Self where - R: ::rand_core::RngCore + ::rand_core::CryptoRng, + R: rand_core::RngCore + rand_core::CryptoRng, { let private_key = Ed25519PrivateKey::generate(rng); Self::from_private_key(private_key) @@ -555,7 +646,6 @@ impl EphemeralPrivateKey { pub struct EphemeralKeyPair { private_key: EphemeralPrivateKey, public_key: EphemeralPublicKey, - #[allow(dead_code)] nonce: String, expiry_date_secs: u64, blinder: Vec, @@ -567,13 +657,22 @@ impl EphemeralKeyPair { expiry_date_secs: u64, blinder: Vec, ) -> Result { - let epk = private_key.public_key(); - let nonce = OpenIdSig::reconstruct_oauth_nonce( - &blinder, - expiry_date_secs, - &epk, + Self::new_with_keyless_config( &Configuration::new_for_devnet(), - )?; + private_key, + expiry_date_secs, + blinder, + ) + } + + pub fn new_with_keyless_config( + config: &Configuration, + private_key: EphemeralPrivateKey, + expiry_date_secs: u64, + blinder: Vec, + ) -> Result { + let epk = private_key.public_key(); + let nonce = OpenIdSig::reconstruct_oauth_nonce(&blinder, expiry_date_secs, &epk, config)?; Ok(Self { private_key, diff --git a/testsuite/forge/src/interface/aptos.rs b/testsuite/forge/src/interface/aptos.rs index a77bad1eeadab..adbb34ed9c8b1 100644 --- a/testsuite/forge/src/interface/aptos.rs +++ b/testsuite/forge/src/interface/aptos.rs @@ -332,20 +332,12 @@ pub async fn reconfig( transaction_factory: &TransactionFactory, root_account: Arc, ) -> State { - let aptos_version = client.get_aptos_version().await.unwrap(); - let current = aptos_version.into_inner(); - let current_version = *current.major.inner(); let txns = { - vec![ - root_account.sign_with_transaction_builder(transaction_factory.clone().payload( - aptos_stdlib::version_set_for_next_epoch(current_version + 1), - )), - root_account.sign_with_transaction_builder( - transaction_factory - .clone() - .payload(aptos_stdlib::aptos_governance_force_end_epoch_test_only()), - ), - ] + vec![root_account.sign_with_transaction_builder( + transaction_factory + .clone() + .payload(aptos_stdlib::aptos_governance_force_end_epoch_test_only()), + )] }; submit_and_wait_reconfig(client, txns).await diff --git a/testsuite/module-publish/src/main.rs b/testsuite/module-publish/src/main.rs index 7be0b3756122f..283ea2f7fbeaf 100644 --- a/testsuite/module-publish/src/main.rs +++ b/testsuite/module-publish/src/main.rs @@ -150,7 +150,8 @@ fn write_package(file: &mut File, package_path: PathBuf, package_name: &str) -> // build package let package = BuiltPackage::build(package_path, BuildOptions::move_2()) .expect("building package must succeed"); - let code = package.extract_code(); + let modules = package.extract_code(); + let mut scripts = package.extract_script_code(); let package_metadata = package.extract_metadata().expect("Metadata must exist"); let metadata = bcs::to_bytes(&package_metadata).expect("Metadata must serialize"); @@ -164,7 +165,7 @@ fn write_package(file: &mut File, package_path: PathBuf, package_name: &str) -> let mut module_names = Vec::new(); // write out all modules - for module in &code { + for module in &modules { // this is an unfortunate way to find the module name but it is not // clear how to do it otherwise let compiled_module = CompiledModule::deserialize(module).expect("Module must deserialize"); @@ -180,6 +181,16 @@ fn 
write_package(file: &mut File, package_path: PathBuf, package_name: &str) -> module_names.push(name); } + assert!( + scripts.len() <= 1, + "Only a single script can be added per package" + ); + if let Some(script) = scripts.pop() { + let name: String = format!("SCRIPT_{}", package_name.to_uppercase()); + writeln!(file).expect("Empty line failed"); + write_lazy(file, name.as_str(), &script); + } + writeln!(file).expect("Empty line failed"); writeln!(file, "#[rustfmt::skip]").expect("rustfmt skip failed"); writeln!( diff --git a/testsuite/module-publish/src/packages/simple/scripts/main.move b/testsuite/module-publish/src/packages/simple/scripts/main.move index 0c6c4767afb65..4812c886d72ef 100644 --- a/testsuite/module-publish/src/packages/simple/scripts/main.move +++ b/testsuite/module-publish/src/packages/simple/scripts/main.move @@ -1,7 +1,4 @@ script { - // Note: this constant can be replaced in compiled script to make it hash to a different value. - const SENDER: address = @0x1; - fun main(sender: &signer) { // The idea is to ensure that this script takes some time to be deserialized and verified, but the actual // execution time is small (no-op). diff --git a/testsuite/single_node_performance.py b/testsuite/single_node_performance.py index 1208b7629d85e..7cfdc79eb327e 100755 --- a/testsuite/single_node_performance.py +++ b/testsuite/single_node_performance.py @@ -161,47 +161,47 @@ class RunGroupConfig: # transaction_type module_working_set_size executor_type count min_ratio max_ratio median CALIBRATION = """ -no-op 1 VM 10 0.913 1.048 36932.7 -no-op 1000 VM 10 0.920 1.061 34126.3 -apt-fa-transfer 1 VM 10 0.920 1.053 25730.9 -account-generation 1 VM 10 0.928 1.051 19826.7 -account-resource32-b 1 VM 10 0.879 1.076 31969.2 -modify-global-resource 1 VM 10 0.965 1.042 2208.9 -modify-global-resource 100 VM 10 0.935 1.049 31969.2 -publish-package 1 VM 10 0.924 1.040 1800 -mix_publish_transfer 1 VM 10 0.946 1.097 19680.5 -batch100-transfer 1 VM 10 0.900 1.079 600.3 -vector-picture30k 1 VM 10 0.965 1.029 100.9 -vector-picture30k 100 VM 10 0.881 1.147 1657.7 -smart-table-picture30-k-with200-change 1 VM 10 0.948 1.040 16.0 -smart-table-picture30-k-with200-change 100 VM 10 0.956 1.033 214.8 -modify-global-resource-agg-v2 1 VM 10 0.861 1.093 31537.5 -modify-global-flag-agg-v2 1 VM 10 0.963 1.019 4174.2 -modify-global-bounded-agg-v2 1 VM 10 0.974 1.086 7366.5 -modify-global-milestone-agg-v2 1 VM 10 0.911 1.044 24049.2 -resource-groups-global-write-tag1-kb 1 VM 10 0.947 1.048 9111.1 -resource-groups-global-write-and-read-tag1-kb 1 VM 10 0.972 1.043 5400.4 -resource-groups-sender-write-tag1-kb 1 VM 10 0.944 1.161 19390.4 -resource-groups-sender-multi-change1-kb 1 VM 10 0.852 1.145 16564.9 -token-v1ft-mint-and-transfer 1 VM 10 0.929 1.027 1128.1 -token-v1ft-mint-and-transfer 100 VM 10 0.872 1.065 17163.7 -token-v1nft-mint-and-transfer-sequential 1 VM 10 0.934 1.032 717.5 -token-v1nft-mint-and-transfer-sequential 100 VM 10 0.919 1.047 12371.6 -coin-init-and-mint 1 VM 10 0.876 1.090 25097.7 -coin-init-and-mint 100 VM 10 0.882 1.073 21023.1 -fungible-asset-mint 1 VM 10 0.888 1.094 21176.2 -fungible-asset-mint 100 VM 10 0.883 1.069 18889.3 -no-op5-signers 1 VM 10 0.883 1.065 36305.8 -token-v2-ambassador-mint 1 VM 10 0.886 1.057 14411.2 -token-v2-ambassador-mint 100 VM 10 0.953 1.056 14358.6 -liquidity-pool-swap 1 VM 10 0.933 1.036 788.5 -liquidity-pool-swap 100 VM 10 0.935 1.058 10781.4 -liquidity-pool-swap-stable 1 VM 10 0.908 1.027 763.7 -liquidity-pool-swap-stable 100 VM 10 0.933 1.046 10568.9
-deserialize-u256 1 VM 10 0.943 1.080 33725.8 -no-op-fee-payer 1 VM 10 0.952 1.030 2037.8 -no-op-fee-payer 100 VM 10 0.909 1.061 31782.9 -simple-script 1 VM 10 0.868 1.061 35753.8 +no-op 1 VM 6 0.938 1.019 38925.3 +no-op 1000 VM 6 0.943 1.019 36444.6 +apt-fa-transfer 1 VM 6 0.927 1.018 26954.7 +account-generation 1 VM 6 0.96 1.02 20606.2 +account-resource32-b 1 VM 6 0.94 1.026 34260.4 +modify-global-resource 1 VM 6 0.993 1.021 2260.5 +modify-global-resource 100 VM 6 0.982 1.02 33129.7 +publish-package 1 VM 6 0.983 1.012 1672.6 +mix_publish_transfer 1 VM 6 0.972 1.044 20832.8 +batch100-transfer 1 VM 6 0.953 1.024 645.1 +vector-picture30k 1 VM 6 0.992 1.039 103.6 +vector-picture30k 100 VM 6 0.913 1.015 1831.5 +smart-table-picture30-k-with200-change 1 VM 6 0.976 1.034 16.1 +smart-table-picture30-k-with200-change 100 VM 6 0.985 1.018 212.9 +modify-global-resource-agg-v2 1 VM 6 0.976 1.035 33992.5 +modify-global-flag-agg-v2 1 VM 6 0.986 1.016 4224 +modify-global-bounded-agg-v2 1 VM 6 0.964 1.047 7661.6 +modify-global-milestone-agg-v2 1 VM 6 0.973 1.017 25187.1 +resource-groups-global-write-tag1-kb 1 VM 6 0.989 1.03 9215.7 +resource-groups-global-write-and-read-tag1-kb 1 VM 6 0.982 1.018 5538.3 +resource-groups-sender-write-tag1-kb 1 VM 6 0.985 1.059 20084.2 +resource-groups-sender-multi-change1-kb 1 VM 6 0.968 1.034 16400.4 +token-v1ft-mint-and-transfer 1 VM 6 0.987 1.022 1156.3 +token-v1ft-mint-and-transfer 100 VM 6 0.964 1.024 17842.6 +token-v1nft-mint-and-transfer-sequential 1 VM 6 0.984 1.017 735.7 +token-v1nft-mint-and-transfer-sequential 100 VM 6 0.966 1.017 12819.7 +coin-init-and-mint 1 VM 6 0.95 1.024 26906.4 +coin-init-and-mint 100 VM 6 0.985 1.022 22312.6 +fungible-asset-mint 1 VM 6 0.955 1.013 23001.6 +fungible-asset-mint 100 VM 6 0.955 1.015 19973.5 +no-op5-signers 1 VM 6 0.934 1.016 38708.6 +token-v2-ambassador-mint 1 VM 6 0.975 1.008 15179.3 +token-v2-ambassador-mint 100 VM 6 0.985 1.007 15150.8 +liquidity-pool-swap 1 VM 6 0.987 1.018 805.5 +liquidity-pool-swap 100 VM 6 0.993 1.02 11156.3 +liquidity-pool-swap-stable 1 VM 6 0.985 1.017 778.7 +liquidity-pool-swap-stable 100 VM 6 0.982 1.009 11056.6 +deserialize-u256 1 VM 6 0.968 1.026 36444.6 +no-op-fee-payer 1 VM 6 0.994 1.026 2046 +no-op-fee-payer 100 VM 6 0.96 1.014 32866.5 +simple-script 1 VM 6 0.941 1.012 38206.1 """ # when adding a new test, add estimated expected_tps to it, as well as waived=True. 
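Each calibration row above pairs a recorded median TPS with a [min_ratio, max_ratio] acceptance band used to flag regressions; the recalibrated rows (count 6) carry tighter bands than the old count-10 rows. A minimal sketch of that gate, written in Rust for illustration (the harness itself is Python; the function name and signature here are assumptions, not the harness's API):

fn within_calibration(measured_tps: f64, min_ratio: f64, max_ratio: f64, median_tps: f64) -> bool {
    // Pass iff the measured throughput stays inside the calibrated ratio band.
    let ratio = measured_tps / median_tps;
    (min_ratio..=max_ratio).contains(&ratio)
}

For example, against the new no-op row (median 38925.3, band [0.938, 1.019]), a run measuring 36000 TPS yields a ratio of roughly 0.925 and would be flagged.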
diff --git a/testsuite/smoke-test/src/keyless.rs b/testsuite/smoke-test/src/keyless.rs index f128c6aef9eca..612ef3a556cf9 100644 --- a/testsuite/smoke-test/src/keyless.rs +++ b/testsuite/smoke-test/src/keyless.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::smoke_test_environment::SwarmBuilder; +use crate::{smoke_test_environment::SwarmBuilder, utils::get_on_chain_resource}; use aptos::{common::types::GasOptions, test::CliTestFramework}; use aptos_cached_packages::aptos_stdlib; use aptos_crypto::{ @@ -324,8 +324,15 @@ script {{ let esk = EphemeralPrivateKey::Ed25519 { inner_private_key: get_sample_esk(), }; - let ephemeral_key_pair = - EphemeralKeyPair::new(esk, get_sample_exp_date(), get_sample_epk_blinder()).unwrap(); + let rest_cli = swarm.validators().next().unwrap().rest_client(); + let config = get_on_chain_resource(&rest_cli).await; + let ephemeral_key_pair = EphemeralKeyPair::new_with_keyless_config( + &config, + esk, + get_sample_exp_date(), + get_sample_epk_blinder(), + ) + .unwrap(); let federated_keyless_account = FederatedKeylessAccount::new_from_jwt( &get_sample_jwt_token(), ephemeral_key_pair, @@ -430,11 +437,19 @@ async fn test_keyless_no_training_wheels_groth16_verifies() { async fn test_keyless_groth16_verifies_using_rust_sdk() { let (_tw_sk, _, _, swarm, mut cli, root_idx) = setup_local_net().await; + let rest_cli = swarm.validators().next().unwrap().rest_client(); + let config = get_on_chain_resource(&rest_cli).await; + let esk = EphemeralPrivateKey::Ed25519 { inner_private_key: get_sample_esk(), }; - let ephemeral_key_pair = - EphemeralKeyPair::new(esk, get_sample_exp_date(), get_sample_epk_blinder()).unwrap(); + let ephemeral_key_pair = EphemeralKeyPair::new_with_keyless_config( + &config, + esk, + get_sample_exp_date(), + get_sample_epk_blinder(), + ) + .unwrap(); let mut info = swarm.aptos_public_info(); let keyless_account = KeylessAccount::new( @@ -489,12 +504,20 @@ async fn test_keyless_groth16_verifies_using_rust_sdk() { #[tokio::test] async fn test_keyless_groth16_verifies_using_rust_sdk_from_jwt() { let (_tw_sk, _, _, swarm, mut cli, root_idx) = setup_local_net().await; + let rest_cli = swarm.validators().next().unwrap().rest_client(); + let config = get_on_chain_resource(&rest_cli).await; let esk = EphemeralPrivateKey::Ed25519 { inner_private_key: get_sample_esk(), }; - let ephemeral_key_pair = - EphemeralKeyPair::new(esk, get_sample_exp_date(), get_sample_epk_blinder()).unwrap(); + + let ephemeral_key_pair = EphemeralKeyPair::new_with_keyless_config( + &config, + esk, + get_sample_exp_date(), + get_sample_epk_blinder(), + ) + .unwrap(); let mut info = swarm.aptos_public_info(); let keyless_account = KeylessAccount::new_from_jwt( diff --git a/testsuite/smoke-test/src/storage.rs b/testsuite/smoke-test/src/storage.rs index b042a4b8de9c4..db753d0c38ecb 100644 --- a/testsuite/smoke-test/src/storage.rs +++ b/testsuite/smoke-test/src/storage.rs @@ -466,14 +466,16 @@ async fn do_transfer_or_reconfig(info: &mut AptosPublicInfo) -> Result<()> { const LOTS_MONEY: u64 = 100_000_000; let r = rand::random::() % 10; if r < 3 { - // reconfig + info!( + "{LINE} background task: triggering reconfig. Root account seq_num: {}. 
Ledger info: {:?}", + info.root_account().sequence_number(), + info.client().get_ledger_information().await.unwrap(), + ); info.reconfig().await; - } else if r == 9 { - // drain backlog - let mut sender = info.create_and_fund_user_account(LOTS_MONEY).await?; - let receiver = info.create_and_fund_user_account(LOTS_MONEY).await?; - let pending_txn = info.transfer(&mut sender, &receiver, 1).await?; - info.client().wait_for_transaction(&pending_txn).await?; + info!( + "{LINE} background task: Reconfig done. Root account seq_num: {}", + info.root_account().sequence_number(), + ); } else { let mut sender = info.create_and_fund_user_account(LOTS_MONEY).await?; let receiver = info.create_and_fund_user_account(LOTS_MONEY).await?; @@ -531,23 +533,30 @@ async fn test_db_restart() { for round in 0..3 { info!("{LINE} Restart round {round}"); for (v, vid) in restarting_validator_ids.iter().enumerate() { - info!("{LINE} Round {round}: Restarting validator {v}."); - info!( - "{LINE} ledger info: {:?}", - client.get_ledger_information().await.unwrap(), - ); let validator = swarm.validator_mut(*vid).unwrap(); // sometimes trigger reconfig right before the restart, to expose edge cases around // epoch change if rand::random::() % 3 == 0 { - info!("{LINE} Triggering reconfig right before restarting."); + info!( + "{LINE} Triggering reconfig right before restarting. Root account seq_num: {}. Ledger info: {:?}", + pub_chain_info.root_account().sequence_number(), + client.get_ledger_information().await.unwrap(), + ); reconfig( - &validator.rest_client(), + &client, &pub_chain_info.transaction_factory(), pub_chain_info.root_account(), ) .await; + info!( + "{LINE} Reconfig done. Root account seq_num: {}", + pub_chain_info.root_account().sequence_number(), + ) } + info!( + "{LINE} Round {round}: Restarting validator {v}. ledger info: {:?}", + client.get_ledger_information().await.unwrap(), + ); validator.restart().await.unwrap(); swarm .wait_for_all_nodes_to_catchup(Duration::from_secs(60)) @@ -558,11 +567,12 @@ async fn test_db_restart() { } } - info!("{LINE} Stopping background traffic, and check again that all validators are alive."); + info!("{LINE} Stopping background traffic, and make sure background task didn't panic."); quit_flag.store(true, Ordering::Release); // Make sure background thread didn't panic. 
background_traffic.await.unwrap(); + info!("{LINE} Check again that all validators are alive."); swarm .wait_for_all_nodes_to_catchup(Duration::from_secs(60)) .await diff --git a/testsuite/smoke-test/src/txn_emitter.rs b/testsuite/smoke-test/src/txn_emitter.rs index 0a8b2c6fac653..c91d33fb4179a 100644 --- a/testsuite/smoke-test/src/txn_emitter.rs +++ b/testsuite/smoke-test/src/txn_emitter.rs @@ -37,7 +37,8 @@ pub async fn generate_traffic( .await?; let transaction_factory = TransactionFactory::new(swarm.chain_info().chain_id).with_gas_unit_price(gas_price); - let emitter = TxnEmitter::new(transaction_factory, rng); + let rest_cli = swarm.validators().next().unwrap().rest_client(); + let emitter = TxnEmitter::new(transaction_factory, rng, rest_cli); emitter .emit_txn_for_with_stats( swarm.chain_info().root_account, @@ -68,8 +69,8 @@ pub async fn generate_keyless_traffic( .await?; let transaction_factory = TransactionFactory::new(swarm.chain_info().chain_id).with_gas_unit_price(gas_price); - - let emitter = TxnEmitter::new(transaction_factory, rng); + let rest_cli = swarm.validators().next().unwrap().rest_client(); + let emitter = TxnEmitter::new(transaction_factory, rng, rest_cli); emitter .emit_txn_for_with_stats( swarm.chain_info().root_account, @@ -267,7 +268,7 @@ async fn test_txn_emmitter_low_funds() { .collect::<Vec<_>>(); let chain_info = swarm.chain_info(); let transaction_factory = TransactionFactory::new(chain_info.chain_id).with_gas_unit_price(100); - let emitter = TxnEmitter::new(transaction_factory, rng); + let emitter = TxnEmitter::new(transaction_factory, rng, validator_clients[0].clone()); let emit_job_request = EmitJobRequest::default() .rest_clients(validator_clients) diff --git a/testsuite/testcases/src/lib.rs b/testsuite/testcases/src/lib.rs index 3e3bc617c2fd7..7677993b04b7c 100644 --- a/testsuite/testcases/src/lib.rs +++ b/testsuite/testcases/src/lib.rs @@ -143,7 +143,14 @@ pub async fn create_emitter_and_request( let chain_info = swarm.read().await.chain_info(); let transaction_factory = TransactionFactory::new(chain_info.chain_id); - let emitter = TxnEmitter::new(transaction_factory, rng); + let rest_cli = swarm + .read() + .await + .validators() + .next() + .unwrap() + .rest_client(); + let emitter = TxnEmitter::new(transaction_factory, rng, rest_cli); emit_job_request = emit_job_request.rest_clients( swarm diff --git a/third_party/move/move-binary-format/src/compatibility.rs b/third_party/move/move-binary-format/src/compatibility.rs index 35706526ac9d2..be95e220456e6 100644 --- a/third_party/move/move-binary-format/src/compatibility.rs +++ b/third_party/move/move-binary-format/src/compatibility.rs @@ -33,6 +33,8 @@ pub struct Compatibility { pub(crate) check_struct_layout: bool, /// if false, treat `friend` as `private` when `check_struct_and_function_linking`. pub(crate) check_friend_linking: bool, + /// if false, entry functions will be treated as regular functions.
+ pub(crate) treat_entry_as_public: bool, } impl Default for Compatibility { @@ -41,6 +43,7 @@ check_struct_and_pub_function_linking: true, check_struct_layout: true, check_friend_linking: true, + treat_entry_as_public: true, } } } @@ -55,14 +58,20 @@ impl Compatibility { check_struct_and_pub_function_linking: false, check_struct_layout: false, check_friend_linking: false, + treat_entry_as_public: false, } } - pub fn new(check_struct_layout: bool, check_friend_linking: bool) -> Self { + pub fn new( + check_struct_layout: bool, + check_friend_linking: bool, + treat_entry_as_public: bool, + ) -> Self { Self { check_struct_and_pub_function_linking: true, check_struct_layout, check_friend_linking, + treat_entry_as_public, } } @@ -139,11 +148,8 @@ impl Compatibility { // - if the function visibility is upgraded to public, it is OK // - otherwise, it is considered as incompatible. // - // NOTE: it is possible to relax the compatibility checking for a friend function, i.e., - // we can remove/change a friend function if the function is not used by any module in the - // friend list. But for simplicity, we decided to go to the more restrictive form now and - we may revisit this in the future. for old_func in old_view.functions() { + // Private, non-entry functions are not subject to any checks here; skip. if old_func.visibility() == Visibility::Private && !old_func.is_entry() { // Function not exposed, continue with next one continue; @@ -152,15 +158,28 @@ Some(new_func) => new_func, None => { // Function has been removed + // At this point the function is NOT a private, non-entry function. if !matches!(old_func.visibility(), Visibility::Friend) + // Above: either a private entry function, or a public one. || self.check_friend_linking + // Here old_func must be a friend function. + // If friends are not treated as private (self.check_friend_linking is true), removal is not allowed. + || (old_func.is_entry() && self.treat_entry_as_public) + // Here old_func must be a friend function, and check_friend_linking is false. + // Still disallow removing entry functions when self.treat_entry_as_public is set (entry is treated as public). { errors.push(format!("removed function `{}`", old_func.name())); } continue; }, }; - if matches!(old_func.visibility(), Visibility::Friend) && !self.check_friend_linking { + + if matches!(old_func.visibility(), Visibility::Friend) + && !self.check_friend_linking + // Above: skip linking checks for public(friend) when self.check_friend_linking is false. + && !(old_func.is_entry() && self.treat_entry_as_public) + // However, public(friend) entry functions still need to be checked. + { continue; } let is_vis_compatible = match (old_func.visibility(), new_func.visibility()) { diff --git a/third_party/move/move-binary-format/src/compatibility_legacy.rs b/third_party/move/move-binary-format/src/compatibility_legacy.rs index a140863d65137..667dc0e09583b 100644 --- a/third_party/move/move-binary-format/src/compatibility_legacy.rs +++ b/third_party/move/move-binary-format/src/compatibility_legacy.rs @@ -71,17 +71,20 @@ impl Compatibility { // - if the function visibility is upgraded to public, it is OK // - otherwise, it is considered as incompatible. // - // NOTE: it is possible to relax the compatibility checking for a friend function, i.e., - // we can remove/change a friend function if the function is not used by any module in the - // friend list.
But for simplicity, we decided to go to the more restrictive form now and - we may revisit this in the future. for (name, old_func) in &old_module.exposed_functions { let new_func = match new_module.exposed_functions.get(name) { Some(new_func) => new_func, None => { - if matches!(old_func.visibility, Visibility::Friend) { + if matches!(old_func.visibility, Visibility::Friend) + && !(old_func.is_entry && self.treat_entry_as_public) + // self.treat_entry_as_public is false: removing any friend function lands here. + // self.treat_entry_as_public is true: only removing a friend non-entry function lands here. + { + // Report as a friend linking error, which is dismissed when + // self.check_friend_linking is set to false. friend_linking = false; } else { + // Otherwise report as a function linking error. struct_and_pub_function_linking = false; } continue; @@ -118,9 +121,16 @@ impl Compatibility { &new_func.type_parameters, ) { - if matches!(old_func.visibility, Visibility::Friend) { + if matches!(old_func.visibility, Visibility::Friend) + && (!old_func.is_entry || !self.treat_entry_as_public) + // self.treat_entry_as_public is false: changing the signature of any friend function lands here. + // self.treat_entry_as_public is true: only changing the signature of a friend non-entry function lands here. + { + // Report as a friend linking error, which is dismissed when + // self.check_friend_linking is set to false. friend_linking = false; } else { + // Otherwise report as a function linking error. struct_and_pub_function_linking = false; } } diff --git a/third_party/move/move-compiler-v2/src/plan_builder.rs b/third_party/move/move-compiler-v2/src/plan_builder.rs index 804ba329bd0a4..b03c91c712938 100644 --- a/third_party/move/move-compiler-v2/src/plan_builder.rs +++ b/third_party/move/move-compiler-v2/src/plan_builder.rs @@ -503,9 +503,16 @@ fn get_assigned_attribute( fn convert_location(env: &GlobalEnv, attr: Attribute) -> Option { let (loc, value) = get_assigned_attribute(env, TestingAttribute::ERROR_LOCATION, attr)?; match value { - AttributeValue::Name(id, opt_module_name, _sym) => { + AttributeValue::Name(id, opt_module_name, sym) => { let vloc = env.get_node_loc(id); - convert_module_id(env, vloc, opt_module_name) + let module_id_opt = convert_module_id(env, vloc.clone(), opt_module_name); + if !sym.display(env.symbol_pool()).to_string().is_empty() || module_id_opt.is_none() { + env.error_with_labels(&loc, "invalid attribute value", vec![( + vloc, + "Expected a module identifier, e.g. 'std::vector'".to_string(), + )]); + } + module_id_opt }, AttributeValue::Value(id, _val) => { let vloc = env.get_node_loc(id); diff --git a/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.exp b/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.exp index 5697666500681..133de995f34f7 100644 --- a/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.exp +++ b/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.exp @@ -31,9 +31,33 @@ error: Expected `location` following `major_status` │ ^^^^^^^^^^^^^^^^^ error: invalid attribute value - ┌─ tests/unit_test/test/other_failures_invalid_location.move:38:59 + ┌─ tests/unit_test/test/other_failures_invalid_location.move:26:38 │ -38 │ #[expected_failure(major_status=4016, minor_status=0, location=0)] +26 │ #[expected_failure(vector_error, location=x)] + │ ^^^^^^^^^^ + │ │ + │ Expected a module identifier, e.g. 
'std::vector' + +error: invalid attribute value + ┌─ tests/unit_test/test/other_failures_invalid_location.move:30:36 + │ +30 │ #[expected_failure(out_of_gas, location=0x1::m::t0)] + │ ^^^^^^^^^^^^^^^^^^^ + │ │ + │ Expected a module identifier, e.g. 'std::vector' + +error: invalid attribute value + ┌─ tests/unit_test/test/other_failures_invalid_location.move:38:43 + │ +38 │ #[expected_failure(major_status=4004, location=self)] + │ ^^^^^^^^^^^^^ + │ │ + │ Expected a module identifier, e.g. 'std::vector' + +error: invalid attribute value + ┌─ tests/unit_test/test/other_failures_invalid_location.move:42:59 + │ +42 │ #[expected_failure(major_status=4016, minor_status=0, location=0)] │ ^^^^^^^^^^ │ │ │ Expected a module identifier, e.g. 'std::vector' diff --git a/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.move b/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.move index 786652248169f..93bfcf8a4b695 100644 --- a/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.move +++ b/third_party/move/move-compiler-v2/tests/unit_test/test/other_failures_invalid_location.move @@ -30,6 +30,10 @@ module 0x1::m { #[expected_failure(out_of_gas, location=0x1::m::t0)] fun u2() { } + #[test] + #[expected_failure(out_of_gas, location=0x1::m)] + fun u2_correct() { } + #[test] #[expected_failure(major_status=4004, location=self)] fun u3() { } diff --git a/third_party/move/move-model/src/metadata.rs b/third_party/move/move-model/src/metadata.rs index 338b6d9e94a29..0a6bacc5e7330 100644 --- a/third_party/move/move-model/src/metadata.rs +++ b/third_party/move/move-model/src/metadata.rs @@ -171,6 +171,15 @@ impl CompilerVersion { _ => Ok(()), } } + + /// Infer the latest stable language version based on the compiler version + pub fn infer_stable_language_version(&self) -> LanguageVersion { + if *self == CompilerVersion::V1 { + LanguageVersion::V1 + } else { + LanguageVersion::latest_stable() + } + } } // ================================================================================' diff --git a/third_party/move/scripts/move_pr.sh b/third_party/move/scripts/move_pr.sh index ef6588b4e6d9b..56b1ef095cec7 100755 --- a/third_party/move/scripts/move_pr.sh +++ b/third_party/move/scripts/move_pr.sh @@ -197,10 +197,10 @@ if [ ! 
-z "$COMPILER_V2_TEST" ]; then echo "*************** [move-pr] Running integration tests with compiler v2" ( cd $BASE - MVC_DOCGEN_OUTPUT_DIR=tests/compiler-v2-doc MOVE_COMPILER_V2=true cargo build $CARGO_OP_PARAMS \ + MVC_DOCGEN_OUTPUT_DIR=tests/compiler-v2-doc MOVE_COMPILER_V2=true MOVE_LANGUAGE_V2=true cargo build $CARGO_OP_PARAMS \ $MOVE_CRATES_V2_ENV_DEPENDENT MVC_DOCGEN_OUTPUT_DIR=tests/compiler-v2-doc \ - MOVE_COMPILER_V2=true cargo nextest run $CARGO_NEXTEST_PARAMS \ + MOVE_COMPILER_V2=true MOVE_LANGUAGE_V2=true cargo nextest run $CARGO_NEXTEST_PARAMS \ $MOVE_CRATES_V2_ENV_DEPENDENT ) fi diff --git a/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs b/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs index da5e6faa106ab..0cf1f792eb255 100644 --- a/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs +++ b/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs @@ -302,6 +302,7 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { Compatibility::new( !extra_args.skip_check_struct_layout, !extra_args.skip_check_friend_linking, + false, ) }; if vm.vm_config().use_loader_v2 { diff --git a/types/src/jwks/patch/mod.rs b/types/src/jwks/patch/mod.rs index cdcc693551ba5..0dce3842ce8fb 100644 --- a/types/src/jwks/patch/mod.rs +++ b/types/src/jwks/patch/mod.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - jwks::jwk::JWKMoveStruct, + jwks::jwk::{JWKMoveStruct, JWK}, move_any::{Any as MoveAny, AsMoveAny}, move_utils::as_move_value::AsMoveValue, }; @@ -36,3 +36,10 @@ pub struct PatchUpsertJWK { impl AsMoveAny for PatchUpsertJWK { const MOVE_TYPE_NAME: &'static str = "0x1::jwks::PatchUpsertJWK"; } + +/// A variant representation used in genesis layout. 
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct IssuerJWK { + pub issuer: String, + pub jwk: JWK, +} diff --git a/types/src/keyless/bn254_circom.rs b/types/src/keyless/bn254_circom.rs index 1c37c1d8b176b..1991e32c91420 100644 --- a/types/src/keyless/bn254_circom.rs +++ b/types/src/keyless/bn254_circom.rs @@ -9,6 +9,7 @@ use crate::{ KeylessSignature, }, serialize, + transaction::authenticator::EphemeralPublicKey, }; use anyhow::bail; use aptos_crypto::{poseidon_bn254, poseidon_bn254::pad_and_hash_string, CryptoMaterialError}; @@ -252,7 +253,7 @@ static PAD_AND_HASH_STRING_CACHE: Lazy> = static JWK_HASH_CACHE: Lazy> = Lazy::new(|| Cache::new(100)); -pub fn cached_pad_and_hash_string(str: &String, max_bytes: usize) -> anyhow::Result { +pub fn cached_pad_and_hash_string(str: &str, max_bytes: usize) -> anyhow::Result { let key = (str.to_string(), max_bytes); match PAD_AND_HASH_STRING_CACHE.get(&key) { None => { @@ -275,6 +276,98 @@ pub fn cached_jwk_hash(jwk: &RSA_JWK) -> anyhow::Result { } } +pub fn hash_public_inputs( + config: &Configuration, + epk: &EphemeralPublicKey, + idc: &IdCommitment, + exp_timestamp_secs: u64, + exp_horizon_secs: u64, + iss: &str, + extra_field: Option<&str>, + jwt_header_json: &str, + jwk: &RSA_JWK, + override_aud_val: Option<&str>, +) -> anyhow::Result { + let (has_extra_field, extra_field_hash) = match extra_field { + None => (Fr::zero(), *EMPTY_EXTRA_FIELD_HASH), + Some(extra_field) => ( + Fr::one(), + poseidon_bn254::keyless::pad_and_hash_string( + extra_field, + config.max_extra_field_bytes as usize, + )?, + ), + }; + + let (override_aud_val_hash, use_override_aud) = match override_aud_val { + Some(override_aud_val) => ( + cached_pad_and_hash_string(override_aud_val, IdCommitment::MAX_AUD_VAL_BYTES)?, + ark_bn254::Fr::from(1), + ), + None => (*EMPTY_OVERRIDE_AUD_FIELD_HASH, ark_bn254::Fr::from(0)), + }; + + // Add the hash of the jwt_header with the "." 
separator appended + let jwt_header_b64_with_separator = format!("{}.", base64url_encode_str(jwt_header_json)); + let jwt_header_hash = cached_pad_and_hash_string( + &jwt_header_b64_with_separator, + config.max_jwt_header_b64_bytes as usize, + )?; + + let jwk_hash = cached_jwk_hash(jwk)?; + + // Add the hash of the value of the `iss` field + let iss_field_hash = cached_pad_and_hash_string(iss, config.max_iss_val_bytes as usize)?; + + // Add the id_commitment as a scalar + let idc = Fr::from_le_bytes_mod_order(&idc.0); + + // Add the exp_timestamp_secs as a scalar + let exp_timestamp_secs = Fr::from(exp_timestamp_secs); + + // Add the epk lifespan as a scalar + let exp_horizon_secs = Fr::from(exp_horizon_secs); + + let mut epk_frs = poseidon_bn254::keyless::pad_and_pack_bytes_to_scalars_with_len( + epk.to_bytes().as_slice(), + config.max_commited_epk_bytes as usize, + )?; + + // println!("Num EPK scalars: {}", epk_frs.len()); + // for (i, e) in epk_frs.iter().enumerate() { + // println!("EPK Fr[{}]: {}", i, e.to_string()) + // } + // println!("IDC: {}", idc); + // println!("exp_timestamp_secs: {}", exp_timestamp_secs); + // println!("exp_horizon_secs: {}", exp_horizon_secs); + // println!("iss field: {}", pk.iss_val); + // println!("iss field hash: {}", iss_field_hash); + // println!("Has extra field: {}", has_extra_field); + // println!("Extra field val: {:?}", proof.extra_field); + // println!("Extra field hash: {}", extra_field_hash); + // println!("JWT header val: {}", jwt_header_b64_with_separator); + // println!("JWT header hash: {}", jwt_header_hash); + // println!("JWK hash: {}", jwk_hash); + // println!("Override aud hash: {}", override_aud_val_hash); + // println!("Use override aud: {}", use_override_aud.to_string()); + + let mut frs = vec![]; + frs.append(&mut epk_frs); + frs.push(idc); + frs.push(exp_timestamp_secs); + frs.push(exp_horizon_secs); + frs.push(iss_field_hash); + frs.push(has_extra_field); + frs.push(extra_field_hash); + frs.push(jwt_header_hash); + frs.push(jwk_hash); + frs.push(override_aud_val_hash); + frs.push(use_override_aud); + // TODO(keyless): If we plan on avoiding verifying the same PIH twice, there should be no + // need for caching here. If we do not, we should cache the result here too. + poseidon_bn254::hash_scalars(frs) +} + pub fn get_public_inputs_hash( sig: &KeylessSignature, pk: &KeylessPublicKey, @@ -282,87 +375,18 @@ pub fn get_public_inputs_hash( config: &Configuration, ) -> anyhow::Result { if let EphemeralCertificate::ZeroKnowledgeSig(proof) = &sig.cert { - let (has_extra_field, extra_field_hash) = match &proof.extra_field { - None => (Fr::zero(), *EMPTY_EXTRA_FIELD_HASH), - Some(extra_field) => ( - Fr::one(), - poseidon_bn254::keyless::pad_and_hash_string( - extra_field, - config.max_extra_field_bytes as usize, - )?, - ), - }; - - let (override_aud_val_hash, use_override_aud) = match &proof.override_aud_val { - Some(override_aud_val) => ( - cached_pad_and_hash_string(override_aud_val, IdCommitment::MAX_AUD_VAL_BYTES)?, - ark_bn254::Fr::from(1), - ), - None => (*EMPTY_OVERRIDE_AUD_FIELD_HASH, ark_bn254::Fr::from(0)), - }; - - // Add the hash of the jwt_header with the "." 
separator appended - let jwt_header_b64_with_separator = - format!("{}.", base64url_encode_str(sig.jwt_header_json.as_str())); - let jwt_header_hash = cached_pad_and_hash_string( - &jwt_header_b64_with_separator, - config.max_jwt_header_b64_bytes as usize, - )?; - - let jwk_hash = cached_jwk_hash(jwk)?; - - // Add the hash of the value of the `iss` field - let iss_field_hash = - cached_pad_and_hash_string(&pk.iss_val, config.max_iss_val_bytes as usize)?; - - // Add the id_commitment as a scalar - let idc = Fr::from_le_bytes_mod_order(&pk.idc.0); - - // Add the exp_timestamp_secs as a scalar - let exp_timestamp_secs = Fr::from(sig.exp_date_secs); - - // Add the epk lifespan as a scalar - let exp_horizon_secs = Fr::from(proof.exp_horizon_secs); - - // Add the epk as padded and packed scalars - let mut epk_frs = poseidon_bn254::keyless::pad_and_pack_bytes_to_scalars_with_len( - sig.ephemeral_pubkey.to_bytes().as_slice(), - config.max_commited_epk_bytes as usize, - )?; - - // println!("Num EPK scalars: {}", epk_frs.len()); - // for (i, e) in epk_frs.iter().enumerate() { - // println!("EPK Fr[{}]: {}", i, e.to_string()) - // } - // println!("IDC: {}", idc); - // println!("exp_timestamp_secs: {}", exp_timestamp_secs); - // println!("exp_horizon_secs: {}", exp_horizon_secs); - // println!("iss field: {}", pk.iss_val); - // println!("iss field hash: {}", iss_field_hash); - // println!("Has extra field: {}", has_extra_field); - // println!("Extra field val: {:?}", proof.extra_field); - // println!("Extra field hash: {}", extra_field_hash); - // println!("JWT header val: {}", jwt_header_b64_with_separator); - // println!("JWT header hash: {}", jwt_header_hash); - // println!("JWK hash: {}", jwk_hash); - // println!("Override aud hash: {}", override_aud_val_hash); - // println!("Use override aud: {}", use_override_aud.to_string()); - - let mut frs = vec![]; - frs.append(&mut epk_frs); - frs.push(idc); - frs.push(exp_timestamp_secs); - frs.push(exp_horizon_secs); - frs.push(iss_field_hash); - frs.push(has_extra_field); - frs.push(extra_field_hash); - frs.push(jwt_header_hash); - frs.push(jwk_hash); - frs.push(override_aud_val_hash); - frs.push(use_override_aud); - // TODO(keyless): If we plan on avoiding verifying the same PIH twice, there should be no - // need for caching here. If we do not, we should cache the result here too. - poseidon_bn254::hash_scalars(frs) + hash_public_inputs( + config, + &sig.ephemeral_pubkey, + &pk.idc, + sig.exp_date_secs, + proof.exp_horizon_secs, + &pk.iss_val, + proof.extra_field.as_deref(), + &sig.jwt_header_json, + jwk, + proof.override_aud_val.as_deref(), + ) } else { bail!("Can only call `get_public_inputs_hash` on keyless::Signature with Groth16 ZK proof") } diff --git a/types/src/keyless/circuit_constants.rs b/types/src/keyless/circuit_constants.rs index 60cf6a96f914e..ce0017e1fe5a9 100644 --- a/types/src/keyless/circuit_constants.rs +++ b/types/src/keyless/circuit_constants.rs @@ -3,10 +3,15 @@ //! These constants are from commit 125522b4b226f8ece3e3162cecfefe915d13bc30 of keyless-circuit. 
-use crate::keyless::bn254_circom::{g1_projective_str_to_affine, g2_projective_str_to_affine}; +use crate::keyless::{ + bn254_circom::{g1_projective_str_to_affine, g2_projective_str_to_affine}, + proof_simulation::{Groth16SimulatorBn254, Trapdoor}, +}; use aptos_crypto::poseidon_bn254; use ark_bn254::Bn254; use ark_groth16::{PreparedVerifyingKey, VerifyingKey}; +use once_cell::sync::Lazy; +use rand::{prelude::StdRng, SeedableRng}; pub(crate) const MAX_AUD_VAL_BYTES: usize = 120; pub(crate) const MAX_UID_KEY_BYTES: usize = 30; @@ -92,3 +97,19 @@ pub fn devnet_prepared_vk() -> PreparedVerifyingKey { PreparedVerifyingKey::from(vk) } + +pub struct Groth16TrapdoorSetup { + pub simulation_pk: Trapdoor, + pub prepared_vk: PreparedVerifyingKey, +} + +pub static TEST_GROTH16_SETUP: Lazy = Lazy::new(|| { + let mut rng = StdRng::seed_from_u64(999); + let (simulation_pk, vk) = + Groth16SimulatorBn254::circuit_agnostic_setup_with_trapdoor(&mut rng, 1).unwrap(); + let prepared_vk = PreparedVerifyingKey::from(vk.clone()); + Groth16TrapdoorSetup { + simulation_pk, + prepared_vk, + } +}); diff --git a/types/src/keyless/circuit_testcases.rs b/types/src/keyless/circuit_testcases.rs index b542d26965232..bb6896bb45bc0 100644 --- a/types/src/keyless/circuit_testcases.rs +++ b/types/src/keyless/circuit_testcases.rs @@ -61,6 +61,31 @@ pub fn sample_jwt_payload_json() -> String { ) } +pub fn render_jwt_payload_json( + iss: &str, + aud: &str, + uid_key: &str, + uid_val: &str, + extra_field: &str, + iat: u64, + nonce: &str, + exp: u64, +) -> String { + format!( + r#"{{ + "iss":"{}", + "aud":"{}", + "{}":"{}", + {} + "iat":{}, + "nonce":"{}", + "exp":{} + }} + "#, + iss, aud, uid_key, uid_val, extra_field, iat, nonce, exp + ) +} + pub fn sample_jwt_payload_json_overrides( iss: &str, uid_val: &str, @@ -109,7 +134,7 @@ pub(crate) static SAMPLE_JWK: Lazy = Lazy::new(insecure_test_rsa_jwk); /// This is the SK from https://token.dev/. 
/// To convert it into a JSON, you can use https://irrte.ch/jwt-js-decode/pem2jwk.html -pub(crate) static SAMPLE_JWK_SK: Lazy<&RsaKeyPair> = Lazy::new(|| &*INSECURE_TEST_RSA_KEY_PAIR); +pub static SAMPLE_JWK_SK: Lazy<&RsaKeyPair> = Lazy::new(|| &*INSECURE_TEST_RSA_KEY_PAIR); pub(crate) const SAMPLE_UID_KEY: &str = "sub"; diff --git a/types/src/keyless/configuration.rs b/types/src/keyless/configuration.rs index b5b444110670a..58818d1a3063e 100644 --- a/types/src/keyless/configuration.rs +++ b/types/src/keyless/configuration.rs @@ -7,6 +7,7 @@ use crate::{ circuit_constants, circuit_testcases::SAMPLE_EXP_HORIZON_SECS, KEYLESS_ACCOUNT_MODULE_NAME, }, move_utils::as_move_value::AsMoveValue, + on_chain_config::OnChainConfig, }; use move_core_types::{ ident_str, @@ -92,3 +93,8 @@ impl Configuration { } } } + +impl OnChainConfig for Configuration { + const MODULE_IDENTIFIER: &'static str = KEYLESS_ACCOUNT_MODULE_NAME; + const TYPE_IDENTIFIER: &'static str = "Configuration"; +} diff --git a/types/src/keyless/mod.rs b/types/src/keyless/mod.rs index 1e92aa1abf629..39b546673a723 100644 --- a/types/src/keyless/mod.rs +++ b/types/src/keyless/mod.rs @@ -21,8 +21,8 @@ use std::{ time::{Duration, SystemTime, UNIX_EPOCH}, }; -mod bn254_circom; -mod circuit_constants; +pub mod bn254_circom; +pub mod circuit_constants; pub mod circuit_testcases; mod configuration; mod groth16_sig; @@ -420,7 +420,7 @@ pub fn get_authenticators( Ok(authenticators) } -pub(crate) fn base64url_encode_str(data: &str) -> String { +pub fn base64url_encode_str(data: &str) -> String { base64::encode_config(data.as_bytes(), URL_SAFE_NO_PAD) } diff --git a/types/src/keyless/test_utils.rs b/types/src/keyless/test_utils.rs index c8babbd5913af..1d275ff3c8b30 100644 --- a/types/src/keyless/test_utils.rs +++ b/types/src/keyless/test_utils.rs @@ -29,7 +29,7 @@ use ark_groth16::{prepare_verifying_key, PreparedVerifyingKey}; use base64::{encode_config, URL_SAFE_NO_PAD}; use move_core_types::account_address::AccountAddress; use once_cell::sync::Lazy; -use ring::signature; +use ring::{signature, signature::RsaKeyPair}; static DUMMY_EPHEMERAL_SIGNATURE: Lazy = Lazy::new(|| { let sk = Ed25519PrivateKey::generate_for_testing(); @@ -308,6 +308,20 @@ pub fn get_sample_jwt_token_from_payload(payload: &str) -> String { format!("{}.{}", msg, base64url_string) } +pub fn oidc_provider_sign(sk: &RsaKeyPair, msg: &[u8]) -> Vec { + let mut jwt_sig = vec![0u8; sk.public_modulus_len()]; + let rng = ring::rand::SystemRandom::new(); + sk.sign( + &signature::RSA_PKCS1_SHA256, + &rng, + msg, + jwt_sig.as_mut_slice(), + ) + .unwrap(); + + jwt_sig +} + /// Note: Does not have a valid ephemeral signature. Use the SAMPLE_ESK to compute one over the /// desired TXN. 
pub fn get_sample_openid_sig_and_pk() -> (KeylessSignature, KeylessPublicKey) { diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs index 19ba7b7d1fb18..905c907b7df4e 100644 --- a/types/src/on_chain_config/aptos_features.rs +++ b/types/src/on_chain_config/aptos_features.rs @@ -61,7 +61,7 @@ pub enum FeatureFlag { COMMISSION_CHANGE_DELEGATION_POOL = 42, BN254_STRUCTURES = 43, WEBAUTHN_SIGNATURE = 44, - RECONFIGURE_WITH_DKG = 45, + _DEPRECATED_RECONFIGURE_WITH_DKG = 45, KEYLESS_ACCOUNTS = 46, KEYLESS_BUT_ZKLESS_ACCOUNTS = 47, REMOVE_DETAILED_ERROR_FROM_HASH = 48, @@ -147,7 +147,6 @@ impl FeatureFlag { FeatureFlag::RESOURCE_GROUPS_SPLIT_IN_VM_CHANGE_SET, FeatureFlag::COMMISSION_CHANGE_DELEGATION_POOL, FeatureFlag::WEBAUTHN_SIGNATURE, - // FeatureFlag::RECONFIGURE_WITH_DKG, //TODO: re-enable once randomness is ready. FeatureFlag::KEYLESS_ACCOUNTS, FeatureFlag::FEDERATED_KEYLESS, FeatureFlag::KEYLESS_BUT_ZKLESS_ACCOUNTS, @@ -178,6 +177,7 @@ FeatureFlag::REJECT_UNSTABLE_BYTECODE_FOR_SCRIPT, FeatureFlag::TRANSACTION_SIMULATION_ENHANCEMENT, FeatureFlag::NATIVE_MEMORY_OPERATIONS, + FeatureFlag::COLLECTION_OWNER, FeatureFlag::ENABLE_LOADER_V2, ] } diff --git a/types/src/on_chain_config/timed_features.rs b/types/src/on_chain_config/timed_features.rs index c3161c47fedac..ea7cafa68a7b9 100644 --- a/types/src/on_chain_config/timed_features.rs +++ b/types/src/on_chain_config/timed_features.rs @@ -15,6 +15,7 @@ pub enum TimedFeatureFlag { DisableInvariantViolationCheckInSwapLoc, LimitTypeTagSize, ModuleComplexityCheck, + EntryCompatibility, } /// Representation of features that are gated by the block timestamps. @@ -43,10 +44,14 @@ impl TimedFeatureOverride { Replay => match flag { LimitTypeTagSize => true, ModuleComplexityCheck => true, + EntryCompatibility => true, // Add overrides for replay here. _ => return None, }, - Testing => false, // Activate all flags + Testing => match flag { + EntryCompatibility => true, + _ => return None, // No override: fall back to time-based activation. + }, }) } } @@ -63,6 +68,9 @@ impl TimedFeatureFlag { (ModuleComplexityCheck, TESTNET) => 1719356400000, /* Tuesday, June 25, 2024 16:00:00 GMT-07:00 */ (ModuleComplexityCheck, MAINNET) => 1720033200000, /* Wednesday, July 3, 2024 12:00:00 PM GMT-07:00 */ + (EntryCompatibility, TESTNET) => 1730923200000, /* Wednesday, Nov 6, 2024 12:00:00 PM GMT-08:00 */ + (EntryCompatibility, MAINNET) => 1731441600000, /* Tuesday, Nov 12, 2024 12:00:00 PM GMT-08:00 */ + // If unspecified, a timed feature is considered enabled from the very beginning of time. _ => 0, }
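The timed-feature change above combines two layers: a per-profile override (Replay/Testing) that can force a flag on, and a per-(flag, chain) activation timestamp in Unix epoch milliseconds, where an unspecified pair defaults to 0 (enabled from the beginning of time). A minimal sketch of that decision order, assuming exactly these semantics (names are illustrative, not the crate's API):

fn timed_flag_enabled(override_result: Option<bool>, activation_ms: u64, block_time_ms: u64) -> bool {
    match override_result {
        // A profile override (e.g. Replay or Testing) decides the flag outright.
        Some(forced) => forced,
        // Otherwise the flag turns on once the block timestamp reaches activation time.
        None => block_time_ms >= activation_ms,
    }
}

Under this reading, EntryCompatibility is forced on under both the Replay and Testing overrides, and otherwise activates at the Nov 6, 2024 (testnet) and Nov 12, 2024 (mainnet) timestamps listed above.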