From 33eb0d987f2b06428c96bcfa05e91469afe0ce9c Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Sat, 8 May 2021 22:17:58 +0800 Subject: [PATCH 01/18] fix: load cell data hash by input Ensure load_cell_data_hash is consistent with the load_cell_data behavior --- chain/src/tests/load_input_data_hash_cell.rs | 14 +++---------- script/src/syscalls/load_cell.rs | 17 +++++++++------- script/src/syscalls/mod.rs | 21 ++++++++++++++++++++ script/src/verify.rs | 3 ++- tx-pool/src/component/pending.rs | 7 ++----- tx-pool/src/component/proposed.rs | 7 ++----- util/test-chain-utils/src/mock_store.rs | 7 ++----- util/types/src/core/cell.rs | 7 ++----- 8 files changed, 44 insertions(+), 39 deletions(-) diff --git a/chain/src/tests/load_input_data_hash_cell.rs b/chain/src/tests/load_input_data_hash_cell.rs index b3b77fb81b..a6556c079e 100644 --- a/chain/src/tests/load_input_data_hash_cell.rs +++ b/chain/src/tests/load_input_data_hash_cell.rs @@ -4,7 +4,6 @@ use crate::tests::util::{ use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_dao_utils::genesis_dao_data; use ckb_test_chain_utils::load_input_data_hash_cell; -use ckb_tx_pool::{PlugTarget, TxEntry}; use ckb_types::prelude::*; use ckb_types::{ bytes::Bytes, @@ -49,7 +48,7 @@ pub(crate) fn create_load_input_data_hash_transaction( .build() } -// Ensure tx-pool reject tx which calls syscall load_cell_data_hash from input +// Ensure tx-pool accept tx which calls syscall load_cell_data_hash from input #[test] fn test_load_input_data_hash_cell() { let (_, _, load_input_data_hash_script) = load_input_data_hash_cell(); @@ -87,15 +86,8 @@ fn test_load_input_data_hash_cell() { let tx_pool = shared.tx_pool_controller(); let ret = tx_pool.submit_local_tx(tx0.clone()).unwrap(); - assert!(ret.is_err()); - //ValidationFailure(2) missing item - assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)")); + assert!(ret.is_ok()); - let entry0 = vec![TxEntry::dummy_resolve(tx0, 0, Capacity::shannons(0), 100)]; - tx_pool.plug_entry(entry0, PlugTarget::Proposed).unwrap(); - - // Ensure tx which calls syscall load_cell_data_hash will got reject even previous tx is already in tx-pool let ret = tx_pool.submit_local_tx(tx1).unwrap(); - assert!(ret.is_err()); - assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)")); + assert!(ret.is_ok()); } diff --git a/script/src/syscalls/load_cell.rs b/script/src/syscalls/load_cell.rs index a34cd0cb04..9a2d85710d 100644 --- a/script/src/syscalls/load_cell.rs +++ b/script/src/syscalls/load_cell.rs @@ -6,6 +6,7 @@ use crate::{ }, }; use byteorder::{LittleEndian, WriteBytesExt}; +use ckb_traits::CellDataProvider; use ckb_types::{ core::{cell::CellMeta, Capacity}, packed::CellOutput, @@ -16,7 +17,8 @@ use ckb_vm::{ Error as VMError, Register, SupportMachine, Syscalls, }; -pub struct LoadCell<'a> { +pub struct LoadCell<'a, DL> { + data_loader: &'a DL, outputs: &'a [CellMeta], resolved_inputs: &'a [CellMeta], resolved_cell_deps: &'a [CellMeta], @@ -24,15 +26,17 @@ pub struct LoadCell<'a> { group_outputs: &'a [usize], } -impl<'a> LoadCell<'a> { +impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { pub fn new( + data_loader: &'a DL, outputs: &'a [CellMeta], resolved_inputs: &'a [CellMeta], resolved_cell_deps: &'a [CellMeta], group_inputs: &'a [usize], group_outputs: &'a [usize], - ) -> LoadCell<'a> { + ) -> LoadCell<'a, DL> { LoadCell { + data_loader, outputs, resolved_inputs, resolved_cell_deps, @@ -98,9 +102,8 @@ impl<'a> LoadCell<'a> { (SUCCESS, store_data(machine, &buffer)?) 
} CellField::DataHash => { - if let Some(data_hash) = &cell.mem_cell_data_hash { - let bytes = data_hash.raw_data(); - (SUCCESS, store_data(machine, &bytes)?) + if let Some(bytes) = self.data_loader.load_cell_data_hash(cell) { + (SUCCESS, store_data(machine, &bytes.as_bytes())?) } else { (ITEM_MISSING, 0) } @@ -144,7 +147,7 @@ impl<'a> LoadCell<'a> { } } -impl<'a, Mac: SupportMachine> Syscalls for LoadCell<'a> { +impl<'a, Mac: SupportMachine, DL: CellDataProvider> Syscalls for LoadCell<'a, DL> { fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { Ok(()) } diff --git a/script/src/syscalls/mod.rs b/script/src/syscalls/mod.rs index 1382cb75fd..f3864aa036 100644 --- a/script/src/syscalls/mod.rs +++ b/script/src/syscalls/mod.rs @@ -244,7 +244,10 @@ mod tests { let resolved_cell_deps = vec![]; let group_inputs = vec![]; let group_outputs = vec![]; + let store = new_store(); + let data_loader = DataLoaderWrapper::new(&store); let mut load_cell = LoadCell::new( + &data_loader, &outputs, &resolved_inputs, &resolved_cell_deps, @@ -285,7 +288,10 @@ mod tests { let resolved_cell_deps = vec![]; let group_inputs = vec![]; let group_outputs = vec![]; + let store = new_store(); + let data_loader = DataLoaderWrapper::new(&store); let mut load_cell = LoadCell::new( + &data_loader, &outputs, &resolved_inputs, &resolved_cell_deps, @@ -374,7 +380,10 @@ mod tests { let resolved_cell_deps = vec![]; let group_inputs = vec![]; let group_outputs = vec![]; + let store = new_store(); + let data_loader = DataLoaderWrapper::new(&store); let mut load_cell = LoadCell::new( + &data_loader, &outputs, &resolved_inputs, &resolved_cell_deps, @@ -425,7 +434,10 @@ mod tests { let resolved_cell_deps = vec![]; let group_inputs = vec![]; let group_outputs = vec![]; + let store = new_store(); + let data_loader = DataLoaderWrapper::new(&store); let mut load_cell = LoadCell::new( + &data_loader, &outputs, &resolved_inputs, &resolved_cell_deps, @@ -489,7 +501,10 @@ mod tests { let resolved_cell_deps = vec![]; let group_inputs = vec![]; let group_outputs = vec![]; + let store = new_store(); + let data_loader = DataLoaderWrapper::new(&store); let mut load_cell = LoadCell::new( + &data_loader, &outputs, &resolved_inputs, &resolved_cell_deps, @@ -540,7 +555,10 @@ mod tests { let resolved_cell_deps = vec![]; let group_inputs = vec![]; let group_outputs = vec![]; + let store = new_store(); + let data_loader = DataLoaderWrapper::new(&store); let mut load_cell = LoadCell::new( + &data_loader, &outputs, &resolved_inputs, &resolved_cell_deps, @@ -878,7 +896,10 @@ mod tests { let resolved_cell_deps = vec![]; let group_inputs = vec![]; let group_outputs = vec![]; + let store = new_store(); + let data_loader = DataLoaderWrapper::new(&store); let mut load_cell = LoadCell::new( + &data_loader, &outputs, &resolved_inputs, &resolved_cell_deps, diff --git a/script/src/verify.rs b/script/src/verify.rs index 7309f79bb6..a78de642a9 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -205,8 +205,9 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D &'a self, group_inputs: &'a [usize], group_outputs: &'a [usize], - ) -> LoadCell<'a> { + ) -> LoadCell<'a, DL> { LoadCell::new( + &self.data_loader, &self.outputs, self.resolved_inputs(), self.resolved_cell_deps(), diff --git a/tx-pool/src/component/pending.rs b/tx-pool/src/component/pending.rs index 0563630530..64a0b8c0d9 100644 --- a/tx-pool/src/component/pending.rs +++ b/tx-pool/src/component/pending.rs @@ -107,7 +107,7 @@ impl PendingQueue { } 
impl CellProvider for PendingQueue { - fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { let tx_hash = out_point.tx_hash(); if let Some(entry) = self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) { match entry @@ -115,12 +115,9 @@ impl CellProvider for PendingQueue { .output_with_data(out_point.index().unpack()) { Some((output, data)) => { - let mut cell_meta = CellMetaBuilder::from_cell_output(output, data) + let cell_meta = CellMetaBuilder::from_cell_output(output, data) .out_point(out_point.to_owned()) .build(); - if !with_data { - cell_meta.mem_cell_data_hash = None; - } CellStatus::live_cell(cell_meta) } None => CellStatus::Unknown, diff --git a/tx-pool/src/component/proposed.rs b/tx-pool/src/component/proposed.rs index c87507b223..22f8b9b333 100644 --- a/tx-pool/src/component/proposed.rs +++ b/tx-pool/src/component/proposed.rs @@ -97,19 +97,16 @@ pub struct ProposedPool { } impl CellProvider for ProposedPool { - fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { if let Some(x) = self.edges.get_output_ref(out_point) { // output consumed if x.is_some() { CellStatus::Dead } else { let (output, data) = self.get_output_with_data(out_point).expect("output"); - let mut cell_meta = CellMetaBuilder::from_cell_output(output, data) + let cell_meta = CellMetaBuilder::from_cell_output(output, data) .out_point(out_point.to_owned()) .build(); - if !with_data { - cell_meta.mem_cell_data_hash = None; - } CellStatus::live_cell(cell_meta) } } else if self.edges.get_input_ref(out_point).is_some() { diff --git a/util/test-chain-utils/src/mock_store.rs b/util/test-chain-utils/src/mock_store.rs index 7ac815a646..896e456b19 100644 --- a/util/test-chain-utils/src/mock_store.rs +++ b/util/test-chain-utils/src/mock_store.rs @@ -71,7 +71,7 @@ impl MockStore { } impl CellProvider for MockStore { - fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { match self.0.get_transaction(&out_point.tx_hash()) { Some((tx, _)) => tx .outputs() @@ -82,12 +82,9 @@ impl CellProvider for MockStore { .get(out_point.index().unpack()) .expect("output data"); - let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack()) + let cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack()) .out_point(out_point.to_owned()) .build(); - if !with_data { - cell_meta.mem_cell_data = None; - } CellStatus::live_cell(cell_meta) }) diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index 3a503a1ca1..75671232b7 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -527,7 +527,7 @@ impl<'a> TransactionsProvider<'a> { } impl<'a> CellProvider for TransactionsProvider<'a> { - fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { match self.transactions.get(&out_point.tx_hash()) { Some(tx) => tx .outputs() @@ -538,10 +538,7 @@ impl<'a> CellProvider for TransactionsProvider<'a> { .get(out_point.index().unpack()) .expect("output data") .raw_data(); - let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data).build(); - if !with_data { - cell_meta.mem_cell_data_hash = None; - } + let cell_meta = CellMetaBuilder::from_cell_output(cell, data).build(); CellStatus::live_cell(cell_meta) }) .unwrap_or(CellStatus::Unknown), 
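A minimal sketch of the call pattern this patch establishes for `LoadCell`, mirroring the test setup in `script/src/syscalls/mod.rs` above (the `new_store()` and `DataLoaderWrapper` helpers are the ones already used there; other call sites would substitute their own `CellDataProvider` implementation):

    // The store-backed data loader must outlive the syscall object, since
    // LoadCell now borrows it to resolve CellField::DataHash on demand
    // instead of reading the cached mem_cell_data_hash field.
    let store = new_store();
    let data_loader = DataLoaderWrapper::new(&store);

    // LoadCell::new gains a leading data_loader argument; the remaining
    // arguments are unchanged.
    let mut load_cell = LoadCell::new(
        &data_loader,
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );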
From e64023601c0570770bb446505d6ef73cb2d7fafc Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Thu, 20 May 2021 05:45:42 +0800 Subject: [PATCH 02/18] ci: run ci temporarily for development --- .travis.yml | 16 ++++++++-------- azure-pipelines.yml | 6 ++++-- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/.travis.yml b/.travis.yml index e52b601db7..1546d4ff9a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -64,17 +64,17 @@ matrix: include: # We don't run tests, linters and quck check in fork branch, since they will be covered in PR. - name: Tests on macOS - if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying))' + if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/)' os: osx - name: Tests on Linux - if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux - name: PR Integration if: 'tag IS NOT present AND branch != develop AND branch !~ /^rc\// AND (type = pull_request OR repo != nervosnetwork/ckb)' os: linux script: make CKB_TEST_SEC_COEFFICIENT=5 CKB_TEST_ARGS="-c 4 --no-report" integration - name: Linters - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux install: - cargo fmt --version || travis_retry rustup component add rustfmt @@ -99,7 +99,7 @@ matrix: - make clippy - mv rust-toolchain.bak rust-toolchain - name: Quick Check - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux cache: false addons: { apt: { packages: [] } } @@ -119,7 +119,7 @@ matrix: script: - devtools/ci/check-cyclic-dependencies.py --dev - name: Security Audit & Licenses - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux install: - cargo deny --version || travis_retry cargo install cargo-deny --locked @@ -129,7 +129,7 @@ matrix: - make check-licenses - name: WASM build - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux script: - export PATH=/usr/lib/llvm-8/bin:$PATH @@ -141,11 +141,11 @@ matrix: os: linux script: make bench-test - name: Integration on macOS - if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^rc\// OR (branch = develop AND commit_message !~ /^Merge #\d+/))' + if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR branch =~ /^rc\// OR (branch = develop 
AND commit_message !~ /^Merge #\d+/))' os: osx script: make CKB_TEST_ARGS="-c 1 --no-report" integration - name: Integration on Linux - if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^rc\// OR (branch = develop AND commit_message !~ /^Merge #\d+/))' + if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR branch =~ /^rc\// OR (branch = develop AND commit_message !~ /^Merge #\d+/))' os: linux script: make CKB_TEST_ARGS="-c 1 --no-report" integration - name: Code Coverage diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a58ea92e92..1d120206d6 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,7 +19,8 @@ jobs: eq(variables['Build.Reason'], 'PullRequest'), ne(variables['System.PullRequest.SourceBranch'], 'develop') ), - eq(variables['Build.SourceBranch'], 'refs/heads/master') + eq(variables['Build.SourceBranch'], 'refs/heads/master'), + startsWith(variables['Build.SourceBranch'], 'refs/heads/ckb2021') ) ) pool: @@ -41,7 +42,8 @@ jobs: ne(variables['Build.Reason'], 'PullRequest'), or( startsWith(variables['Build.SourceBranch'], 'refs/heads/rc/'), - in(variables['Build.SourceBranch'], 'refs/heads/master', 'refs/heads/develop', 'refs/heads/staging2', 'refs/heads/trying') + in(variables['Build.SourceBranch'], 'refs/heads/master', 'refs/heads/develop', 'refs/heads/staging2', 'refs/heads/trying'), + startsWith(variables['Build.SourceBranch'], 'refs/heads/ckb2021') ) ) pool: From 3f35ca84c41fd276402b7dd391d6b5a78092b9c5 Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Thu, 20 May 2021 07:16:15 +0800 Subject: [PATCH 03/18] feat(hardfork): setup the components for hard fork features --- Cargo.lock | 1 + spec/Cargo.toml | 1 + spec/src/consensus.rs | 19 +++- spec/src/hardfork.rs | 69 ++++++++++++ spec/src/lib.rs | 28 ++++- util/constant/src/hardfork/mainnet.rs | 3 + util/constant/src/hardfork/mod.rs | 4 + util/constant/src/hardfork/testnet.rs | 3 + util/constant/src/lib.rs | 2 + util/types/src/core/hardfork.rs | 153 ++++++++++++++++++++++++++ util/types/src/core/mod.rs | 1 + 11 files changed, 280 insertions(+), 4 deletions(-) create mode 100644 spec/src/hardfork.rs create mode 100644 util/constant/src/hardfork/mainnet.rs create mode 100644 util/constant/src/hardfork/mod.rs create mode 100644 util/constant/src/hardfork/testnet.rs create mode 100644 util/types/src/core/hardfork.rs diff --git a/Cargo.lock b/Cargo.lock index 05ba4ca806..b82111bcea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -549,6 +549,7 @@ dependencies = [ name = "ckb-chain-spec" version = "0.43.0-pre" dependencies = [ + "ckb-constant", "ckb-crypto", "ckb-dao-utils", "ckb-error", diff --git a/spec/Cargo.toml b/spec/Cargo.toml index 9e9f8a09ee..f6d8936fa8 100644 --- a/spec/Cargo.toml +++ b/spec/Cargo.toml @@ -11,6 +11,7 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] serde = { version = "1.0", features = ["derive"] } toml = "0.5" +ckb-constant = { path = "../util/constant", version = "= 0.43.0-pre" } ckb-types = { path = "../util/types", version = "= 0.43.0-pre" } ckb-pow = { path = "../pow", version = "= 0.43.0-pre" } ckb-resource = { path = "../resource", version = "= 0.43.0-pre" } diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index e1279e377d..baea2f1616 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -16,8 +16,9 @@ use ckb_types::{ bytes::Bytes, constants::{BLOCK_VERSION, TX_VERSION}, core::{ - BlockBuilder, BlockNumber, 
BlockView, Capacity, Cycle, EpochExt, EpochNumber, - EpochNumberWithFraction, HeaderView, Ratio, TransactionBuilder, TransactionView, Version, + hardfork::HardForkSwitch, BlockBuilder, BlockNumber, BlockView, Capacity, Cycle, EpochExt, + EpochNumber, EpochNumberWithFraction, HeaderView, Ratio, TransactionBuilder, + TransactionView, Version, }, h160, h256, packed::{Byte32, CellInput, CellOutput, Script}, @@ -270,6 +271,7 @@ impl ConsensusBuilder { primary_epoch_reward_halving_interval: DEFAULT_PRIMARY_EPOCH_REWARD_HALVING_INTERVAL, permanent_difficulty_in_dummy: false, + hardfork_switch: HardForkSwitch::new_without_any_enabled(), }, } } @@ -445,6 +447,12 @@ impl ConsensusBuilder { self.inner.max_block_proposals_limit = max_block_proposals_limit; self } + + /// Sets a hard fork switch for the new Consensus. + pub fn hardfork_switch(mut self, hardfork_switch: HardForkSwitch) -> Self { + self.inner.hardfork_switch = hardfork_switch; + self + } } /// Struct Consensus defines various parameters that influence chain consensus @@ -519,6 +527,8 @@ pub struct Consensus { pub primary_epoch_reward_halving_interval: EpochNumber, /// Keep difficulty be permanent if the pow is dummy pub permanent_difficulty_in_dummy: bool, + /// A switch to select hard fork features base on the epoch number. + pub hardfork_switch: HardForkSwitch, } // genesis difficulty should not be zero @@ -909,6 +919,11 @@ impl Consensus { self.primary_epoch_reward(epoch.number() + 1) } } + + /// Returns the hardfork switch. + pub fn hardfork_switch(&self) -> &HardForkSwitch { + &self.hardfork_switch + } } /// Corresponding epoch information of next block diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs new file mode 100644 index 0000000000..8d9fce59f9 --- /dev/null +++ b/spec/src/hardfork.rs @@ -0,0 +1,69 @@ +//! Hard forks parameters. + +use ckb_constant::hardfork::{mainnet, testnet}; +use ckb_types::core::{ + hardfork::{HardForkSwitch, HardForkSwitchBuilder}, + EpochNumber, +}; +use serde::{Deserialize, Serialize}; + +/// Hard forks parameters for spec. +#[derive(Default, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +pub struct HardForkConfig { + /// Just a dummy field to test hard fork feature. + pub rfc_0000: Option, +} + +macro_rules! check_default { + ($config:ident, $feature:ident, $expected:expr) => { + match $config.$feature { + Some(input) if input != $expected => { + let errmsg = format!( + "The value for hard fork feature \"{}\" is incorrect, actual: {}, expected: {}. + Don't set it for mainnet or testnet, or set it as a correct value.", + stringify!($feature), + input, + $expected, + ); + Err(errmsg) + }, + _ => Ok($expected), + }? + }; +} + +impl HardForkConfig { + /// If all parameters which have been set are correct for mainnet, then + /// sets all `None` to default values, otherwise, return an `Err`. + pub fn complete_mainnet(&self) -> Result { + let mut b = HardForkSwitch::new_builder(); + b = self.update_builder_via_edition(b, mainnet::CKB2021_START_EPOCH)?; + b.build() + } + + /// If all parameters which have been set are correct for testnet, then + /// sets all `None` to default values, otherwise, return an `Err`. 
+ pub fn complete_testnet(&self) -> Result { + let mut b = HardForkSwitch::new_builder(); + b = self.update_builder_via_edition(b, testnet::CKB2021_START_EPOCH)?; + b.build() + } + + fn update_builder_via_edition( + &self, + builder: HardForkSwitchBuilder, + ckb2021: EpochNumber, + ) -> Result { + let builder = builder.rfc_0000(check_default!(self, rfc_0000, ckb2021)); + Ok(builder) + } + + /// Converts to a hard fork switch. + /// + /// Enable features which are set to `None` at the user provided epoch. + pub fn complete_with_default(&self, default: EpochNumber) -> Result { + HardForkSwitch::new_builder() + .rfc_0000(self.rfc_0000.unwrap_or(default)) + .build() + } +} diff --git a/spec/src/lib.rs b/spec/src/lib.rs index 5d2fca0e88..74a87a71d6 100644 --- a/spec/src/lib.rs +++ b/spec/src/lib.rs @@ -25,8 +25,9 @@ use ckb_resource::{ use ckb_types::{ bytes::Bytes, core::{ - capacity_bytes, BlockBuilder, BlockNumber, BlockView, Capacity, Cycle, EpochNumber, - EpochNumberWithFraction, Ratio, ScriptHashType, TransactionBuilder, TransactionView, + capacity_bytes, hardfork::HardForkSwitch, BlockBuilder, BlockNumber, BlockView, Capacity, + Cycle, EpochNumber, EpochNumberWithFraction, Ratio, ScriptHashType, TransactionBuilder, + TransactionView, }, h256, packed, prelude::*, @@ -40,9 +41,11 @@ use std::fmt; use std::sync::Arc; pub use error::SpecError; +pub use hardfork::HardForkConfig; pub mod consensus; mod error; +mod hardfork; // Just a random secp256k1 secret key for dep group input cell's lock const SPECIAL_CELL_PRIVKEY: H256 = @@ -220,6 +223,11 @@ pub struct Params { /// See [`orphan_rate_target`](consensus/struct.Consensus.html#structfield.orphan_rate_target) #[serde(skip_serializing_if = "Option::is_none")] pub orphan_rate_target: Option<(u32, u32)>, + /// The parameters for hard fork features. + /// + /// See [`hardfork_switch`](consensus/struct.Consensus.html#structfield.hardfork_switch) + #[serde(skip_serializing_if = "Option::is_none")] + pub hardfork: Option, } impl Params { @@ -459,10 +467,25 @@ impl ChainSpec { Ok(()) } + /// Completes all parameters for hard fork features and creates a hard fork switch. + /// + /// Verify the parameters for mainnet and testnet, because all start epoch numbers + /// for mainnet and testnet are fixed. + fn build_hardfork_switch(&self) -> Result> { + let config = self.params.hardfork.as_ref().cloned().unwrap_or_default(); + match self.name.as_str() { + "mainnet" => config.complete_mainnet(), + "testnet" => config.complete_testnet(), + _ => config.complete_with_default(0), + } + .map_err(Into::into) + } + /// Build consensus instance /// /// [Consensus](consensus/struct.Consensus.html) pub fn build_consensus(&self) -> Result> { + let hardfork_switch = self.build_hardfork_switch()?; let genesis_epoch_ext = build_genesis_epoch_ext( self.params.initial_primary_epoch_reward(), self.genesis.compact_target, @@ -492,6 +515,7 @@ impl ChainSpec { .permanent_difficulty_in_dummy(self.params.permanent_difficulty_in_dummy()) .max_block_proposals_limit(self.params.max_block_proposals_limit()) .orphan_rate_target(self.params.orphan_rate_target()) + .hardfork_switch(hardfork_switch) .build(); Ok(consensus) diff --git a/util/constant/src/hardfork/mainnet.rs b/util/constant/src/hardfork/mainnet.rs new file mode 100644 index 0000000000..034af0f7a3 --- /dev/null +++ b/util/constant/src/hardfork/mainnet.rs @@ -0,0 +1,3 @@ +// TODO ckb2021 Update the epoch number for mainnet. 
+/// First epoch number for CKB v2021 +pub const CKB2021_START_EPOCH: u64 = u64::MAX; diff --git a/util/constant/src/hardfork/mod.rs b/util/constant/src/hardfork/mod.rs new file mode 100644 index 0000000000..7440163b8f --- /dev/null +++ b/util/constant/src/hardfork/mod.rs @@ -0,0 +1,4 @@ +/// Hardfork constant for mainnet. +pub mod mainnet; +/// Hardfork constant for testnet. +pub mod testnet; diff --git a/util/constant/src/hardfork/testnet.rs b/util/constant/src/hardfork/testnet.rs new file mode 100644 index 0000000000..23a7d3adf2 --- /dev/null +++ b/util/constant/src/hardfork/testnet.rs @@ -0,0 +1,3 @@ +// TODO ckb2021 Update the epoch number for testnet. +/// First epoch number for CKB v2021 +pub const CKB2021_START_EPOCH: u64 = u64::MAX; diff --git a/util/constant/src/lib.rs b/util/constant/src/lib.rs index 3403241463..85eb438468 100644 --- a/util/constant/src/lib.rs +++ b/util/constant/src/lib.rs @@ -1,5 +1,7 @@ //! Collect constants used across ckb components. +/// hardfork constant +pub mod hardfork; /// store constant pub mod store; /// sync constant diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs new file mode 100644 index 0000000000..ed80e29cd8 --- /dev/null +++ b/util/types/src/core/hardfork.rs @@ -0,0 +1,153 @@ +//! Hard forks related types. + +use crate::core::EpochNumber; + +// Defines all methods for a feature. +macro_rules! define_methods { + ($feature:ident, $name_getter:ident, + $name_if_enabled:ident, $name_disable:ident, $rfc_name:literal) => { + define_methods!( + $feature, + $name_getter, + $name_if_enabled, + $name_disable, + concat!( + "Return the first epoch number when the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ") is enabled." + ), + concat!( + "An alias for the method [", + stringify!($feature), + "(&self)](#method.", + stringify!($feature), + ") to let the code to be more readable." + ), + concat!( + "If the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ") is enabled at the provided epoch." + ), + concat!( + "Set the first epoch number of the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ")." + ), + concat!( + "Never enable the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ")." + ) + ); + }; + ($feature:ident, $name_getter_alias:ident, + $name_if_enabled:ident, $name_disable:ident, + $comment_getter:expr,$comment_getter_alias:expr, $comment_if_enabled:expr, + $comment_setter:expr, $comment_disable:expr) => { + impl HardForkSwitch { + #[doc = $comment_getter] + #[inline] + pub fn $feature(&self) -> EpochNumber { + self.$feature + } + #[doc = $comment_getter_alias] + #[inline] + pub fn $name_getter_alias(&self) -> EpochNumber { + self.$feature + } + #[doc = $comment_if_enabled] + #[inline] + pub fn $name_if_enabled(&self, epoch_number: EpochNumber) -> bool { + epoch_number >= self.$feature + } + } + impl HardForkSwitchBuilder { + #[doc = $comment_setter] + #[inline] + pub fn $feature(mut self, epoch_number: EpochNumber) -> Self { + self.$feature = Some(epoch_number); + self + } + #[doc = $comment_disable] + #[inline] + pub fn $name_disable(mut self) -> Self { + self.$feature = Some(EpochNumber::MAX); + self + } + } + }; +} + +/// A switch to select hard fork features base on the epoch number. +/// +/// For safety, all fields are private and not allowed to update. 
+/// This structure can only be constructed by [`HardForkSwitchBuilder`]. +/// +/// [`HardForkSwitchBuilder`]: struct.HardForkSwitchBuilder.html +#[derive(Debug, Clone)] +pub struct HardForkSwitch { + // TODO hardfork Remove this feature after add real hardfork features. + rfc_0000: EpochNumber, +} + +/// Builder for [`HardForkSwitch`]. +/// +/// [`HardForkSwitch`]: struct.HardForkSwitch.html +#[derive(Debug, Clone, Default)] +pub struct HardForkSwitchBuilder { + rfc_0000: Option, +} + +impl HardForkSwitch { + /// Creates a new builder to build an instance. + pub fn new_builder() -> HardForkSwitchBuilder { + Default::default() + } + + /// Creates a new builder based on the current instance. + pub fn as_builder(&self) -> HardForkSwitchBuilder { + Self::new_builder().rfc_0000(self.rfc_0000()) + } + + /// Creates a new instance that all hard fork features are disabled forever. + pub fn new_without_any_enabled() -> Self { + // Use a builder to ensure all features are set manually. + Self::new_builder().disable_rfc_0000().build().unwrap() + } +} + +define_methods!( + rfc_0000, + dummy_feature, + is_dummy_feature_enabled, + disable_rfc_0000, + "RFC 0000" +); + +impl HardForkSwitchBuilder { + /// Build a new [`HardForkSwitch`]. + /// + /// Returns an error if failed at any check, for example, there maybe are some features depend + /// on others. + /// + /// [`HardForkSwitch`]: struct.HardForkSwitch.html + pub fn build(self) -> Result { + macro_rules! try_find { + ($feature:ident) => { + self.$feature.ok_or_else(|| { + concat!("The feature ", stringify!($feature), " isn't configured.").to_owned() + })?; + }; + } + let rfc_0000 = try_find!(rfc_0000); + Ok(HardForkSwitch { rfc_0000 }) + } +} diff --git a/util/types/src/core/mod.rs b/util/types/src/core/mod.rs index d0d9fc1f95..babf79a4b3 100644 --- a/util/types/src/core/mod.rs +++ b/util/types/src/core/mod.rs @@ -13,6 +13,7 @@ pub mod cell; pub mod error; +pub mod hardfork; pub mod service; pub mod tx_pool; From b546065b29d849807b0a913c3f732379b3e2d40b Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Thu, 20 May 2021 09:42:47 +0800 Subject: [PATCH 04/18] refactor: let verifiers know the real environment that the transaction is in --- spec/src/consensus.rs | 12 ++ test/src/specs/tx_pool/valid_since.rs | 44 ++++++- tx-pool/src/pool.rs | 10 +- tx-pool/src/process.rs | 26 +++- tx-pool/src/util.rs | 25 +--- util/snapshot/src/lib.rs | 8 +- util/types/src/core/advanced_builders.rs | 15 +++ util/types/src/core/extras.rs | 15 +++ .../src/contextual_block_verifier.rs | 72 +++++------ verification/src/lib.rs | 2 + .../src/tests/transaction_verifier.rs | 71 ++++++----- verification/src/transaction_verifier.rs | 106 +++++----------- verification/src/tx_verify_env.rs | 120 ++++++++++++++++++ 13 files changed, 346 insertions(+), 180 deletions(-) create mode 100644 verification/src/tx_verify_env.rs diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index baea2f1616..2bf066b662 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -392,6 +392,12 @@ impl ConsensusBuilder { self } + /// Sets median_time_block_count for the new Consensus. + pub fn median_time_block_count(mut self, median_time_block_count: usize) -> Self { + self.inner.median_time_block_count = median_time_block_count; + self + } + /// Sets tx_proposal_window for the new Consensus. pub fn tx_proposal_window(mut self, proposal_window: ProposalWindow) -> Self { self.inner.tx_proposal_window = proposal_window; @@ -926,6 +932,12 @@ impl Consensus { } } +/// Trait for consensus provider. 
+pub trait ConsensusProvider { + /// Returns the `Consensus`. + fn get_consensus(&self) -> &Consensus; +} + /// Corresponding epoch information of next block pub enum NextBlockEpoch { /// Next block is the head block of epoch diff --git a/test/src/specs/tx_pool/valid_since.rs b/test/src/specs/tx_pool/valid_since.rs index b8d0df1e17..886cd0c306 100644 --- a/test/src/specs/tx_pool/valid_since.rs +++ b/test/src/specs/tx_pool/valid_since.rs @@ -1,4 +1,7 @@ -use crate::util::mining::{mine, mine_until_out_bootstrap_period}; +use crate::util::{ + check, + mining::{mine, mine_until_out_bootstrap_period}, +}; use crate::utils::{ assert_send_transaction_fail, since_from_absolute_block_number, since_from_absolute_timestamp, since_from_relative_block_number, since_from_relative_timestamp, @@ -6,7 +9,7 @@ use crate::utils::{ use crate::{Node, Spec, DEFAULT_TX_PROPOSAL_WINDOW}; use ckb_logger::info; -use ckb_types::core::BlockNumber; +use ckb_types::core::{BlockNumber, TransactionView}; use std::thread::sleep; use std::time::Duration; @@ -32,7 +35,8 @@ impl Spec for ValidSince { impl ValidSince { pub fn test_since_relative_block_number(&self, node: &Node) { mine_until_out_bootstrap_period(node); - let relative: BlockNumber = 5; + let started_tip_number = node.get_tip_block_number(); + let relative: BlockNumber = 10; let since = since_from_relative_block_number(relative); let transaction = { let cellbase = node.get_tip_block().transactions()[0].clone(); @@ -40,7 +44,7 @@ impl ValidSince { }; // Failed to send transaction since SinceImmaturity - for _ in 1..relative { + for _ in 1..=(relative - 3) { assert_send_transaction_fail( node, &transaction, @@ -56,11 +60,13 @@ impl ValidSince { .is_ok(), "transaction is ok, tip is equal to relative since block number", ); + + Self::check_committing_process(node, &transaction, started_tip_number + relative); } pub fn test_since_absolute_block_number(&self, node: &Node) { mine_until_out_bootstrap_period(node); - let absolute: BlockNumber = node.rpc_client().get_tip_block_number() + 5; + let absolute: BlockNumber = node.rpc_client().get_tip_block_number() + 10; let since = since_from_absolute_block_number(absolute); let transaction = { let cellbase = node.get_tip_block().transactions()[0].clone(); @@ -69,7 +75,7 @@ impl ValidSince { // Failed to send transaction since SinceImmaturity let tip_number = node.rpc_client().get_tip_block_number(); - for _ in tip_number + 1..absolute { + for _ in tip_number + 1..=(absolute - 3) { assert_send_transaction_fail( node, &transaction, @@ -85,6 +91,8 @@ impl ValidSince { .is_ok(), "transaction is ok, tip is equal to absolute since block number", ); + + Self::check_committing_process(node, &transaction, absolute); } pub fn test_since_relative_median_time(&self, node: &Node) { @@ -255,4 +263,28 @@ impl ValidSince { mine(&node, 1); node.assert_tx_pool_size(0, 0); } + + fn check_committing_process( + node: &Node, + transaction: &TransactionView, + committed_at: BlockNumber, + ) { + // Pending + node.assert_tx_pool_size(1, 0); + assert!(check::is_transaction_pending(node, transaction)); + // Gap + mine(&node, 1); + node.assert_tx_pool_size(1, 0); + assert!(check::is_transaction_pending(node, transaction)); + // Proposed + mine(&node, 1); + node.assert_tx_pool_size(0, 1); + assert!(check::is_transaction_proposed(node, transaction)); + // Committed + mine(&node, 1); + node.assert_tx_pool_size(0, 0); + assert!(check::is_transaction_committed(node, transaction)); + + assert_eq!(node.get_tip_block_number(), committed_at); + } } diff --git 
a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 014bb45f1c..2aa4a726af 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -18,7 +18,7 @@ use ckb_types::{ }, packed::{Byte32, OutPoint, ProposalShortId}, }; -use ckb_verification::cache::CacheEntry; +use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use faketime::unix_time_as_millis; use lru::LruCache; use std::collections::HashSet; @@ -352,7 +352,9 @@ impl TxPool { self.check_rtx_from_pending_and_proposed(&rtx)?; let snapshot = self.snapshot(); let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx(snapshot, &rtx, cache_entry, max_cycles)?; + let tip_header = snapshot.tip_header(); + let tx_env = TxVerifyEnv::new_proposed(tip_header, 0); + let verified = verify_rtx(snapshot, &rtx, &tx_env, cache_entry, max_cycles)?; let entry = TxEntry::new(rtx, verified.cycles, verified.fee, size); let tx_hash = entry.transaction().hash(); @@ -372,7 +374,9 @@ impl TxPool { self.check_rtx_from_proposed(&rtx)?; let snapshot = self.snapshot(); let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx(snapshot, &rtx, cache_entry, max_cycles)?; + let tip_header = snapshot.tip_header(); + let tx_env = TxVerifyEnv::new_proposed(tip_header, 1); + let verified = verify_rtx(snapshot, &rtx, &tx_env, cache_entry, max_cycles)?; let entry = TxEntry::new(rtx, verified.cycles, verified.fee, size); let tx_hash = entry.transaction().hash(); diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index d151d447ef..2e32f47c1f 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -24,14 +24,14 @@ use ckb_types::{ get_related_dep_out_points, OverlayCellChecker, ResolvedTransaction, TransactionsChecker, }, - BlockView, Capacity, Cycle, EpochExt, ScriptHashType, TransactionView, UncleBlockView, - Version, + BlockView, Capacity, Cycle, EpochExt, HeaderView, ScriptHashType, TransactionView, + UncleBlockView, Version, }, packed::{Byte32, CellbaseWitness, OutPoint, ProposalShortId, Script}, prelude::*, }; use ckb_util::LinkedHashSet; -use ckb_verification::cache::CacheEntry; +use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use faketime::unix_time_as_millis; use std::collections::HashSet; use std::collections::{HashMap, VecDeque}; @@ -49,12 +49,23 @@ pub enum PlugTarget { Proposed, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TxStatus { Fresh, Gap, Proposed, } +impl TxStatus { + fn with_env(self, header: &HeaderView) -> TxVerifyEnv { + match self { + TxStatus::Fresh => TxVerifyEnv::new_submit(header), + TxStatus::Gap => TxVerifyEnv::new_proposed(header, 0), + TxStatus::Proposed => TxVerifyEnv::new_proposed(header, 1), + } + } +} + impl TxPoolService { async fn get_block_template_cache( &self, @@ -560,7 +571,9 @@ impl TxPoolService { let verify_cache = self.fetch_tx_verify_cache(&tx_hash).await; let max_cycles = max_cycles.unwrap_or(self.tx_pool_config.max_tx_verify_cycles); - let verified = verify_rtx(&snapshot, &rtx, verify_cache, max_cycles)?; + let tip_header = snapshot.tip_header(); + let tx_env = status.with_env(tip_header); + let verified = verify_rtx(&snapshot, &rtx, &tx_env, verify_cache, max_cycles)?; let entry = TxEntry::new(rtx, verified.cycles, fee, tx_size); @@ -631,8 +644,11 @@ impl TxPoolService { if let Ok((rtx, status)) = resolve_tx(tx_pool, tx_pool.snapshot(), tx) { if let Ok(fee) = check_tx_fee(tx_pool, tx_pool.snapshot(), &rtx, tx_size) { let verify_cache = fetched_cache.get(&tx_hash).cloned(); + let snapshot = tx_pool.snapshot(); + let tip_header = 
snapshot.tip_header(); + let tx_env = status.with_env(tip_header); if let Ok(verified) = - verify_rtx(tx_pool.snapshot(), &rtx, verify_cache, max_cycles) + verify_rtx(snapshot, &rtx, &tx_env, verify_cache, max_cycles) { let entry = TxEntry::new(rtx, verified.cycles, fee, tx_size); if let Err(e) = _submit_entry(tx_pool, status, entry, &self.callbacks) { diff --git a/tx-pool/src/util.rs b/tx-pool/src/util.rs index 84923a6ee5..99fabda09b 100644 --- a/tx-pool/src/util.rs +++ b/tx-pool/src/util.rs @@ -7,7 +7,7 @@ use ckb_store::ChainStore; use ckb_types::core::{cell::ResolvedTransaction, Capacity, Cycle, TransactionView}; use ckb_verification::{ cache::CacheEntry, ContextualTransactionVerifier, NonContextualTransactionVerifier, - TimeRelativeTransactionVerifier, + TimeRelativeTransactionVerifier, TxVerifyEnv, }; use tokio::task::block_in_place; @@ -74,35 +74,24 @@ pub(crate) fn non_contextual_verify( pub(crate) fn verify_rtx( snapshot: &Snapshot, rtx: &ResolvedTransaction, + tx_env: &TxVerifyEnv, cache_entry: Option, max_tx_verify_cycles: Cycle, ) -> Result { - let tip_header = snapshot.tip_header(); - let tip_number = tip_header.number(); - let epoch = tip_header.epoch(); let consensus = snapshot.consensus(); if let Some(cached) = cache_entry { - TimeRelativeTransactionVerifier::new( - &rtx, - snapshot, - tip_number + 1, - epoch, - tip_header.hash(), - consensus, - ) - .verify() - .map(|_| cached) - .map_err(Reject::Verification) + TimeRelativeTransactionVerifier::new(&rtx, snapshot, tx_env) + .verify() + .map(|_| cached) + .map_err(Reject::Verification) } else { block_in_place(|| { ContextualTransactionVerifier::new( &rtx, - tip_number + 1, - epoch, - tip_header.hash(), consensus, &snapshot.as_data_provider(), + tx_env, ) .verify(max_tx_verify_cycles, false) .map_err(Reject::Verification) diff --git a/util/snapshot/src/lib.rs b/util/snapshot/src/lib.rs index 02ea88f5a6..2fc1745061 100644 --- a/util/snapshot/src/lib.rs +++ b/util/snapshot/src/lib.rs @@ -1,7 +1,7 @@ //! Rocksdb snapshot wrapper use arc_swap::{ArcSwap, Guard}; -use ckb_chain_spec::consensus::Consensus; +use ckb_chain_spec::consensus::{Consensus, ConsensusProvider}; use ckb_db::{ iter::{DBIter, IteratorMode}, DBPinnableSlice, @@ -210,3 +210,9 @@ impl HeaderProvider for Snapshot { self.store.get_block_header(hash) } } + +impl ConsensusProvider for Snapshot { + fn get_consensus(&self) -> &Consensus { + self.consensus() + } +} diff --git a/util/types/src/core/advanced_builders.rs b/util/types/src/core/advanced_builders.rs index 16ffeeef7d..4c0e3e7324 100644 --- a/util/types/src/core/advanced_builders.rs +++ b/util/types/src/core/advanced_builders.rs @@ -470,6 +470,11 @@ impl packed::Header { } impl packed::Block { + /// Creates an empty advanced builder. + pub fn new_advanced_builder() -> BlockBuilder { + Default::default() + } + /// Creates an advanced builder base on current data. pub fn as_advanced_builder(&self) -> BlockBuilder { BlockBuilder::default() @@ -491,6 +496,11 @@ impl packed::Block { } impl core::TransactionView { + /// Creates an empty advanced builder. + pub fn new_advanced_builder() -> TransactionBuilder { + Default::default() + } + /// Creates an advanced builder base on current data. pub fn as_advanced_builder(&self) -> TransactionBuilder { self.data().as_advanced_builder() @@ -498,6 +508,11 @@ impl core::TransactionView { } impl core::HeaderView { + /// Creates an empty advanced builder. 
+ pub fn new_advanced_builder() -> HeaderBuilder { + Default::default() + } + /// Creates an advanced builder base on current data. pub fn as_advanced_builder(&self) -> HeaderBuilder { self.data().as_advanced_builder() diff --git a/util/types/src/core/extras.rs b/util/types/src/core/extras.rs index 867b1bb384..9f703397a4 100644 --- a/util/types/src/core/extras.rs +++ b/util/types/src/core/extras.rs @@ -428,6 +428,21 @@ impl EpochNumberWithFraction { self.0 } + /// Estimate the floor limit of epoch number after N blocks. + /// + /// Since we couldn't know the length of next epoch before reach the next epoch, + /// this function could only return `self.number()` or `self.number()+1`. + pub fn minimum_epoch_number_after_n_blocks(self, n: BlockNumber) -> EpochNumber { + let number = self.number(); + let length = self.length(); + let index = self.index(); + if index + n >= length { + number + 1 + } else { + number + } + } + /// TODO(doc): @quake // One caveat here, is that if the user specifies a zero epoch length either // deliberately, or by accident, calling to_rational() after that might diff --git a/verification/contextual/src/contextual_block_verifier.rs b/verification/contextual/src/contextual_block_verifier.rs index 76ea06548d..7259a7ca38 100644 --- a/verification/contextual/src/contextual_block_verifier.rs +++ b/verification/contextual/src/contextual_block_verifier.rs @@ -1,6 +1,6 @@ use crate::uncles_verifier::{UncleProvider, UnclesVerifier}; use ckb_async_runtime::Handle; -use ckb_chain_spec::consensus::Consensus; +use ckb_chain_spec::consensus::{Consensus, ConsensusProvider}; use ckb_dao::DaoCalculator; use ckb_error::Error; use ckb_logger::error_target; @@ -12,8 +12,7 @@ use ckb_types::{ core::error::OutPointError, core::{ cell::{HeaderChecker, ResolvedTransaction}, - BlockNumber, BlockReward, BlockView, Capacity, Cycle, EpochExt, EpochNumberWithFraction, - HeaderView, TransactionView, + BlockReward, BlockView, Capacity, Cycle, EpochExt, HeaderView, TransactionView, }, packed::{Byte32, CellOutput, Script}, prelude::*, @@ -23,7 +22,7 @@ use ckb_verification::{ BlockErrorKind, CellbaseError, CommitError, ContextualTransactionVerifier, TimeRelativeTransactionVerifier, UnknownParentError, }; -use ckb_verification::{BlockTransactionsError, EpochError}; +use ckb_verification::{BlockTransactionsError, EpochError, TxVerifyEnv}; use ckb_verification_traits::Switch; use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; use std::collections::{HashMap, HashSet}; @@ -72,6 +71,12 @@ impl<'a, CS: ChainStore<'a>> HeaderChecker for VerifyContext<'a, CS> { } } +impl<'a, CS: ChainStore<'a>> ConsensusProvider for VerifyContext<'a, CS> { + fn get_consensus(&self) -> &Consensus { + &self.consensus + } +} + pub struct UncleVerifierContext<'a, 'b, CS> { epoch: &'b EpochExt, context: &'a VerifyContext<'a, CS>, @@ -307,26 +312,19 @@ impl<'a, 'b, 'c, CS: ChainStore<'a>> DaoHeaderVerifier<'a, 'b, 'c, CS> { struct BlockTxsVerifier<'a, CS> { context: &'a VerifyContext<'a, CS>, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, + header: HeaderView, resolved: &'a [ResolvedTransaction], } impl<'a, CS: ChainStore<'a>> BlockTxsVerifier<'a, CS> { - #[allow(clippy::too_many_arguments)] pub fn new( context: &'a VerifyContext<'a, CS>, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, + header: HeaderView, resolved: &'a [ResolvedTransaction], ) -> Self { BlockTxsVerifier { 
context, - block_number, - epoch_number_with_fraction, - parent_hash, + header, resolved, } } @@ -375,32 +373,24 @@ impl<'a, CS: ChainStore<'a>> BlockTxsVerifier<'a, CS> { .enumerate() .map(|(index, tx)| { let tx_hash = tx.transaction.hash(); + let tx_env = TxVerifyEnv::new_commit(&self.header); if let Some(cache_entry) = fetched_cache.get(&tx_hash) { - TimeRelativeTransactionVerifier::new( - &tx, - self.context, - self.block_number, - self.epoch_number_with_fraction, - self.parent_hash.clone(), - self.context.consensus, - ) - .verify() - .map_err(|error| { - BlockTransactionsError { - index: index as u32, - error, - } - .into() - }) - .map(|_| (tx_hash, *cache_entry)) + TimeRelativeTransactionVerifier::new(&tx, self.context, &tx_env) + .verify() + .map_err(|error| { + BlockTransactionsError { + index: index as u32, + error, + } + .into() + }) + .map(|_| (tx_hash, *cache_entry)) } else { ContextualTransactionVerifier::new( &tx, - self.block_number, - self.epoch_number_with_fraction, - self.parent_hash.clone(), self.context.consensus, &self.context.store.as_data_provider(), + &tx_env, ) .verify( self.context.consensus.max_block_cycles(), @@ -506,6 +496,7 @@ impl<'a, CS: ChainStore<'a>> ContextualBlockVerifier<'a, CS> { ) -> Result<(Cycle, Vec), Error> { let timer = Timer::start(); let parent_hash = block.data().header().raw().parent_hash(); + let header = block.header(); let parent = self .context .store @@ -547,14 +538,11 @@ impl<'a, CS: ChainStore<'a>> ContextualBlockVerifier<'a, CS> { RewardVerifier::new(&self.context, resolved, &parent).verify()?; } - let ret = BlockTxsVerifier::new( - &self.context, - block.number(), - block.epoch(), - parent_hash, - resolved, - ) - .verify(txs_verify_cache, handle, switch.disable_script())?; + let ret = BlockTxsVerifier::new(&self.context, header, resolved).verify( + txs_verify_cache, + handle, + switch.disable_script(), + )?; metrics!(timing, "ckb.contextual_verified_block", timer.stop()); Ok(ret) } diff --git a/verification/src/lib.rs b/verification/src/lib.rs index de3311821d..851e7d0f3f 100644 --- a/verification/src/lib.rs +++ b/verification/src/lib.rs @@ -8,6 +8,7 @@ mod error; mod genesis_verifier; mod header_verifier; mod transaction_verifier; +mod tx_verify_env; #[cfg(test)] mod tests; @@ -24,6 +25,7 @@ pub use crate::transaction_verifier::{ ContextualTransactionVerifier, NonContextualTransactionVerifier, ScriptVerifier, Since, SinceMetric, TimeRelativeTransactionVerifier, TransactionVerifier, }; +pub use crate::tx_verify_env::TxVerifyEnv; /// Maximum amount of time that a block timestamp is allowed to exceed the /// current time before the block will be accepted. 
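For reference, a minimal sketch of how the re-exported `TxVerifyEnv` is threaded into the verifiers after this refactor, following the call sites in `tx-pool/src/util.rs` above (assuming `rtx`, `snapshot`, `consensus` and `max_cycles` are in scope as they are there):

    // Describe the tip-relative environment: here the transaction has just
    // been submitted, so it can be committed no earlier than the block at
    // tip_number + 1 + proposal_window.closest().
    let tip_header = snapshot.tip_header();
    let tx_env = TxVerifyEnv::new_submit(tip_header);

    // The contextual verifier now derives the block number, epoch and parent
    // hash from the environment instead of taking them as separate arguments.
    let _cache_entry = ContextualTransactionVerifier::new(
        &rtx,
        consensus,
        &snapshot.as_data_provider(),
        &tx_env,
    )
    .verify(max_cycles, false)?;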
diff --git a/verification/src/tests/transaction_verifier.rs b/verification/src/tests/transaction_verifier.rs index 70382a494f..b0040a0cf1 100644 --- a/verification/src/tests/transaction_verifier.rs +++ b/verification/src/tests/transaction_verifier.rs @@ -3,8 +3,8 @@ use super::super::transaction_verifier::{ Since, SinceVerifier, SizeVerifier, VersionVerifier, }; use crate::error::TransactionErrorSource; -use crate::TransactionError; -use ckb_chain_spec::{build_genesis_type_id_script, OUTPUT_INDEX_DAO}; +use crate::{TransactionError, TxVerifyEnv}; +use ckb_chain_spec::{build_genesis_type_id_script, consensus::ConsensusBuilder, OUTPUT_INDEX_DAO}; use ckb_error::{assert_error_eq, Error}; use ckb_test_chain_utils::{MockMedianTime, MOCK_MEDIAN_TIME_COUNT}; use ckb_traits::HeaderProvider; @@ -14,8 +14,8 @@ use ckb_types::{ core::{ capacity_bytes, cell::{CellMetaBuilder, ResolvedTransaction}, - BlockNumber, Capacity, EpochNumber, EpochNumberWithFraction, TransactionBuilder, - TransactionInfo, TransactionView, + BlockNumber, Capacity, EpochNumber, EpochNumberWithFraction, HeaderView, + TransactionBuilder, TransactionInfo, TransactionView, }, h256, packed::{CellDep, CellInput, CellOutput, OutPoint}, @@ -392,15 +392,19 @@ fn verify_since<'a, DL: HeaderProvider>( epoch_number: EpochNumber, ) -> Result<(), Error> { let parent_hash = Arc::new(MockMedianTime::get_block_hash(block_number - 1)); - SinceVerifier::new( - rtx, - data_loader, - block_number, - EpochNumberWithFraction::new(epoch_number, 0, 10), - 11, - parent_hash.as_ref().to_owned(), - ) - .verify() + let consensus = ConsensusBuilder::default() + .median_time_block_count(11) + .build(); + let tx_env = { + let epoch = EpochNumberWithFraction::new(epoch_number, 0, 10); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + SinceVerifier::new(rtx, &consensus, data_loader, &tx_env).verify() } #[test] @@ -495,30 +499,35 @@ fn test_fraction_epoch_since_verify() { &tx, MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1), ); + let consensus = ConsensusBuilder::default() + .median_time_block_count(MOCK_MEDIAN_TIME_COUNT) + .build(); let median_time_context = MockMedianTime::new(vec![0; 11]); let block_number = 1000; let parent_hash = Arc::new(MockMedianTime::get_block_hash(block_number - 1)); - let result = SinceVerifier::new( - &rtx, - &median_time_context, - block_number, - EpochNumberWithFraction::new(16, 1, 10), - MOCK_MEDIAN_TIME_COUNT, - parent_hash.as_ref().to_owned(), - ) - .verify(); + let tx_env = { + let epoch = EpochNumberWithFraction::new(16, 1, 10); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + let result = SinceVerifier::new(&rtx, &consensus, &median_time_context, &tx_env).verify(); assert_error_eq!(result.unwrap_err(), TransactionError::Immature { index: 0 }); - let result = SinceVerifier::new( - &rtx, - &median_time_context, - block_number, - EpochNumberWithFraction::new(16, 5, 10), - MOCK_MEDIAN_TIME_COUNT, - parent_hash.as_ref().to_owned(), - ) - .verify(); + let tx_env = { + let epoch = EpochNumberWithFraction::new(16, 5, 10); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + 
TxVerifyEnv::new_commit(&header) + }; + let result = SinceVerifier::new(&rtx, &consensus, &median_time_context, &tx_env).verify(); assert!(result.is_ok()); } diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 08f1795007..f3dd19336f 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -1,7 +1,7 @@ use crate::cache::CacheEntry; use crate::error::TransactionErrorSource; -use crate::TransactionError; -use ckb_chain_spec::consensus::Consensus; +use crate::{TransactionError, TxVerifyEnv}; +use ckb_chain_spec::consensus::{Consensus, ConsensusProvider}; use ckb_dao::DaoCalculator; use ckb_error::Error; use ckb_metrics::{metrics, Timer}; @@ -10,8 +10,7 @@ use ckb_traits::{CellDataProvider, EpochProvider, HeaderProvider}; use ckb_types::{ core::{ cell::{CellMeta, ResolvedTransaction}, - BlockNumber, Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, - Version, + Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, Version, }, packed::Byte32, prelude::*, @@ -30,30 +29,13 @@ pub struct TimeRelativeTransactionVerifier<'a, M> { pub(crate) since: SinceVerifier<'a, M>, } -impl<'a, DL: HeaderProvider> TimeRelativeTransactionVerifier<'a, DL> { +impl<'a, DL: HeaderProvider + ConsensusProvider> TimeRelativeTransactionVerifier<'a, DL> { /// Creates a new TimeRelativeTransactionVerifier - pub fn new( - rtx: &'a ResolvedTransaction, - data_loader: &'a DL, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, - consensus: &'a Consensus, - ) -> Self { + pub fn new(rtx: &'a ResolvedTransaction, data_loader: &'a DL, tx_env: &'a TxVerifyEnv) -> Self { + let consensus = data_loader.get_consensus(); TimeRelativeTransactionVerifier { - maturity: MaturityVerifier::new( - &rtx, - epoch_number_with_fraction, - consensus.cellbase_maturity(), - ), - since: SinceVerifier::new( - rtx, - data_loader, - block_number, - epoch_number_with_fraction, - consensus.median_time_block_count(), - parent_hash, - ), + maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), + since: SinceVerifier::new(rtx, consensus, data_loader, tx_env), } } @@ -126,31 +108,17 @@ where DL: CellDataProvider + HeaderProvider + EpochProvider, { /// Creates a new ContextualTransactionVerifier - #[allow(clippy::too_many_arguments)] pub fn new( rtx: &'a ResolvedTransaction, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, consensus: &'a Consensus, data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, ) -> Self { ContextualTransactionVerifier { - maturity: MaturityVerifier::new( - &rtx, - epoch_number_with_fraction, - consensus.cellbase_maturity(), - ), + maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), script: ScriptVerifier::new(rtx, data_loader), capacity: CapacityVerifier::new(rtx, consensus.dao_type_hash()), - since: SinceVerifier::new( - rtx, - data_loader, - block_number, - epoch_number_with_fraction, - consensus.median_time_block_count(), - parent_hash, - ), + since: SinceVerifier::new(rtx, consensus, data_loader, tx_env), fee_calculator: FeeCalculator::new(rtx, consensus, data_loader), } } @@ -186,25 +154,15 @@ pub struct TransactionVerifier<'a, DL> { impl<'a, DL: HeaderProvider + CellDataProvider + EpochProvider> TransactionVerifier<'a, DL> { /// Creates a new TransactionVerifier - #[allow(clippy::too_many_arguments)] pub fn new( rtx: &'a 
ResolvedTransaction, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, consensus: &'a Consensus, data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, ) -> Self { TransactionVerifier { non_contextual: NonContextualTransactionVerifier::new(&rtx.transaction, consensus), - contextual: ContextualTransactionVerifier::new( - rtx, - block_number, - epoch_number_with_fraction, - parent_hash, - consensus, - data_loader, - ), + contextual: ContextualTransactionVerifier::new(rtx, consensus, data_loader, tx_env), } } @@ -588,31 +546,25 @@ impl Since { /// [tx-since-specification](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0017-tx-valid-since/0017-tx-valid-since.md#detailed-specification pub struct SinceVerifier<'a, DL> { rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, data_loader: &'a DL, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, - median_block_count: usize, + tx_env: &'a TxVerifyEnv, median_timestamps_cache: RefCell>, } impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { pub fn new( rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, data_loader: &'a DL, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - median_block_count: usize, - parent_hash: Byte32, + tx_env: &'a TxVerifyEnv, ) -> Self { let median_timestamps_cache = RefCell::new(LruCache::new(rtx.resolved_inputs.len())); SinceVerifier { rtx, + consensus, data_loader, - block_number, - epoch_number_with_fraction, - parent_hash, - median_block_count, + tx_env, median_timestamps_cache, } } @@ -627,9 +579,10 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { return *median_time; } + let median_block_count = self.consensus.median_time_block_count(); let median_time = self .data_loader - .block_median_time(block_hash, self.median_block_count); + .block_median_time(block_hash, median_block_count); self.median_timestamps_cache .borrow_mut() .put(block_hash.clone(), median_time); @@ -640,17 +593,19 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { if since.is_absolute() { match since.extract_metric() { Some(SinceMetric::BlockNumber(block_number)) => { - if self.block_number < block_number { + let proposal_window = self.consensus.tx_proposal_window(); + if self.tx_env.block_number(proposal_window) < block_number { return Err((TransactionError::Immature { index }).into()); } } Some(SinceMetric::EpochNumberWithFraction(epoch_number_with_fraction)) => { - if self.epoch_number_with_fraction < epoch_number_with_fraction { + if self.tx_env.epoch() < epoch_number_with_fraction { return Err((TransactionError::Immature { index }).into()); } } Some(SinceMetric::Timestamp(timestamp)) => { - let tip_timestamp = self.block_median_time(&self.parent_hash); + let parent_hash = self.tx_env.parent_hash(); + let tip_timestamp = self.block_median_time(&parent_hash); if tip_timestamp < timestamp { return Err((TransactionError::Immature { index }).into()); } @@ -676,12 +631,14 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { }?; match since.extract_metric() { Some(SinceMetric::BlockNumber(block_number)) => { - if self.block_number < info.block_number + block_number { + let proposal_window = self.consensus.tx_proposal_window(); + if self.tx_env.block_number(proposal_window) < info.block_number + block_number + { return Err((TransactionError::Immature { index }).into()); } } Some(SinceMetric::EpochNumberWithFraction(epoch_number_with_fraction)) => { - let a = 
self.epoch_number_with_fraction.to_rational(); + let a = self.tx_env.epoch().to_rational(); let b = info.block_epoch.to_rational() + epoch_number_with_fraction.to_rational(); if a < b { @@ -693,8 +650,9 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { // parent of current block. // pass_median_time(input_cell's block) starts with cell_block_number - 1, // which is the parent of input_cell's block + let parent_hash = self.tx_env.parent_hash(); let cell_median_timestamp = self.parent_median_time(&info.block_hash); - let current_median_time = self.block_median_time(&self.parent_hash); + let current_median_time = self.block_median_time(&parent_hash); if current_median_time < cell_median_timestamp + timestamp { return Err((TransactionError::Immature { index }).into()); } diff --git a/verification/src/tx_verify_env.rs b/verification/src/tx_verify_env.rs new file mode 100644 index 0000000000..e840d5d7d8 --- /dev/null +++ b/verification/src/tx_verify_env.rs @@ -0,0 +1,120 @@ +//! Transaction verification environment. + +use ckb_chain_spec::consensus::ProposalWindow; +use ckb_types::{ + core::{BlockNumber, EpochNumber, EpochNumberWithFraction, HeaderView}, + packed::Byte32, +}; + +/// The phase that transactions are in. +#[derive(Debug, Clone, Copy)] +enum TxVerifyPhase { + /// The transaction has just been submitted. + /// + /// So the transaction will be: + /// - proposed after (or in) the `tip_number + 1` block. + /// - committed after (or in) `tip_number + 1 + proposal_window.closest()` block. + Submitted, + /// The transaction was already proposed several blocks ago. + /// + /// Assume that the inner block number is `N`. + /// So the transaction is proposed in the `tip_number - N` block. + /// Then it will be committed after (or in) the `tip_number - N + proposal_window.closest()` block. + Proposed(BlockNumber), + /// The transaction is committed. + /// + /// So the transaction will be committed in the current block. + Committed, +} + +/// The environment that transactions are in. +#[derive(Debug, Clone)] +pub struct TxVerifyEnv { + // Please keep these fields private. + // So we can update this struct more easily when we want to add more data. + phase: TxVerifyPhase, + // Current Tip Environment + number: BlockNumber, + epoch: EpochNumberWithFraction, + hash: Byte32, + parent_hash: Byte32, } + +impl TxVerifyEnv { + /// The transaction has just been submitted. + /// + /// The input is the current tip header. + pub fn new_submit(header: &HeaderView) -> Self { + Self { + phase: TxVerifyPhase::Submitted, + number: header.number(), + epoch: header.epoch(), + hash: header.hash(), + parent_hash: header.parent_hash(), + } + } + + /// The transaction was already proposed several blocks ago. + /// + /// The inputs are the current tip header and how many blocks have passed since the transaction was proposed. + pub fn new_proposed(header: &HeaderView, n_blocks: BlockNumber) -> Self { + Self { + phase: TxVerifyPhase::Proposed(n_blocks), + number: header.number(), + epoch: header.epoch(), + hash: header.hash(), + parent_hash: header.parent_hash(), + } + } + + /// The transaction will be committed in the current block. + /// + /// The input is the current tip header. + pub fn new_commit(header: &HeaderView) -> Self { + Self { + phase: TxVerifyPhase::Committed, + number: header.number(), + epoch: header.epoch(), + hash: header.hash(), + parent_hash: header.parent_hash(), + } + } + + /// The block number of the earliest block which the transaction will be committed in.
+ pub fn block_number(&self, proposal_window: ProposalWindow) -> BlockNumber { + match self.phase { + TxVerifyPhase::Submitted => self.number + 1 + proposal_window.closest(), + TxVerifyPhase::Proposed(already_proposed) => { + self.number.saturating_sub(already_proposed) + proposal_window.closest() + } + TxVerifyPhase::Committed => self.number, + } + } + + /// The epoch number of the earliest epoch which the transaction will be committed in. + pub fn epoch_number(&self, proposal_window: ProposalWindow) -> EpochNumber { + let n_blocks = match self.phase { + TxVerifyPhase::Submitted => 1 + proposal_window.closest(), + TxVerifyPhase::Proposed(already_proposed) => { + proposal_window.closest().saturating_sub(already_proposed) + } + TxVerifyPhase::Committed => 0, + }; + self.epoch.minimum_epoch_number_after_n_blocks(n_blocks) + } + + /// The parent block hash of the earliest block which the transaction will be committed in. + pub fn parent_hash(&self) -> Byte32 { + match self.phase { + TxVerifyPhase::Submitted => &self.hash, + TxVerifyPhase::Proposed(_) => &self.hash, + TxVerifyPhase::Committed => &self.parent_hash, + } + .to_owned() + } + + /// The earliest epoch which the transaction will be committed in. + pub fn epoch(&self) -> EpochNumberWithFraction { + self.epoch + } +} From 92a7256fd81d949860f3c25e6bd0a640d3edb35c Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Fri, 21 May 2021 18:13:48 +0800 Subject: [PATCH 05/18] feat(hardfork): in the "since epoch", the index should be less than length --- spec/src/hardfork.rs | 10 +- test/src/main.rs | 3 + test/src/rpc.rs | 2 +- test/src/specs/dao/dao_user.rs | 2 +- test/src/specs/hardfork/mod.rs | 3 + test/src/specs/hardfork/v2021/mod.rs | 3 + test/src/specs/hardfork/v2021/since.rs | 205 ++++++++++++++++++ test/src/specs/mod.rs | 2 + test/src/util/check.rs | 13 +- test/src/util/mining.rs | 21 +- test/src/utils.rs | 10 +- test/template/specs/integration.toml | 3 + util/types/src/core/extras.rs | 8 + util/types/src/core/hardfork.rs | 28 ++- .../src/tests/transaction_verifier.rs | 63 ++++++ verification/src/transaction_verifier.rs | 16 ++ 16 files changed, 366 insertions(+), 26 deletions(-) create mode 100644 test/src/specs/hardfork/mod.rs create mode 100644 test/src/specs/hardfork/v2021/mod.rs create mode 100644 test/src/specs/hardfork/v2021/since.rs diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 8d9fce59f9..5b87e59374 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -9,9 +9,11 @@ use serde::{Deserialize, Serialize}; /// Hard forks parameters for spec. #[derive(Default, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] pub struct HardForkConfig { - /// Just a dummy field to test hard fork feature. - pub rfc_0000: Option, + // TODO ckb2021 Update all rfc numbers and fix all links, after all proposals are merged. + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0223: Option, } macro_rules! check_default { @@ -54,7 +56,7 @@ impl HardForkConfig { builder: HardForkSwitchBuilder, ckb2021: EpochNumber, ) -> Result { - let builder = builder.rfc_0000(check_default!(self, rfc_0000, ckb2021)); + let builder = builder.rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)); Ok(builder) } @@ -63,7 +65,7 @@ impl HardForkConfig { /// Enable features which are set to `None` at the user provided epoch.
pub fn complete_with_default(&self, default: EpochNumber) -> Result { HardForkSwitch::new_builder() - .rfc_0000(self.rfc_0000.unwrap_or(default)) + .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) .build() } } diff --git a/test/src/main.rs b/test/src/main.rs index 8ac136c661..fd41842e4c 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -488,6 +488,9 @@ fn all_specs() -> Vec> { Box::new(CellBeingCellDepAndSpentInSameBlockTestGetBlockTemplate), Box::new(CellBeingCellDepAndSpentInSameBlockTestGetBlockTemplateMultiple), Box::new(DuplicateCellDeps), + // Test hard fork features + Box::new(CheckAbsoluteEpochSince), + Box::new(CheckRelativeEpochSince), ]; specs.shuffle(&mut thread_rng()); specs diff --git a/test/src/rpc.rs b/test/src/rpc.rs index ea90842c05..f5f8860d1c 100644 --- a/test/src/rpc.rs +++ b/test/src/rpc.rs @@ -85,7 +85,7 @@ impl RpcClient { pub fn get_tip_header(&self) -> HeaderView { self.inner .get_tip_header() - .expect("rpc call get_block_hash") + .expect("rpc call get_tip_header") } pub fn get_live_cell(&self, out_point: OutPoint, with_data: bool) -> CellWithStatus { diff --git a/test/src/specs/dao/dao_user.rs b/test/src/specs/dao/dao_user.rs index 9d60ca2b42..285e3cde2f 100644 --- a/test/src/specs/dao/dao_user.rs +++ b/test/src/specs/dao/dao_user.rs @@ -133,7 +133,7 @@ impl<'a> DAOUser<'a> { let prepare_utxo_headers = self.utxo_headers(&self.prepare_utxo); let inputs = prepare_utxo_headers.iter().map(|(txo, _)| { let minimal_unlock_point = self.minimal_unlock_point(&txo.out_point()); - let since = since_from_absolute_epoch_number(minimal_unlock_point.full_value()); + let since = since_from_absolute_epoch_number(minimal_unlock_point); CellInput::new(txo.out_point(), since) }); let output_capacity = deposit_utxo_headers diff --git a/test/src/specs/hardfork/mod.rs b/test/src/specs/hardfork/mod.rs new file mode 100644 index 0000000000..422849ec4a --- /dev/null +++ b/test/src/specs/hardfork/mod.rs @@ -0,0 +1,3 @@ +mod v2021; + +pub use v2021::*; diff --git a/test/src/specs/hardfork/v2021/mod.rs b/test/src/specs/hardfork/v2021/mod.rs new file mode 100644 index 0000000000..42bdaf1706 --- /dev/null +++ b/test/src/specs/hardfork/v2021/mod.rs @@ -0,0 +1,3 @@ +mod since; + +pub use since::{CheckAbsoluteEpochSince, CheckRelativeEpochSince}; diff --git a/test/src/specs/hardfork/v2021/since.rs b/test/src/specs/hardfork/v2021/since.rs new file mode 100644 index 0000000000..b42923d62f --- /dev/null +++ b/test/src/specs/hardfork/v2021/since.rs @@ -0,0 +1,205 @@ +use crate::util::{ + check::{self, assert_epoch_should_be}, + mining::{mine, mine_until_epoch, mine_until_out_bootstrap_period}, +}; +use crate::utils::{ + assert_send_transaction_fail, since_from_absolute_epoch_number, + since_from_relative_epoch_number, +}; +use crate::{Node, Spec}; + +use ckb_logger::info; +use ckb_types::core::{EpochNumberWithFraction, TransactionView}; + +const GENESIS_EPOCH_LENGTH: u64 = 10; + +const ERROR_IMMATURE: &str = "TransactionFailedToVerify: Verification failed Transaction(Immature("; +const ERROR_INVALID_SINCE: &str = + "TransactionFailedToVerify: Verification failed Transaction(InvalidSince("; + +pub struct CheckAbsoluteEpochSince; +pub struct CheckRelativeEpochSince; + +impl Spec for CheckAbsoluteEpochSince { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, since absolute epoch failed"); + let tx = 
create_tx_since_absolute_epoch(node, 1, 3); + assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + } + { + info!("CKB v2019, since absolute epoch failed"); + let tx = create_tx_since_absolute_epoch(node, 1, 2); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + mine_until_epoch(node, 1, epoch_length - 2, epoch_length); + { + info!("CKB v2019, since absolute epoch failed (boundary)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length - 1); + assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + } + { + info!("CKB v2019, since absolute epoch ok (boundary)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length - 2); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + mine(&node, 1); + { + info!("CKB v2019, since absolute epoch failed (boundary, malformed)"); + let tx = create_tx_since_absolute_epoch(node, 0, (epoch_length - 1) + epoch_length); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + mine(&node, 1); + assert_epoch_should_be(node, 2, 0, epoch_length); + { + info!("CKB v2021, since absolute epoch failed (boundary, malformed)"); + let tx = create_tx_since_absolute_epoch(node, 0, epoch_length * 2); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch failed (boundary, malformed)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch ok (boundary)"); + let tx = create_tx_since_absolute_epoch(node, 2, 0); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + mine_until_epoch(node, 3, 0, epoch_length); + { + info!("CKB v2021, since absolute epoch failed (malformed)"); + let tx = create_tx_since_absolute_epoch(node, 0, epoch_length * 3); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch failed (malformed)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length * 2); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch failed (malformed)"); + let tx = create_tx_since_absolute_epoch(node, 2, epoch_length); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch ok"); + let tx = create_tx_since_absolute_epoch(node, 3, 0); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0223 = Some(2); + } + } +} + +impl Spec for CheckRelativeEpochSince { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + mine_until_epoch(node, 1, epoch_length - 4, epoch_length); + { + info!("CKB v2019, since relative epoch failed"); + let tx = create_tx_since_relative_epoch(node, 1, 0); + mine(&node, epoch_length - 1); + 
assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + mine(&node, 1); + info!("CKB v2019, since relative epoch ok"); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + assert_epoch_should_be(node, 2, epoch_length - 4, epoch_length); + { + info!("CKB v2019, since relative epoch failed (malformed)"); + let tx = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, epoch_length - 1); + assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + mine(&node, 1); + info!("CKB v2019, since relative epoch ok (malformed)"); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + assert_epoch_should_be(node, 3, epoch_length - 4, epoch_length); + { + let tx1 = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, 1); + let tx2 = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, epoch_length - 2); + + info!("CKB v2019, since relative epoch failed (boundary, malformed)"); + assert_send_transaction_fail(node, &tx1, ERROR_IMMATURE); + mine(&node, 1); + info!("CKB v2019, since relative epoch ok (boundary, malformed)"); + let res = node.rpc_client().send_transaction_result(tx1.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + + info!("CKB v2021, since relative epoch failed (boundary, malformed)"); + assert_send_transaction_fail(node, &tx2, ERROR_IMMATURE); + + mine(&node, 1); + info!("CKB v2021, since relative epoch failed (boundary, malformed)"); + assert_send_transaction_fail(node, &tx2, ERROR_INVALID_SINCE); + + info!("CKB v2019, since relative epoch transaction will be committed (boundary, malformed)"); + assert_epoch_should_be(node, 4, epoch_length - 3, epoch_length); + assert!(check::is_transaction_pending(node, &tx1)); + mine(&node, 1); + assert!(check::is_transaction_proposed(node, &tx1)); + mine(&node, 1); + assert!(check::is_transaction_committed(node, &tx1)); + assert_epoch_should_be(node, 4, epoch_length - 1, epoch_length); + } + { + info!("CKB v2021, since relative epoch failed (malformed)"); + let tx = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, epoch_length - 1); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + mine(&node, 1); + info!("CKB v2021, since relative epoch failed (malformed)"); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0223 = Some(5); + } + } +} + +fn create_tx_since_absolute_epoch(node: &Node, number: u64, index: u64) -> TransactionView { + let epoch_length = GENESIS_EPOCH_LENGTH; + let epoch = EpochNumberWithFraction::new(number, index, epoch_length); + let since = since_from_absolute_epoch_number(epoch); + let cellbase = node.get_tip_block().transactions()[0].clone(); + node.new_transaction_with_since(cellbase.hash(), since) +} + +fn create_tx_since_relative_epoch(node: &Node, number: u64, index: u64) -> TransactionView { + let epoch_length = GENESIS_EPOCH_LENGTH; + let epoch = EpochNumberWithFraction::new(number, index, epoch_length); + let since = since_from_relative_epoch_number(epoch); + let cellbase = node.get_tip_block().transactions()[0].clone(); + 
node.new_transaction_with_since(cellbase.hash(), since) +} diff --git a/test/src/specs/mod.rs b/test/src/specs/mod.rs index 6dde71565c..0f9a2da317 100644 --- a/test/src/specs/mod.rs +++ b/test/src/specs/mod.rs @@ -1,6 +1,7 @@ mod alert; mod consensus; mod dao; +mod hardfork; mod mining; mod p2p; mod relay; @@ -11,6 +12,7 @@ mod tx_pool; pub use alert::*; pub use consensus::*; pub use dao::*; +pub use hardfork::*; pub use mining::*; pub use p2p::*; pub use relay::*; diff --git a/test/src/util/check.rs b/test/src/util/check.rs index a87f19ee7a..827c2b425a 100644 --- a/test/src/util/check.rs +++ b/test/src/util/check.rs @@ -1,6 +1,6 @@ use crate::Node; use ckb_jsonrpc_types::Status; -use ckb_types::core::TransactionView; +use ckb_types::core::{EpochNumberWithFraction, HeaderView, TransactionView}; pub fn is_transaction_pending(node: &Node, transaction: &TransactionView) -> bool { node.rpc_client() @@ -28,3 +28,14 @@ pub fn is_transaction_unknown(node: &Node, transaction: &TransactionView) -> boo .get_transaction(transaction.hash()) .is_none() } + +pub fn assert_epoch_should_be(node: &Node, number: u64, index: u64, length: u64) { + let tip_header: HeaderView = node.rpc_client().get_tip_header().into(); + let tip_epoch = tip_header.epoch(); + let target_epoch = EpochNumberWithFraction::new(number, index, length); + assert_eq!( + tip_epoch, target_epoch, + "current tip epoch is {}, but expect epoch {}", + tip_epoch, target_epoch + ); +} diff --git a/test/src/util/mining.rs b/test/src/util/mining.rs index 6be7bfc9b0..8a16c5f1f7 100644 --- a/test/src/util/mining.rs +++ b/test/src/util/mining.rs @@ -1,7 +1,9 @@ use crate::util::chain::forward_main_blocks; use crate::Node; -use ckb_types::core::{BlockBuilder, BlockView}; -use ckb_types::packed; +use ckb_types::{ + core::{BlockBuilder, BlockView, EpochNumberWithFraction, HeaderView}, + packed, +}; pub fn out_bootstrap_period(nodes: &[Node]) { if let Some(node0) = nodes.first() { @@ -59,6 +61,21 @@ pub fn mine_until_out_bootstrap_period(node: &Node) { mine_until_bool(node, predicate) } +pub fn mine_until_epoch(node: &Node, number: u64, index: u64, length: u64) { + let target_epoch = EpochNumberWithFraction::new(number, index, length); + mine_until_bool(node, || { + let tip_header: HeaderView = node.rpc_client().get_tip_header().into(); + let tip_epoch = tip_header.epoch(); + if tip_epoch > target_epoch { + panic!( + "expect mine until epoch {} but already be epoch {}", + target_epoch, tip_epoch + ); + } + tip_epoch == target_epoch + }); +} + pub fn mine(node: &Node, count: u64) { let with = |builder: BlockBuilder| builder.build(); mine_with(node, count, with) diff --git a/test/src/utils.rs b/test/src/utils.rs index 7c629984f9..91153dd8f4 100644 --- a/test/src/utils.rs +++ b/test/src/utils.rs @@ -4,7 +4,7 @@ use crate::util::mining::mine; use crate::{Node, TXOSet}; use ckb_network::bytes::Bytes; use ckb_types::{ - core::{BlockNumber, BlockView, EpochNumber, HeaderView, TransactionView}, + core::{BlockNumber, BlockView, EpochNumberWithFraction, HeaderView, TransactionView}, packed::{ BlockTransactions, Byte32, CompactBlock, GetBlocks, RelayMessage, RelayTransaction, RelayTransactionHashes, RelayTransactions, SendBlock, SendHeaders, SyncMessage, @@ -161,12 +161,12 @@ pub fn since_from_absolute_block_number(block_number: BlockNumber) -> u64 { FLAG_SINCE_BLOCK_NUMBER | block_number } -pub fn since_from_relative_epoch_number(epoch_number: EpochNumber) -> u64 { - FLAG_SINCE_RELATIVE | FLAG_SINCE_EPOCH_NUMBER | epoch_number +pub fn 
since_from_relative_epoch_number(epoch_number: EpochNumberWithFraction) -> u64 { + FLAG_SINCE_RELATIVE | FLAG_SINCE_EPOCH_NUMBER | epoch_number.full_value() } -pub fn since_from_absolute_epoch_number(epoch_number: EpochNumber) -> u64 { - FLAG_SINCE_EPOCH_NUMBER | epoch_number +pub fn since_from_absolute_epoch_number(epoch_number: EpochNumberWithFraction) -> u64 { + FLAG_SINCE_EPOCH_NUMBER | epoch_number.full_value() } pub fn since_from_relative_timestamp(timestamp: u64) -> u64 { diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index ba748b891c..9403f3b97e 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -68,5 +68,8 @@ primary_epoch_reward_halving_interval = 8760 epoch_duration_target = 14400 genesis_epoch_length = 1000 +[params.hardfork] +rfc_pr_0223 = 9_223_372_036_854_775_807 + [pow] func = "Dummy" diff --git a/util/types/src/core/extras.rs b/util/types/src/core/extras.rs index 9f703397a4..d7c2c920b7 100644 --- a/util/types/src/core/extras.rs +++ b/util/types/src/core/extras.rs @@ -462,4 +462,12 @@ impl EpochNumberWithFraction { pub fn to_rational(self) -> RationalU256 { RationalU256::new(self.index().into(), self.length().into()) + U256::from(self.number()) } + + /// Check the data format. + /// + /// The epoch length should be greater than zero. + /// The epoch index should be less than the epoch length. + pub fn is_well_formed(self) -> bool { + self.length() > 0 && self.length() > self.index() + } } diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs index ed80e29cd8..ea36749497 100644 --- a/util/types/src/core/hardfork.rs +++ b/util/types/src/core/hardfork.rs @@ -94,8 +94,7 @@ macro_rules! define_methods { /// [`HardForkSwitchBuilder`]: struct.HardForkSwitchBuilder.html #[derive(Debug, Clone)] pub struct HardForkSwitch { - // TODO hardfork Remove this feature after add real hardfork features. - rfc_0000: EpochNumber, + rfc_pr_0223: EpochNumber, } /// Builder for [`HardForkSwitch`]. @@ -103,7 +102,12 @@ pub struct HardForkSwitch { /// [`HardForkSwitch`]: struct.HardForkSwitch.html #[derive(Debug, Clone, Default)] pub struct HardForkSwitchBuilder { - rfc_0000: Option, + // TODO ckb2021 Update all rfc numbers and fix all links, after all proposals are merged. + /// In the "since epoch", the index should be less than length and + /// the length should be greater than zero. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0223: Option, } impl HardForkSwitch { @@ -114,22 +118,22 @@ impl HardForkSwitch { /// Creates a new builder based on the current instance. pub fn as_builder(&self) -> HardForkSwitchBuilder { - Self::new_builder().rfc_0000(self.rfc_0000()) + Self::new_builder().rfc_pr_0223(self.rfc_pr_0223()) } /// Creates a new instance that all hard fork features are disabled forever. pub fn new_without_any_enabled() -> Self { // Use a builder to ensure all features are set manually. 
- Self::new_builder().disable_rfc_0000().build().unwrap() + Self::new_builder().disable_rfc_pr_0223().build().unwrap() } } define_methods!( - rfc_0000, - dummy_feature, - is_dummy_feature_enabled, - disable_rfc_0000, - "RFC 0000" + rfc_pr_0223, + check_length_in_epoch_since, + is_check_length_in_epoch_since_enabled, + disable_rfc_pr_0223, + "RFC PR 0223" ); impl HardForkSwitchBuilder { @@ -147,7 +151,7 @@ impl HardForkSwitchBuilder { })?; }; } - let rfc_0000 = try_find!(rfc_0000); - Ok(HardForkSwitch { rfc_0000 }) + let rfc_pr_0223 = try_find!(rfc_pr_0223); + Ok(HardForkSwitch { rfc_pr_0223 }) } } diff --git a/verification/src/tests/transaction_verifier.rs b/verification/src/tests/transaction_verifier.rs index b0040a0cf1..bda3972923 100644 --- a/verification/src/tests/transaction_verifier.rs +++ b/verification/src/tests/transaction_verifier.rs @@ -14,6 +14,7 @@ use ckb_types::{ core::{ capacity_bytes, cell::{CellMetaBuilder, ResolvedTransaction}, + hardfork::HardForkSwitch, BlockNumber, Capacity, EpochNumber, EpochNumberWithFraction, HeaderView, TransactionBuilder, TransactionInfo, TransactionView, }, @@ -531,6 +532,68 @@ fn test_fraction_epoch_since_verify() { assert!(result.is_ok()); } +#[test] +fn test_fraction_epoch_since_verify_v2021() { + let fork_at = 16; + let transaction_info = + MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1); + let tx1 = create_tx_with_lock(0x2000_0a00_0f00_000f); + let rtx1 = create_resolve_tx_with_transaction_info(&tx1, transaction_info.clone()); + let tx2 = create_tx_with_lock(0x2000_0a00_0500_0010); + let rtx2 = create_resolve_tx_with_transaction_info(&tx2, transaction_info); + let median_time_context = MockMedianTime::new(vec![0; 11]); + let tx_env = { + let block_number = 1000; + let epoch = EpochNumberWithFraction::new(fork_at, 5, 10); + let parent_hash = Arc::new(MockMedianTime::get_block_hash(block_number - 1)); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + + { + // Test CKB v2019 + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0223(fork_at + 1) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .median_time_block_count(MOCK_MEDIAN_TIME_COUNT) + .hardfork_switch(hardfork_switch) + .build(); + let result = SinceVerifier::new(&rtx1, &consensus, &median_time_context, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + + let result = SinceVerifier::new(&rtx2, &consensus, &median_time_context, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + } + { + // Test CKB v2021 + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0223(fork_at) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .median_time_block_count(MOCK_MEDIAN_TIME_COUNT) + .hardfork_switch(hardfork_switch) + .build(); + + let result = SinceVerifier::new(&rtx1, &consensus, &median_time_context, &tx_env).verify(); + assert_error_eq!( + result.unwrap_err(), + TransactionError::InvalidSince { index: 0 } + ); + + let result = SinceVerifier::new(&rtx2, &consensus, &median_time_context, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + } +} + #[test] pub fn test_absolute_block_number_lock() { // absolute lock until block number 0xa diff --git a/verification/src/transaction_verifier.rs 
b/verification/src/transaction_verifier.rs index f3dd19336f..3413ad08e8 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -599,6 +599,14 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { } } Some(SinceMetric::EpochNumberWithFraction(epoch_number_with_fraction)) => { + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let hardfork_switch = self.consensus.hardfork_switch(); + if hardfork_switch.is_check_length_in_epoch_since_enabled(epoch_number) + && !epoch_number_with_fraction.is_well_formed() + { + return Err((TransactionError::InvalidSince { index }).into()); + } if self.tx_env.epoch() < epoch_number_with_fraction { return Err((TransactionError::Immature { index }).into()); } @@ -638,6 +646,14 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { } } Some(SinceMetric::EpochNumberWithFraction(epoch_number_with_fraction)) => { + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let hardfork_switch = self.consensus.hardfork_switch(); + if hardfork_switch.is_check_length_in_epoch_since_enabled(epoch_number) + && !epoch_number_with_fraction.is_well_formed() + { + return Err((TransactionError::InvalidSince { index }).into()); + } let a = self.tx_env.epoch().to_rational(); let b = info.block_epoch.to_rational() + epoch_number_with_fraction.to_rational(); From 4d75c5ce2c120f0210437a7f40df32dfa9fe1a1f Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Fri, 21 May 2021 18:27:54 +0800 Subject: [PATCH 06/18] feat(hardfork): use block timestamp of input cells as relative since start timestamp --- spec/src/hardfork.rs | 7 +++++- test/template/specs/integration.toml | 1 + util/types/src/core/hardfork.rs | 29 +++++++++++++++++++++--- verification/src/transaction_verifier.rs | 18 +++++++++++++-- 4 files changed, 49 insertions(+), 6 deletions(-) diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 5b87e59374..091e298969 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -13,6 +13,8 @@ use serde::{Deserialize, Serialize}; pub struct HardForkConfig { // TODO ckb2021 Update all rfc numbers and fix all links, after all proposals are merged. /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0221: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0223: Option, } @@ -56,7 +58,9 @@ impl HardForkConfig { builder: HardForkSwitchBuilder, ckb2021: EpochNumber, ) -> Result { - let builder = builder.rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)); + let builder = builder + .rfc_pr_0221(check_default!(self, rfc_pr_0221, ckb2021)) + .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)); Ok(builder) } @@ -65,6 +69,7 @@ impl HardForkConfig { /// Enable features which are set to `None` at the user provided epoch. 
pub fn complete_with_default(&self, default: EpochNumber) -> Result { HardForkSwitch::new_builder() + .rfc_pr_0221(self.rfc_pr_0221.unwrap_or(default)) .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) .build() } diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index 9403f3b97e..15b3abf35c 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -69,6 +69,7 @@ epoch_duration_target = 14400 genesis_epoch_length = 1000 [params.hardfork] +rfc_pr_0221 = 9_223_372_036_854_775_807 rfc_pr_0223 = 9_223_372_036_854_775_807 [pow] diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs index ea36749497..64631e4294 100644 --- a/util/types/src/core/hardfork.rs +++ b/util/types/src/core/hardfork.rs @@ -94,6 +94,7 @@ macro_rules! define_methods { /// [`HardForkSwitchBuilder`]: struct.HardForkSwitchBuilder.html #[derive(Debug, Clone)] pub struct HardForkSwitch { + rfc_pr_0221: EpochNumber, rfc_pr_0223: EpochNumber, } @@ -103,6 +104,11 @@ pub struct HardForkSwitch { #[derive(Debug, Clone, Default)] pub struct HardForkSwitchBuilder { // TODO ckb2021 Update all rfc numbers and fix all links, after all proposals are merged. + /// Use the input cell creation block timestamp as start time in the + /// "relative since timestamp". + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0221: Option, /// In the "since epoch", the index should be less than length and /// the length should be greater than zero. /// @@ -118,16 +124,29 @@ impl HardForkSwitch { /// Creates a new builder based on the current instance. pub fn as_builder(&self) -> HardForkSwitchBuilder { - Self::new_builder().rfc_pr_0223(self.rfc_pr_0223()) + Self::new_builder() + .rfc_pr_0221(self.rfc_pr_0221()) + .rfc_pr_0223(self.rfc_pr_0223()) } /// Creates a new instance that all hard fork features are disabled forever. pub fn new_without_any_enabled() -> Self { // Use a builder to ensure all features are set manually. - Self::new_builder().disable_rfc_pr_0223().build().unwrap() + Self::new_builder() + .disable_rfc_pr_0221() + .disable_rfc_pr_0223() + .build() + .unwrap() } } +define_methods!( + rfc_pr_0221, + block_ts_as_relative_since_start, + is_block_ts_as_relative_since_start_enabled, + disable_rfc_pr_0221, + "RFC PR 0221" +); define_methods!( rfc_pr_0223, check_length_in_epoch_since, @@ -151,7 +170,11 @@ impl HardForkSwitchBuilder { })?; }; } + let rfc_pr_0221 = try_find!(rfc_pr_0221); let rfc_pr_0223 = try_find!(rfc_pr_0223); - Ok(HardForkSwitch { rfc_pr_0223 }) + Ok(HardForkSwitch { + rfc_pr_0221, + rfc_pr_0223, + }) } } diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 3413ad08e8..5d1d57dfdb 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -574,6 +574,11 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { self.block_median_time(&parent_hash) } + fn parent_block_time(&self, block_hash: &Byte32) -> u64 { + let (timestamp, _, _) = self.data_loader.timestamp_and_parent(block_hash); + timestamp + } + fn block_median_time(&self, block_hash: &Byte32) -> u64 { if let Some(median_time) = self.median_timestamps_cache.borrow().peek(block_hash) { return *median_time; @@ -666,10 +671,19 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { // parent of current block. 
// pass_median_time(input_cell's block) starts with cell_block_number - 1, // which is the parent of input_cell's block + let proposal_window = self.consensus.tx_proposal_window(); let parent_hash = self.tx_env.parent_hash(); - let cell_median_timestamp = self.parent_median_time(&info.block_hash); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let hardfork_switch = self.consensus.hardfork_switch(); + let base_timestamp = if hardfork_switch + .is_block_ts_as_relative_since_start_enabled(epoch_number) + { + self.parent_median_time(&info.block_hash) + } else { + self.parent_block_time(&info.block_hash) + }; let current_median_time = self.block_median_time(&parent_hash); - if current_median_time < cell_median_timestamp + timestamp { + if current_median_time < base_timestamp + timestamp { return Err((TransactionError::Immature { index }).into()); } } From d1c0bbece4ac1bebdac968dbe494936010b160c2 Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Mon, 24 May 2021 03:03:05 +0800 Subject: [PATCH 07/18] feat(hardfork): allow unknown block versions and transactions versions --- rpc/src/module/chain.rs | 5 +- spec/src/consensus.rs | 96 +++++----- spec/src/hardfork.rs | 6 +- test/src/main.rs | 2 + test/src/specs/hardfork/v2021/mod.rs | 2 + test/src/specs/hardfork/v2021/version.rs | 181 ++++++++++++++++++ test/src/specs/rpc/get_block_template.rs | 2 +- test/src/util/check.rs | 28 ++- test/template/specs/integration.toml | 1 + tx-pool/src/block_assembler/mod.rs | 24 ++- tx-pool/src/process.rs | 5 + util/types/src/constants.rs | 8 - util/types/src/core/advanced_builders.rs | 20 +- util/types/src/core/error.rs | 12 +- util/types/src/core/hardfork.rs | 16 ++ util/types/src/lib.rs | 1 - verification/src/header_verifier.rs | 38 ++-- verification/src/tests/header_verifier.rs | 64 +++++-- .../src/tests/transaction_verifier.rs | 69 +++++-- verification/src/transaction_verifier.rs | 48 +++-- 20 files changed, 485 insertions(+), 143 deletions(-) create mode 100644 test/src/specs/hardfork/v2021/version.rs delete mode 100644 util/types/src/constants.rs diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index aac5f8e5ed..8361c1911d 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1615,8 +1615,9 @@ impl ChainRpc for ChainRpcImpl { } fn get_consensus(&self) -> Result { - let consensus = self.shared.consensus().clone(); - Ok(consensus.into()) + let consensus = self.shared.consensus(); + let epoch_number = self.shared.snapshot().tip_header().epoch().number(); + Ok(consensus.to_json(epoch_number)) } fn get_block_median_time(&self, block_hash: H256) -> Result> { diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index 2bf066b662..ab54d5f98a 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -14,7 +14,6 @@ use ckb_resource::Resource; use ckb_traits::{BlockEpoch, EpochProvider}; use ckb_types::{ bytes::Bytes, - constants::{BLOCK_VERSION, TX_VERSION}, core::{ hardfork::HardForkSwitch, BlockBuilder, BlockNumber, BlockView, Capacity, Cycle, EpochExt, EpochNumber, EpochNumberWithFraction, HeaderView, Ratio, TransactionBuilder, @@ -261,8 +260,6 @@ impl ConsensusBuilder { secp256k1_blake160_sighash_all_type_hash: None, secp256k1_blake160_multisig_all_type_hash: None, genesis_epoch_ext, - block_version: BLOCK_VERSION, - tx_version: TX_VERSION, type_id_code_hash: TYPE_ID_CODE_HASH, proposer_reward_ratio: PROPOSER_REWARD_RATIO, max_block_proposals_limit: MAX_BLOCK_PROPOSALS_LIMIT, @@ -513,10 +510,6 @@ pub struct Consensus { pub max_block_cycles: Cycle, /// 
Maximum number of bytes to use for the entire block pub max_block_bytes: u64, - /// The block version number supported - pub block_version: Version, - /// The tx version number supported - pub tx_version: Version, /// The "TYPE_ID" in hex pub type_id_code_hash: H256, /// The Limit to the number of proposals per block @@ -694,13 +687,13 @@ impl Consensus { } /// The current block version - pub fn block_version(&self) -> Version { - self.block_version + pub fn block_version(&self, _epoch_number: EpochNumber) -> Version { + 0 } /// The current transaction version - pub fn tx_version(&self) -> Version { - self.tx_version + pub fn tx_version(&self, _epoch_number: EpochNumber) -> Version { + 0 } /// The "TYPE_ID" in hex @@ -930,6 +923,46 @@ impl Consensus { pub fn hardfork_switch(&self) -> &HardForkSwitch { &self.hardfork_switch } + + /// Convert to a JSON type with an input epoch number as the tip epoch number. + pub fn to_json(&self, epoch_number: EpochNumber) -> ckb_jsonrpc_types::Consensus { + ckb_jsonrpc_types::Consensus { + id: self.id.clone(), + genesis_hash: self.genesis_hash.unpack(), + dao_type_hash: self.dao_type_hash().map(|h| h.unpack()), + secp256k1_blake160_sighash_all_type_hash: self + .secp256k1_blake160_sighash_all_type_hash() + .map(|h| h.unpack()), + secp256k1_blake160_multisig_all_type_hash: self + .secp256k1_blake160_multisig_all_type_hash() + .map(|h| h.unpack()), + initial_primary_epoch_reward: self.initial_primary_epoch_reward.into(), + secondary_epoch_reward: self.secondary_epoch_reward.into(), + max_uncles_num: (self.max_uncles_num as u64).into(), + orphan_rate_target: self.orphan_rate_target().to_owned(), + epoch_duration_target: self.epoch_duration_target.into(), + tx_proposal_window: ckb_jsonrpc_types::ProposalWindow { + closest: self.tx_proposal_window.0.into(), + farthest: self.tx_proposal_window.1.into(), + }, + proposer_reward_ratio: RationalU256::new_raw( + self.proposer_reward_ratio.numer().into(), + self.proposer_reward_ratio.denom().into(), + ), + cellbase_maturity: self.cellbase_maturity.into(), + median_time_block_count: (self.median_time_block_count as u64).into(), + max_block_cycles: self.max_block_cycles.into(), + max_block_bytes: self.max_block_bytes.into(), + block_version: self.block_version(epoch_number).into(), + tx_version: self.tx_version(epoch_number).into(), + type_id_code_hash: self.type_id_code_hash().to_owned(), + max_block_proposals_limit: self.max_block_proposals_limit.into(), + primary_epoch_reward_halving_interval: self + .primary_epoch_reward_halving_interval + .into(), + permanent_difficulty_in_dummy: self.permanent_difficulty_in_dummy, + } + } } /// Trait for consensus provider. 
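The hunk above replaces the old `From<Consensus>` conversion with an explicit, epoch-aware `to_json`, while `block_version` and `tx_version` are still hard-coded to 0 for every epoch. A minimal sketch of the resulting call pattern, assuming only the `ConsensusBuilder` defaults and the accessors added in this hunk (the test name and epoch values are illustrative, not part of the patch):

use ckb_chain_spec::consensus::ConsensusBuilder;

#[test]
fn epoch_parameterized_versions_sketch() {
    let consensus = ConsensusBuilder::default().build();
    // With the placeholder bodies above, both versions stay 0 for any epoch.
    for epoch_number in [0u64, 10, 10_000] {
        assert_eq!(consensus.block_version(epoch_number), 0);
        assert_eq!(consensus.tx_version(epoch_number), 0);
    }
    // The RPC-facing JSON view is now built from an explicit tip epoch number.
    let _json = consensus.to_json(0);
}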
@@ -961,47 +994,6 @@ impl NextBlockEpoch { } } -impl From for ckb_jsonrpc_types::Consensus { - fn from(consensus: Consensus) -> Self { - Self { - id: consensus.id, - genesis_hash: consensus.genesis_hash.unpack(), - dao_type_hash: consensus.dao_type_hash.map(|h| h.unpack()), - secp256k1_blake160_sighash_all_type_hash: consensus - .secp256k1_blake160_sighash_all_type_hash - .map(|h| h.unpack()), - secp256k1_blake160_multisig_all_type_hash: consensus - .secp256k1_blake160_multisig_all_type_hash - .map(|h| h.unpack()), - initial_primary_epoch_reward: consensus.initial_primary_epoch_reward.into(), - secondary_epoch_reward: consensus.secondary_epoch_reward.into(), - max_uncles_num: (consensus.max_uncles_num as u64).into(), - orphan_rate_target: consensus.orphan_rate_target, - epoch_duration_target: consensus.epoch_duration_target.into(), - tx_proposal_window: ckb_jsonrpc_types::ProposalWindow { - closest: consensus.tx_proposal_window.0.into(), - farthest: consensus.tx_proposal_window.1.into(), - }, - proposer_reward_ratio: RationalU256::new_raw( - consensus.proposer_reward_ratio.numer().into(), - consensus.proposer_reward_ratio.denom().into(), - ), - cellbase_maturity: consensus.cellbase_maturity.into(), - median_time_block_count: (consensus.median_time_block_count as u64).into(), - max_block_cycles: consensus.max_block_cycles.into(), - max_block_bytes: consensus.max_block_bytes.into(), - block_version: consensus.block_version.into(), - tx_version: consensus.tx_version.into(), - type_id_code_hash: consensus.type_id_code_hash, - max_block_proposals_limit: consensus.max_block_proposals_limit.into(), - primary_epoch_reward_halving_interval: consensus - .primary_epoch_reward_halving_interval - .into(), - permanent_difficulty_in_dummy: consensus.permanent_difficulty_in_dummy, - } - } -} - // most simple and efficient way for now fn u256_low_u64(u: U256) -> u64 { u.0[0] diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 091e298969..6a4aae808f 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -16,6 +16,8 @@ pub struct HardForkConfig { pub rfc_pr_0221: Option, /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0223: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0230: Option, } macro_rules! 
check_default { @@ -60,7 +62,8 @@ impl HardForkConfig { ) -> Result { let builder = builder .rfc_pr_0221(check_default!(self, rfc_pr_0221, ckb2021)) - .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)); + .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)) + .rfc_pr_0230(check_default!(self, rfc_pr_0230, ckb2021)); Ok(builder) } @@ -71,6 +74,7 @@ impl HardForkConfig { HardForkSwitch::new_builder() .rfc_pr_0221(self.rfc_pr_0221.unwrap_or(default)) .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) + .rfc_pr_0230(self.rfc_pr_0230.unwrap_or(default)) .build() } } diff --git a/test/src/main.rs b/test/src/main.rs index fd41842e4c..5a37742095 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -491,6 +491,8 @@ fn all_specs() -> Vec> { // Test hard fork features Box::new(CheckAbsoluteEpochSince), Box::new(CheckRelativeEpochSince), + Box::new(CheckBlockVersion), + Box::new(CheckTxVersion), ]; specs.shuffle(&mut thread_rng()); specs diff --git a/test/src/specs/hardfork/v2021/mod.rs b/test/src/specs/hardfork/v2021/mod.rs index 42bdaf1706..d820330f6a 100644 --- a/test/src/specs/hardfork/v2021/mod.rs +++ b/test/src/specs/hardfork/v2021/mod.rs @@ -1,3 +1,5 @@ mod since; +mod version; pub use since::{CheckAbsoluteEpochSince, CheckRelativeEpochSince}; +pub use version::{CheckBlockVersion, CheckTxVersion}; diff --git a/test/src/specs/hardfork/v2021/version.rs b/test/src/specs/hardfork/v2021/version.rs new file mode 100644 index 0000000000..f2451b8d36 --- /dev/null +++ b/test/src/specs/hardfork/v2021/version.rs @@ -0,0 +1,181 @@ +use crate::util::{ + check::{assert_epoch_should_be, assert_submit_block_fail, assert_submit_block_ok}, + mining::{mine, mine_until_epoch, mine_until_out_bootstrap_period}, +}; +use crate::utils::assert_send_transaction_fail; +use crate::{Node, Spec}; +use ckb_logger::info; +use ckb_types::{ + core::{BlockView, TransactionView, Version}, + packed, + prelude::*, +}; + +const GENESIS_EPOCH_LENGTH: u64 = 10; + +const ERROR_BLOCK_VERSION: &str = "Invalid: Header(Version(BlockVersionError("; +const ERROR_TX_VERSION: &str = + "TransactionFailedToVerify: Verification failed Transaction(MismatchedVersion"; + +pub struct CheckBlockVersion; +pub struct CheckTxVersion; + +impl Spec for CheckBlockVersion { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, submit block with version 1 is failed"); + let block = create_block_with_version(node, 1); + assert_submit_block_fail(node, &block, ERROR_BLOCK_VERSION); + } + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, submit block with version 0 is passed"); + let block = create_block_with_version(node, 0); + assert_submit_block_ok(node, &block); + } + assert_epoch_should_be(node, 1, 3, epoch_length); + mine_until_epoch(node, 1, epoch_length - 2, epoch_length); + { + info!("CKB v2019, submit block with version 1 is failed (boundary)"); + let block = create_block_with_version(node, 1); + assert_submit_block_fail(node, &block, ERROR_BLOCK_VERSION); + } + assert_epoch_should_be(node, 1, epoch_length - 2, epoch_length); + { + info!("CKB v2019, submit block with version 0 is passed (boundary)"); + let block = create_block_with_version(node, 0); + assert_submit_block_ok(node, &block); + } + assert_epoch_should_be(node, 1, epoch_length - 1, epoch_length); + { + info!("CKB v2021, submit block with version 1 is passed (boundary)"); + let block = 
create_block_with_version(node, 1); + assert_submit_block_ok(node, &block); + } + assert_epoch_should_be(node, 2, 0, epoch_length); + { + info!("CKB v2021, submit block with version 0 is passed (boundary)"); + let block = create_block_with_version(node, 0); + assert_submit_block_ok(node, &block); + } + assert_epoch_should_be(node, 2, 1, epoch_length); + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0230 = Some(2); + } + } +} + +impl Spec for CheckTxVersion { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + { + let input_cell_hash = &node.get_tip_block().transactions()[0].hash(); + + info!("CKB v2019, submit transaction with version 1 is failed"); + let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 1); + assert_send_transaction_fail(node, &tx, ERROR_TX_VERSION); + + info!("CKB v2019, submit block with version 0 is passed"); + let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 0); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + mine_until_epoch(node, 1, epoch_length - 4, epoch_length); + { + let input_cell_hash = &node.get_tip_block().transactions()[0].hash(); + + info!("CKB v2019, submit transaction with version 1 is failed (boundary)"); + let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 1); + assert_send_transaction_fail(node, &tx, ERROR_TX_VERSION); + + info!("CKB v2019, submit block with version 0 is passed (boundary)"); + let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 0); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + mine(node, 1); + assert_epoch_should_be(node, 1, epoch_length - 3, epoch_length); + { + let input_cell_hash = &node.get_tip_block().transactions()[0].hash(); + info!("CKB v2021, submit transaction with version 1 is passed (boundary)"); + let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 1); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + + let input_cell_hash = &tx.hash(); + info!("CKB v2021, submit block with version 0 is passed (boundary)"); + let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 0); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + + let input_cell_hash = &tx.hash(); + info!("CKB v2021, submit transaction with version 100 is passed (boundary)"); + let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 100); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0230 = Some(2); + } + } +} + +fn create_block_with_version(node: 
&Node, version: Version) -> BlockView { + node.new_block_builder(None, None, None) + .version(version.pack()) + .build() +} + +fn create_transaction_with_version( + node: &Node, + hash: packed::Byte32, + index: u32, + version: Version, +) -> TransactionView { + let always_success_cell_dep = node.always_success_cell_dep(); + let always_success_script = node.always_success_script(); + + let input_cell = node + .rpc_client() + .get_transaction(hash.clone()) + .unwrap() + .transaction + .inner + .outputs[index as usize] + .to_owned(); + + let cell_input = packed::CellInput::new(packed::OutPoint::new(hash, index), 0); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(always_success_script) + .build(); + + TransactionView::new_advanced_builder() + .version(version.pack()) + .cell_dep(always_success_cell_dep) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() +} diff --git a/test/src/specs/rpc/get_block_template.rs b/test/src/specs/rpc/get_block_template.rs index 4b6070e695..50b58842b6 100644 --- a/test/src/specs/rpc/get_block_template.rs +++ b/test/src/specs/rpc/get_block_template.rs @@ -12,7 +12,7 @@ impl Spec for RpcGetBlockTemplate { let node0 = &nodes[0]; let default_bytes_limit = node0.consensus().max_block_bytes; let default_cycles_limit = node0.consensus().max_block_cycles; - let default_block_version = node0.consensus().block_version; + let default_block_version = node0.consensus().block_version(0); let epoch_length = node0.consensus().genesis_epoch_ext().length(); // get block template when tip block is genesis diff --git a/test/src/util/check.rs b/test/src/util/check.rs index 827c2b425a..a52e6bb4bb 100644 --- a/test/src/util/check.rs +++ b/test/src/util/check.rs @@ -1,6 +1,6 @@ use crate::Node; use ckb_jsonrpc_types::Status; -use ckb_types::core::{EpochNumberWithFraction, HeaderView, TransactionView}; +use ckb_types::core::{BlockView, EpochNumberWithFraction, HeaderView, TransactionView}; pub fn is_transaction_pending(node: &Node, transaction: &TransactionView) -> bool { node.rpc_client() @@ -39,3 +39,29 @@ pub fn assert_epoch_should_be(node: &Node, number: u64, index: u64, length: u64) tip_epoch, target_epoch ); } + +pub fn assert_submit_block_fail(node: &Node, block: &BlockView, message: &str) { + let result = node + .rpc_client() + .submit_block("".to_owned(), block.data().into()); + assert!( + result.is_err(), + "expect error \"{}\" but got \"Ok(())\"", + message, + ); + let error = result.expect_err(&format!("block is invalid since {}", message)); + let error_string = error.to_string(); + assert!( + error_string.contains(message), + "expect error \"{}\" but got \"{}\"", + message, + error_string, + ); +} + +pub fn assert_submit_block_ok(node: &Node, block: &BlockView) { + let result = node + .rpc_client() + .submit_block("".to_owned(), block.data().into()); + assert!(result.is_ok(), "expect \"Ok(())\" but got \"{:?}\"", result,); +} diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index 15b3abf35c..be25857be0 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -71,6 +71,7 @@ genesis_epoch_length = 1000 [params.hardfork] rfc_pr_0221 = 9_223_372_036_854_775_807 rfc_pr_0223 = 9_223_372_036_854_775_807 +rfc_pr_0230 = 9_223_372_036_854_775_807 [pow] func = "Dummy" diff --git a/tx-pool/src/block_assembler/mod.rs b/tx-pool/src/block_assembler/mod.rs index 2872dec7af..0b28d2bf8a 100644 --- 
a/tx-pool/src/block_assembler/mod.rs +++ b/tx-pool/src/block_assembler/mod.rs @@ -13,8 +13,8 @@ use ckb_store::ChainStore; use ckb_types::{ bytes::Bytes, core::{ - BlockNumber, Capacity, Cycle, EpochExt, HeaderView, TransactionBuilder, TransactionView, - UncleBlockView, Version, + BlockNumber, Capacity, Cycle, EpochExt, EpochNumber, HeaderView, TransactionBuilder, + TransactionView, UncleBlockView, Version, }, packed::{self, Byte32, CellInput, CellOutput, CellbaseWitness, ProposalShortId, Transaction}, prelude::*, @@ -73,16 +73,22 @@ impl BlockAssembler { bytes_limit: Option, proposals_limit: Option, max_version: Option, + epoch_number: EpochNumber, ) -> (u64, u64, Version) { - let bytes_limit = bytes_limit - .min(Some(consensus.max_block_bytes())) - .unwrap_or_else(|| consensus.max_block_bytes()); + let bytes_limit = { + let default_bytes_limit = consensus.max_block_bytes(); + bytes_limit + .min(Some(default_bytes_limit)) + .unwrap_or(default_bytes_limit) + }; + let default_proposals_limit = consensus.max_block_proposals_limit(); let proposals_limit = proposals_limit - .min(Some(consensus.max_block_proposals_limit())) - .unwrap_or_else(|| consensus.max_block_proposals_limit()); + .min(Some(default_proposals_limit)) + .unwrap_or(default_proposals_limit); + let default_block_version = consensus.block_version(epoch_number); let version = max_version - .min(Some(consensus.block_version())) - .unwrap_or_else(|| consensus.block_version()); + .min(Some(default_block_version)) + .unwrap_or(default_block_version); (bytes_limit, proposals_limit, version) } diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 2e32f47c1f..de025e5994 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -296,11 +296,16 @@ impl TxPoolService { let snapshot = self.snapshot(); let consensus = snapshot.consensus(); let cycles_limit = consensus.max_block_cycles(); + let epoch_number_of_next_block = snapshot + .tip_header() + .epoch() + .minimum_epoch_number_after_n_blocks(1); let (bytes_limit, proposals_limit, version) = BlockAssembler::transform_params( consensus, bytes_limit, proposals_limit, max_version, + epoch_number_of_next_block, ); if let Some(cache) = self diff --git a/util/types/src/constants.rs b/util/types/src/constants.rs deleted file mode 100644 index 44d5527897..0000000000 --- a/util/types/src/constants.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! All Constants. - -use crate::core::Version; - -/// Current transaction version. -pub const TX_VERSION: Version = 0; -/// Current block version. -pub const BLOCK_VERSION: Version = 0; diff --git a/util/types/src/core/advanced_builders.rs b/util/types/src/core/advanced_builders.rs index 4c0e3e7324..6024c7208d 100644 --- a/util/types/src/core/advanced_builders.rs +++ b/util/types/src/core/advanced_builders.rs @@ -1,7 +1,7 @@ //! Advanced builders for Transaction(View), Header(View) and Block(View). use crate::{ - constants, core, packed, + core, packed, prelude::*, utilities::{merkle_root, DIFF_TWO}, }; @@ -16,7 +16,7 @@ use crate::{ /// /// [`TransactionView`]: struct.TransactionView.html /// [`packed::TransactionBuilder`]: ../packed/struct.TransactionBuilder.html -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct TransactionBuilder { pub(crate) version: packed::Uint32, pub(crate) cell_deps: Vec, @@ -69,24 +69,10 @@ pub struct BlockBuilder { * Implement std traits. 
*/ -impl ::std::default::Default for TransactionBuilder { - fn default() -> Self { - Self { - version: constants::TX_VERSION.pack(), - cell_deps: Default::default(), - header_deps: Default::default(), - inputs: Default::default(), - outputs: Default::default(), - witnesses: Default::default(), - outputs_data: Default::default(), - } - } -} - impl ::std::default::Default for HeaderBuilder { fn default() -> Self { Self { - version: constants::BLOCK_VERSION.pack(), + version: Default::default(), parent_hash: Default::default(), timestamp: Default::default(), number: Default::default(), diff --git a/util/types/src/core/error.rs b/util/types/src/core/error.rs index 24d19cbcdd..bb86de3854 100644 --- a/util/types/src/core/error.rs +++ b/util/types/src/core/error.rs @@ -159,6 +159,15 @@ pub enum TransactionError { actual: Version, }, + /// The transaction version is too low, it's deprecated. + #[error("DeprecatedVersion: minimum {}, got {}", minimum, actual)] + DeprecatedVersion { + /// The minimum supported transaction version. + minimum: Version, + /// The actual transaction version. + actual: Version, + }, + /// The transaction size exceeds limit. #[error("ExceededMaximumBlockBytes: expected transaction serialized size ({actual}) < block size limit ({limit})")] ExceededMaximumBlockBytes { @@ -186,7 +195,8 @@ impl TransactionError { TransactionError::Immature { .. } | TransactionError::CellbaseImmaturity { .. } - | TransactionError::MismatchedVersion { .. } => false, + | TransactionError::MismatchedVersion { .. } + | TransactionError::DeprecatedVersion { .. } => false, } } } diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs index 64631e4294..e56a056398 100644 --- a/util/types/src/core/hardfork.rs +++ b/util/types/src/core/hardfork.rs @@ -96,6 +96,7 @@ macro_rules! define_methods { pub struct HardForkSwitch { rfc_pr_0221: EpochNumber, rfc_pr_0223: EpochNumber, + rfc_pr_0230: EpochNumber, } /// Builder for [`HardForkSwitch`]. @@ -114,6 +115,10 @@ pub struct HardForkSwitchBuilder { /// /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0223: Option, + /// Allow unknown block versions and transactions versions. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0230: Option, } impl HardForkSwitch { @@ -127,6 +132,7 @@ impl HardForkSwitch { Self::new_builder() .rfc_pr_0221(self.rfc_pr_0221()) .rfc_pr_0223(self.rfc_pr_0223()) + .rfc_pr_0230(self.rfc_pr_0230()) } /// Creates a new instance that all hard fork features are disabled forever. @@ -135,6 +141,7 @@ impl HardForkSwitch { Self::new_builder() .disable_rfc_pr_0221() .disable_rfc_pr_0223() + .disable_rfc_pr_0230() .build() .unwrap() } @@ -154,6 +161,13 @@ define_methods!( disable_rfc_pr_0223, "RFC PR 0223" ); +define_methods!( + rfc_pr_0230, + allow_unknown_versions, + is_allow_unknown_versions_enabled, + disable_rfc_pr_0230, + "RFC PR 0230" +); impl HardForkSwitchBuilder { /// Build a new [`HardForkSwitch`]. 
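The `define_methods!` invocations above generate the builder setter, the `disable_*` helper, and the `is_*_enabled` activation predicate used elsewhere in this series. A minimal sketch of how the new RFC PR 0230 switch is driven, mirroring the builder pattern used by the verifier tests later in this patch; the activation epoch 5 is illustrative, and the predicate is assumed to be inclusive of the configured epoch, which is what those tests exercise:

use ckb_types::core::hardfork::HardForkSwitch;

#[test]
fn rfc_pr_0230_switch_sketch() {
    // Start from a switch with every fork disabled, then activate only
    // RFC PR 0230 (allow unknown block/tx versions) at epoch 5.
    let switch = HardForkSwitch::new_without_any_enabled()
        .as_builder()
        .rfc_pr_0230(5)
        .build()
        .unwrap();
    // The predicate reports the feature as disabled before the activation
    // epoch and enabled from that epoch onwards.
    assert!(!switch.is_allow_unknown_versions_enabled(4));
    assert!(switch.is_allow_unknown_versions_enabled(5));
}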
@@ -172,9 +186,11 @@ impl HardForkSwitchBuilder { } let rfc_pr_0221 = try_find!(rfc_pr_0221); let rfc_pr_0223 = try_find!(rfc_pr_0223); + let rfc_pr_0230 = try_find!(rfc_pr_0230); Ok(HardForkSwitch { rfc_pr_0221, rfc_pr_0223, + rfc_pr_0230, }) } } diff --git a/util/types/src/lib.rs b/util/types/src/lib.rs index 1e2fc209ad..7849f7cf19 100644 --- a/util/types/src/lib.rs +++ b/util/types/src/lib.rs @@ -14,7 +14,6 @@ mod generated; pub use generated::packed; pub mod core; -pub mod constants; mod conversion; mod extension; pub mod utilities; diff --git a/verification/src/header_verifier.rs b/verification/src/header_verifier.rs index ee34525dc1..ba93bd43ba 100644 --- a/verification/src/header_verifier.rs +++ b/verification/src/header_verifier.rs @@ -6,7 +6,7 @@ use ckb_chain_spec::consensus::Consensus; use ckb_error::Error; use ckb_pow::PowEngine; use ckb_traits::HeaderProvider; -use ckb_types::core::{HeaderView, Version}; +use ckb_types::core::HeaderView; use ckb_verification_traits::Verifier; use faketime::unix_time_as_millis; @@ -31,7 +31,7 @@ impl<'a, DL: HeaderProvider> HeaderVerifier<'a, DL> { impl<'a, DL: HeaderProvider> Verifier for HeaderVerifier<'a, DL> { type Target = HeaderView; fn verify(&self, header: &Self::Target) -> Result<(), Error> { - VersionVerifier::new(header, self.consensus.block_version()).verify()?; + VersionVerifier::new(header, self.consensus).verify()?; // POW check first PowVerifier::new(header, self.consensus.pow_engine().as_ref()).verify()?; let parent = self @@ -53,26 +53,36 @@ impl<'a, DL: HeaderProvider> Verifier for HeaderVerifier<'a, DL> { pub struct VersionVerifier<'a> { header: &'a HeaderView, - block_version: Version, + consensus: &'a Consensus, } impl<'a> VersionVerifier<'a> { - pub fn new(header: &'a HeaderView, block_version: Version) -> Self { - VersionVerifier { - header, - block_version, - } + pub fn new(header: &'a HeaderView, consensus: &'a Consensus) -> Self { + VersionVerifier { header, consensus } } pub fn verify(&self) -> Result<(), Error> { - if self.header.version() != self.block_version { - return Err(BlockVersionError { - expected: self.block_version, - actual: self.header.version(), + let epoch_number = self.header.epoch().number(); + let target = self.consensus.block_version(epoch_number); + let actual = self.header.version(); + let failed = if self + .consensus + .hardfork_switch() + .is_allow_unknown_versions_enabled(epoch_number) + { + actual < target + } else { + actual != target + }; + if failed { + Err(BlockVersionError { + expected: target, + actual, } - .into()); + .into()) + } else { + Ok(()) } - Ok(()) } } diff --git a/verification/src/tests/header_verifier.rs b/verification/src/tests/header_verifier.rs index 3a1cbf8f32..5eaed40491 100644 --- a/verification/src/tests/header_verifier.rs +++ b/verification/src/tests/header_verifier.rs @@ -1,9 +1,14 @@ use crate::header_verifier::{NumberVerifier, PowVerifier, TimestampVerifier, VersionVerifier}; use crate::{BlockVersionError, NumberError, PowError, TimestampError, ALLOWED_FUTURE_BLOCKTIME}; +use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_error::assert_error_eq; use ckb_pow::PowEngine; use ckb_test_chain_utils::{MockMedianTime, MOCK_MEDIAN_TIME_COUNT}; -use ckb_types::{constants::BLOCK_VERSION, core::HeaderBuilder, packed::Header, prelude::*}; +use ckb_types::{ + core::{hardfork::HardForkSwitch, EpochNumberWithFraction, HeaderBuilder}, + packed::Header, + prelude::*, +}; use faketime::unix_time_as_millis; fn mock_median_time_context() -> MockMedianTime { @@ -14,18 +19,53 
@@ fn mock_median_time_context() -> MockMedianTime { #[test] pub fn test_version() { - let header = HeaderBuilder::default() - .version((BLOCK_VERSION + 1).pack()) + let fork_at = 10; + let default_block_version = ConsensusBuilder::default().build().block_version(fork_at); + let epoch = EpochNumberWithFraction::new(fork_at, 0, 10); + let header1 = HeaderBuilder::default() + .version(default_block_version.pack()) + .epoch(epoch.pack()) .build(); - let verifier = VersionVerifier::new(&header, BLOCK_VERSION); - - assert_error_eq!( - verifier.verify().unwrap_err(), - BlockVersionError { - expected: BLOCK_VERSION, - actual: BLOCK_VERSION + 1 - } - ); + let header2 = HeaderBuilder::default() + .version((default_block_version + 1).pack()) + .epoch(epoch.pack()) + .build(); + { + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0230(fork_at + 1) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + let result = VersionVerifier::new(&header1, &consensus).verify(); + assert!(result.is_ok(), "result = {:?}", result); + + let result = VersionVerifier::new(&header2, &consensus).verify(); + assert_error_eq!( + result.unwrap_err(), + BlockVersionError { + expected: default_block_version, + actual: default_block_version + 1 + } + ); + } + { + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0230(fork_at) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + let result = VersionVerifier::new(&header1, &consensus).verify(); + assert!(result.is_ok(), "result = {:?}", result); + + let result = VersionVerifier::new(&header2, &consensus).verify(); + assert!(result.is_ok(), "result = {:?}", result); + } } #[cfg(not(disable_faketime))] diff --git a/verification/src/tests/transaction_verifier.rs b/verification/src/tests/transaction_verifier.rs index bda3972923..f3ea06ed4b 100644 --- a/verification/src/tests/transaction_verifier.rs +++ b/verification/src/tests/transaction_verifier.rs @@ -10,7 +10,6 @@ use ckb_test_chain_utils::{MockMedianTime, MOCK_MEDIAN_TIME_COUNT}; use ckb_traits::HeaderProvider; use ckb_types::{ bytes::Bytes, - constants::TX_VERSION, core::{ capacity_bytes, cell::{CellMetaBuilder, ResolvedTransaction}, @@ -40,18 +39,66 @@ pub fn test_empty() { #[test] pub fn test_version() { - let transaction = TransactionBuilder::default() - .version((TX_VERSION + 1).pack()) + let fork_at = 10; + let default_tx_version = ConsensusBuilder::default().build().tx_version(fork_at); + let tx1 = TransactionBuilder::default() + .version(default_tx_version.pack()) .build(); - let verifier = VersionVerifier::new(&transaction, TX_VERSION); - - assert_error_eq!( - verifier.verify().unwrap_err(), - TransactionError::MismatchedVersion { - expected: 0, - actual: 1 - }, + let rtx1 = create_resolve_tx_with_transaction_info( + &tx1, + MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1), ); + let tx2 = TransactionBuilder::default() + .version((default_tx_version + 1).pack()) + .build(); + let rtx2 = create_resolve_tx_with_transaction_info( + &tx2, + MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1), + ); + let tx_env = { + let epoch = EpochNumberWithFraction::new(fork_at, 0, 10); + let header = HeaderView::new_advanced_builder() + .epoch(epoch.pack()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + + { + let hardfork_switch = 
HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0230(fork_at + 1) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + let result = VersionVerifier::new(&rtx1, &consensus, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + + let result = VersionVerifier::new(&rtx2, &consensus, &tx_env).verify(); + assert_error_eq!( + result.unwrap_err(), + TransactionError::MismatchedVersion { + expected: default_tx_version, + actual: default_tx_version + 1 + }, + ); + } + { + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0230(fork_at) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + let result = VersionVerifier::new(&rtx1, &consensus, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + + let result = VersionVerifier::new(&rtx2, &consensus, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + } } #[test] diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 5d1d57dfdb..21a9df1aaa 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -10,7 +10,7 @@ use ckb_traits::{CellDataProvider, EpochProvider, HeaderProvider}; use ckb_types::{ core::{ cell::{CellMeta, ResolvedTransaction}, - Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, Version, + Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, }, packed::Byte32, prelude::*, @@ -51,13 +51,11 @@ impl<'a, DL: HeaderProvider + ConsensusProvider> TimeRelativeTransactionVerifier /// /// Basic checks that don't depend on any context /// Contains: -/// - Check for version /// - Check for size /// - Check inputs and output empty /// - Check for duplicate deps /// - Check for whether outputs match data pub struct NonContextualTransactionVerifier<'a> { - pub(crate) version: VersionVerifier<'a>, pub(crate) size: SizeVerifier<'a>, pub(crate) empty: EmptyVerifier<'a>, pub(crate) duplicate_deps: DuplicateDepsVerifier<'a>, @@ -68,7 +66,6 @@ impl<'a> NonContextualTransactionVerifier<'a> { /// Creates a new NonContextualTransactionVerifier pub fn new(tx: &'a TransactionView, consensus: &'a Consensus) -> Self { NonContextualTransactionVerifier { - version: VersionVerifier::new(tx, consensus.tx_version()), size: SizeVerifier::new(tx, consensus.max_block_bytes()), empty: EmptyVerifier::new(tx), duplicate_deps: DuplicateDepsVerifier::new(tx), @@ -78,7 +75,6 @@ impl<'a> NonContextualTransactionVerifier<'a> { /// Perform context-independent verification pub fn verify(&self) -> Result<(), Error> { - self.version.verify()?; self.size.verify()?; self.empty.verify()?; self.duplicate_deps.verify()?; @@ -90,12 +86,14 @@ impl<'a> NonContextualTransactionVerifier<'a> { /// Context-dependent verification checks for transaction /// /// Contains: +/// [`VersionVerifier`](./struct.VersionVerifier.html) /// [`MaturityVerifier`](./struct.MaturityVerifier.html) /// [`SinceVerifier`](./struct.SinceVerifier.html) /// [`CapacityVerifier`](./struct.CapacityVerifier.html) /// [`ScriptVerifier`](./struct.ScriptVerifier.html) /// [`FeeCalculator`](./struct.FeeCalculator.html) pub struct ContextualTransactionVerifier<'a, DL> { + pub(crate) version: VersionVerifier<'a>, pub(crate) maturity: MaturityVerifier<'a>, pub(crate) since: SinceVerifier<'a, DL>, pub(crate) capacity: CapacityVerifier<'a>, @@ -115,6 
+113,7 @@ where tx_env: &'a TxVerifyEnv, ) -> Self { ContextualTransactionVerifier { + version: VersionVerifier::new(&rtx, consensus, tx_env), maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), script: ScriptVerifier::new(rtx, data_loader), capacity: CapacityVerifier::new(rtx, consensus.dao_type_hash()), @@ -128,6 +127,7 @@ where /// skip script verify will result in the return value cycle always is zero pub fn verify(&self, max_cycles: Cycle, skip_script_verify: bool) -> Result { let timer = Timer::start(); + self.version.verify()?; self.maturity.verify()?; self.capacity.verify()?; self.since.verify()?; @@ -203,23 +203,45 @@ impl<'a, DL: CellDataProvider + HeaderProvider + EpochProvider> FeeCalculator<'a } pub struct VersionVerifier<'a> { - transaction: &'a TransactionView, - tx_version: Version, + rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, + tx_env: &'a TxVerifyEnv, } impl<'a> VersionVerifier<'a> { - pub fn new(transaction: &'a TransactionView, tx_version: Version) -> Self { + pub fn new( + rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, + tx_env: &'a TxVerifyEnv, + ) -> Self { VersionVerifier { - transaction, - tx_version, + rtx, + consensus, + tx_env, } } pub fn verify(&self) -> Result<(), Error> { - if self.transaction.version() != self.tx_version { + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let target = self.consensus.tx_version(epoch_number); + let actual = self.rtx.transaction.version(); + if self + .consensus + .hardfork_switch() + .is_allow_unknown_versions_enabled(epoch_number) + { + if actual < target { + return Err((TransactionError::DeprecatedVersion { + minimum: target, + actual, + }) + .into()); + } + } else if actual != target { return Err((TransactionError::MismatchedVersion { - expected: self.tx_version, - actual: self.transaction.version(), + expected: target, + actual, }) .into()); } From 3cf555e3654546ff9e5729a245669e247f8cd237 Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Wed, 26 May 2021 01:44:03 +0800 Subject: [PATCH 08/18] feat(hardfork): allow script multiple matches on identical data for type hash-type scripts --- rpc/src/module/experiment.rs | 13 +- script/src/lib.rs | 2 + script/src/verify.rs | 201 ++- .../src/verify_env.rs | 0 spec/src/hardfork.rs | 4 + test/src/main.rs | 5 +- test/src/specs/hardfork/v2021/cell_deps.rs | 1085 +++++++++++++++++ test/src/specs/hardfork/v2021/mod.rs | 5 + test/src/specs/tx_pool/duplicate_cell_deps.rs | 192 --- test/src/specs/tx_pool/mod.rs | 2 - test/src/util/check.rs | 12 + test/template/specs/integration.toml | 1 + util/types/src/core/hardfork.rs | 16 + verification/src/lib.rs | 3 +- verification/src/transaction_verifier.rs | 26 +- 15 files changed, 1329 insertions(+), 238 deletions(-) rename verification/src/tx_verify_env.rs => script/src/verify_env.rs (100%) create mode 100644 test/src/specs/hardfork/v2021/cell_deps.rs delete mode 100644 test/src/specs/tx_pool/duplicate_cell_deps.rs diff --git a/rpc/src/module/experiment.rs b/rpc/src/module/experiment.rs index 74de101b3f..b187462dfa 100644 --- a/rpc/src/module/experiment.rs +++ b/rpc/src/module/experiment.rs @@ -13,7 +13,7 @@ use ckb_types::{ prelude::*, H256, }; -use ckb_verification::ScriptVerifier; +use ckb_verification::{ScriptVerifier, TxVerifyEnv}; use jsonrpc_core::Result; use jsonrpc_derive::rpc; use std::collections::HashSet; @@ -248,8 +248,15 @@ impl<'a> DryRunner<'a> { Ok(resolved) => { let consensus = 
snapshot.consensus(); let max_cycles = consensus.max_block_cycles; - match ScriptVerifier::new(&resolved, &snapshot.as_data_provider()) - .verify(max_cycles) + let tip_header = snapshot.tip_header(); + let tx_env = TxVerifyEnv::new_submit(&tip_header); + match ScriptVerifier::new( + &resolved, + consensus, + &snapshot.as_data_provider(), + &tx_env, + ) + .verify(max_cycles) { Ok(cycles) => Ok(DryRunResult { cycles: cycles.into(), diff --git a/script/src/lib.rs b/script/src/lib.rs index 363b68fa4e..dcd5078855 100644 --- a/script/src/lib.rs +++ b/script/src/lib.rs @@ -6,8 +6,10 @@ mod syscalls; mod type_id; mod types; mod verify; +mod verify_env; pub use crate::error::{ScriptError, TransactionScriptError}; pub use crate::ill_transaction_checker::IllTransactionChecker; pub use crate::types::{ScriptGroup, ScriptGroupType}; pub use crate::verify::TransactionScriptsVerifier; +pub use crate::verify_env::TxVerifyEnv; diff --git a/script/src/verify.rs b/script/src/verify.rs index 7309f79bb6..0a84a409e5 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -7,8 +7,9 @@ use crate::{ }, type_id::TypeIdSystemScript, types::{ScriptGroup, ScriptGroupType}, + verify_env::TxVerifyEnv, }; -use ckb_chain_spec::consensus::TYPE_ID_CODE_HASH; +use ckb_chain_spec::consensus::{Consensus, TYPE_ID_CODE_HASH}; use ckb_error::Error; #[cfg(feature = "logging")] use ckb_logger::{debug, info}; @@ -41,19 +42,49 @@ type CoreMachineType = Box; #[cfg(not(has_asm))] type CoreMachineType = DefaultCoreMachine>>; +enum Binaries { + Unique((Byte32, Bytes)), + Duplicate((Byte32, Bytes)), + Multiple, +} + +impl Binaries { + fn new(data_hash: Byte32, data: Bytes) -> Self { + Self::Unique((data_hash, data)) + } + + fn merge(&mut self, data_hash: &Byte32) { + match self { + Self::Unique(ref old) | Self::Duplicate(ref old) => { + if old.0 != *data_hash { + *self = Self::Multiple; + } else { + *self = Self::Duplicate(old.to_owned()); + } + } + Self::Multiple => { + *self = Self::Multiple; + } + } + } +} + /// This struct leverages CKB VM to verify transaction inputs. /// /// FlatBufferBuilder owned `Vec` that grows as needed, in the /// future, we might refactor this to share buffer to achieve zero-copy pub struct TransactionScriptsVerifier<'a, DL> { data_loader: &'a DL, + consensus: &'a Consensus, + tx_env: &'a TxVerifyEnv, + debug_printer: Box, outputs: Vec, rtx: &'a ResolvedTransaction, binaries_by_data_hash: HashMap, - binaries_by_type_hash: HashMap, + binaries_by_type_hash: HashMap, lock_groups: HashMap, type_groups: HashMap, } @@ -67,7 +98,9 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D /// * `data_loader` - used to load cell data. 
pub fn new( rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, ) -> TransactionScriptsVerifier<'a, DL> { let tx_hash = rtx.transaction.hash(); let resolved_cell_deps = &rtx.resolved_cell_deps; @@ -94,18 +127,18 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D .collect(); let mut binaries_by_data_hash: HashMap = HashMap::default(); - let mut binaries_by_type_hash: HashMap = HashMap::default(); + let mut binaries_by_type_hash: HashMap = HashMap::default(); for cell_meta in resolved_cell_deps { let data = data_loader.load_cell_data(cell_meta).expect("cell data"); let data_hash = data_loader .load_cell_data_hash(cell_meta) .expect("cell data hash"); - binaries_by_data_hash.insert(data_hash, data.to_owned()); + binaries_by_data_hash.insert(data_hash.to_owned(), data.to_owned()); if let Some(t) = &cell_meta.cell_output.type_().to_opt() { binaries_by_type_hash .entry(t.calc_script_hash()) - .and_modify(|e| e.1 = true) - .or_insert((data.to_owned(), false)); + .and_modify(|bin| bin.merge(&data_hash)) + .or_insert_with(|| Binaries::new(data_hash.to_owned(), data.to_owned())); } } @@ -137,6 +170,8 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D TransactionScriptsVerifier { data_loader, + consensus, + tx_env, binaries_by_data_hash, binaries_by_type_hash, outputs, @@ -271,12 +306,23 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D } } ScriptHashType::Type => { - if let Some((data, multiple)) = self.binaries_by_type_hash.get(&script.code_hash()) - { - if *multiple { - Err(ScriptError::MultipleMatches) - } else { - Ok(data.to_owned()) + if let Some(ref bin) = self.binaries_by_type_hash.get(&script.code_hash()) { + match bin { + Binaries::Unique((_, ref data)) => Ok(data.to_owned()), + Binaries::Duplicate((_, ref data)) => { + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + if self + .consensus + .hardfork_switch() + .is_allow_multiple_matches_on_identical_data_enabled(epoch_number) + { + Ok(data.to_owned()) + } else { + Err(ScriptError::MultipleMatches) + } + } + Binaries::Multiple => Err(ScriptError::MultipleMatches), } } else { Err(ScriptError::InvalidCodeHash) @@ -457,8 +503,8 @@ mod tests { use ckb_store::{data_loader_wrapper::DataLoaderWrapper, ChainDB}; use ckb_types::{ core::{ - capacity_bytes, cell::CellMetaBuilder, Capacity, Cycle, DepType, ScriptHashType, - TransactionBuilder, TransactionInfo, + capacity_bytes, cell::CellMetaBuilder, Capacity, Cycle, DepType, HeaderView, + ScriptHashType, TransactionBuilder, TransactionInfo, }, h256, packed::{ @@ -469,7 +515,9 @@ mod tests { }; use faster_hex::hex_encode; - use ckb_chain_spec::consensus::{TWO_IN_TWO_OUT_BYTES, TWO_IN_TWO_OUT_CYCLES}; + use ckb_chain_spec::consensus::{ + ConsensusBuilder, TWO_IN_TWO_OUT_BYTES, TWO_IN_TWO_OUT_CYCLES, + }; use ckb_error::assert_error_eq; use ckb_test_chain_utils::{ always_success_cell, ckb_testnet_consensus, secp256k1_blake160_sighash_cell, @@ -579,8 +627,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); 
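The same ConsensusBuilder / HeaderView / TxVerifyEnv boilerplate is repeated before nearly every TransactionScriptsVerifier::new call in this test module. A hedged sketch of a shared helper follows; the function name default_verify_context is the editor's and does not exist in this patch, while the calls it wraps are exactly the ones used in the tests above and below.

use crate::TxVerifyEnv;
use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder};
use ckb_types::core::HeaderView;

// Build the verification context shared by these tests: a default consensus and a
// TxVerifyEnv derived from an otherwise-empty header in the commit phase.
fn default_verify_context() -> (Consensus, TxVerifyEnv) {
    let consensus = ConsensusBuilder::default().build();
    let header = HeaderView::new_advanced_builder().build();
    let tx_env = TxVerifyEnv::new_commit(&header);
    (consensus, tx_env)
}

Each test could then call let (consensus, tx_env) = default_verify_context(); and pass &consensus and &tx_env into TransactionScriptsVerifier::new, as in the surrounding code.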
assert!(verifier.verify(600).is_ok()); } @@ -639,8 +692,13 @@ mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(100_000_000).is_ok()); @@ -720,8 +778,13 @@ mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(100_000_000).is_ok()); } @@ -812,8 +875,13 @@ mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -877,7 +945,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -929,7 +1003,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -1012,7 +1092,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(100_000_000).is_ok()); } @@ -1086,8 +1172,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -1151,7 +1242,13 @@ 
mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); // Cycles can tell that both lock and type scripts are executed assert_eq!( @@ -1214,8 +1311,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); if let Err(err) = verifier.verify(TYPE_ID_CYCLES * 2) { panic!("expect verification ok, got: {:?}", err); @@ -1276,8 +1378,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(TYPE_ID_CYCLES - 1).unwrap_err(), @@ -1347,8 +1454,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(1_001_000).is_ok()); } @@ -1406,8 +1518,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(1_001_000).is_ok()); } @@ -1480,8 +1597,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(1_001_000).unwrap_err(), @@ -1560,8 +1682,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(1_001_000).unwrap_err(), @@ -1629,8 
+1756,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(TYPE_ID_CYCLES * 2).unwrap_err(), @@ -1786,8 +1918,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); let cycle = verifier.verify(TWO_IN_TWO_OUT_CYCLES).unwrap(); assert!(cycle <= TWO_IN_TWO_OUT_CYCLES); diff --git a/verification/src/tx_verify_env.rs b/script/src/verify_env.rs similarity index 100% rename from verification/src/tx_verify_env.rs rename to script/src/verify_env.rs diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 6a4aae808f..0fe60706d2 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -15,6 +15,8 @@ pub struct HardForkConfig { /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0221: Option, /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0222: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0223: Option, /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0230: Option, @@ -62,6 +64,7 @@ impl HardForkConfig { ) -> Result { let builder = builder .rfc_pr_0221(check_default!(self, rfc_pr_0221, ckb2021)) + .rfc_pr_0222(check_default!(self, rfc_pr_0222, ckb2021)) .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)) .rfc_pr_0230(check_default!(self, rfc_pr_0230, ckb2021)); Ok(builder) @@ -73,6 +76,7 @@ impl HardForkConfig { pub fn complete_with_default(&self, default: EpochNumber) -> Result { HardForkSwitch::new_builder() .rfc_pr_0221(self.rfc_pr_0221.unwrap_or(default)) + .rfc_pr_0222(self.rfc_pr_0222.unwrap_or(default)) .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) .rfc_pr_0230(self.rfc_pr_0230.unwrap_or(default)) .build() diff --git a/test/src/main.rs b/test/src/main.rs index 5a37742095..72686eac96 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -487,12 +487,15 @@ fn all_specs() -> Vec> { Box::new(CellBeingCellDepThenSpentInSameBlockTestSubmitBlock), Box::new(CellBeingCellDepAndSpentInSameBlockTestGetBlockTemplate), Box::new(CellBeingCellDepAndSpentInSameBlockTestGetBlockTemplateMultiple), - Box::new(DuplicateCellDeps), // Test hard fork features Box::new(CheckAbsoluteEpochSince), Box::new(CheckRelativeEpochSince), Box::new(CheckBlockVersion), Box::new(CheckTxVersion), + Box::new(DuplicateCellDepsForDataHashTypeLockScript), + Box::new(DuplicateCellDepsForDataHashTypeTypeScript), + Box::new(DuplicateCellDepsForTypeHashTypeLockScript), + Box::new(DuplicateCellDepsForTypeHashTypeTypeScript), ]; specs.shuffle(&mut thread_rng()); specs diff --git a/test/src/specs/hardfork/v2021/cell_deps.rs b/test/src/specs/hardfork/v2021/cell_deps.rs new file mode 100644 index 0000000000..85d9daf114 --- 
/dev/null +++ b/test/src/specs/hardfork/v2021/cell_deps.rs @@ -0,0 +1,1085 @@ +use crate::{ + util::{ + cell::gen_spendable, + check::{assert_epoch_should_be, assert_epoch_should_less_than, is_transaction_committed}, + mining::{mine, mine_until_bool, mine_until_epoch}, + }, + utils::assert_send_transaction_fail, + Node, Spec, +}; +use ckb_jsonrpc_types as rpc; +use ckb_logger::{debug, info}; +use ckb_types::{ + core::{Capacity, DepType, ScriptHashType, TransactionBuilder, TransactionView}, + packed, + prelude::*, +}; +use std::fmt; + +const GENESIS_EPOCH_LENGTH: u64 = 10; +const CKB2021_START_EPOCH: u64 = 10; + +const TEST_CASES_COUNT: usize = (8 + 4) * 3; +const CELL_DEPS_COUNT: usize = 2 + 3 + 2; +const INITIAL_INPUTS_COUNT: usize = 2 + CELL_DEPS_COUNT + TEST_CASES_COUNT; + +pub struct DuplicateCellDepsForDataHashTypeLockScript; +pub struct DuplicateCellDepsForDataHashTypeTypeScript; +pub struct DuplicateCellDepsForTypeHashTypeLockScript; +pub struct DuplicateCellDepsForTypeHashTypeTypeScript; + +struct NewScript { + data: packed::Bytes, + cell_dep: packed::CellDep, + data_hash: packed::Byte32, + type_hash: packed::Byte32, +} + +#[derive(Debug, Clone, Copy)] +enum ExpectedResult { + ShouldBePassed, + DuplicateCellDeps, + MultipleMatchesLock, + MultipleMatchesType, +} + +const PASS: ExpectedResult = ExpectedResult::ShouldBePassed; +const DUP: ExpectedResult = ExpectedResult::DuplicateCellDeps; +const MML: ExpectedResult = ExpectedResult::MultipleMatchesLock; +const MMT: ExpectedResult = ExpectedResult::MultipleMatchesType; + +// For all: +// - code1 and code2 are cell deps with same data +// - dep_group1 and dep_group2 are cell deps which point to different code cell deps with same data +// - dep_group2 and dep_group2_copy are cell deps which point to same code cell deps +// For type hash type only: +// - code3 has same type with code1 (code2) but different data +// - dep_group3 has same type with dep_group1 (dep_group2, dep_group2_copy) but different data +struct CellDepSet { + code1: packed::CellDep, + code2: packed::CellDep, + dep_group1: packed::CellDep, + dep_group2: packed::CellDep, + dep_group2_copy: packed::CellDep, + code3: packed::CellDep, + dep_group3: packed::CellDep, +} + +struct DuplicateCellDepsTestRunner { + tag: &'static str, +} + +impl Spec for DuplicateCellDepsForDataHashTypeLockScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("data-hash-type/lock-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, None); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.use_new_data_script_replace_lock_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, None); + let tb = TransactionView::new_advanced_builder(); + { + info!("CKB v2019:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut 
inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + } + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl Spec for DuplicateCellDepsForDataHashTypeTypeScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("data-hash-type/type-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, None); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.add_new_data_script_as_type_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, None); + let tb = TransactionView::new_advanced_builder().cell_dep(node.always_success_cell_dep()); + { + info!("CKB v2019:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + } + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + 
runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl Spec for DuplicateCellDepsForTypeHashTypeLockScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("type-hash-type/lock-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script0 = NewScript::new_with_id(node, 0, &mut original_inputs, None); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, Some(&script0)); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.use_new_data_script_replace_type_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, Some(&script0)); + let tb = TransactionView::new_advanced_builder(); + { + info!("CKB v2019:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MML); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MML); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MML); + 
runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + } + assert_epoch_should_be(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl Spec for DuplicateCellDepsForTypeHashTypeTypeScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("type-hash-type/type-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script0 = NewScript::new_with_id(node, 0, &mut original_inputs, None); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, Some(&script0)); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.add_new_type_script_as_type_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, Some(&script0)); + let tb = TransactionView::new_advanced_builder().cell_dep(node.always_success_cell_dep()); + { + info!("CKB v2019:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, 
&tb, MMT); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + } + assert_epoch_should_be(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl NewScript { + fn new_with_id( + node: &Node, + id: u8, + inputs: &mut impl Iterator, + type_script_opt: Option<&Self>, + ) -> Self { + let original_data = node.always_success_raw_data(); + let data = packed::Bytes::new_builder() + .extend(original_data.as_ref().iter().map(|x| (*x).into())) + .push(id.into()) + .build(); + let tx = Self::deploy(node, &data, inputs, type_script_opt); + let cell_dep = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(tx.hash(), 0)) + .dep_type(DepType::Code.into()) + .build(); + let data_hash = packed::CellOutput::calc_data_hash(&data.raw_data()); + let type_hash = tx + .output(0) + .unwrap() + .type_() + .to_opt() + .unwrap() + .calc_script_hash(); + Self { + data, + cell_dep, + data_hash, + type_hash, + } + } + + fn deploy( + node: &Node, + data: &packed::Bytes, + inputs: &mut impl Iterator, + type_script_opt: Option<&Self>, + ) -> TransactionView { + let (type_script, tx_template) = if let Some(script) = type_script_opt { + ( + script.as_data_script(), + 
TransactionView::new_advanced_builder().cell_dep(script.cell_dep()), + ) + } else { + ( + node.always_success_script(), + TransactionView::new_advanced_builder(), + ) + }; + let cell_input = inputs.next().unwrap(); + let cell_output = packed::CellOutput::new_builder() + .type_(Some(type_script).pack()) + .build_exact_capacity(Capacity::bytes(data.len()).unwrap()) + .unwrap(); + let tx = tx_template + .cell_dep(node.always_success_cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(data.clone()) + .build(); + node.submit_transaction(&tx); + mine_until_bool(node, || is_transaction_committed(node, &tx)); + tx + } + + fn data(&self) -> packed::Bytes { + self.data.clone() + } + + fn cell_dep(&self) -> packed::CellDep { + self.cell_dep.clone() + } + + fn as_data_script(&self) -> packed::Script { + packed::Script::new_builder() + .code_hash(self.data_hash.clone()) + .hash_type(ScriptHashType::Data.into()) + .build() + } + + fn as_type_script(&self) -> packed::Script { + packed::Script::new_builder() + .code_hash(self.type_hash.clone()) + .hash_type(ScriptHashType::Type.into()) + .build() + } +} + +impl fmt::Display for ExpectedResult { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::ShouldBePassed => write!(f, "allowed"), + _ => write!(f, "not allowed"), + } + } +} + +impl ExpectedResult { + fn error_message(self) -> Option<&'static str> { + match self { + Self::ShouldBePassed => None, + Self::DuplicateCellDeps => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Transaction(DuplicateCellDeps(", + ), + Self::MultipleMatchesLock => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Script(TransactionScriptError \ + { source: Inputs[0].Lock, cause: MultipleMatches })", + ), + Self::MultipleMatchesType => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Script(TransactionScriptError \ + { source: Inputs[0].Type, cause: MultipleMatches })", + ), + } + } +} + +impl DuplicateCellDepsTestRunner { + fn new(tag: &'static str) -> Self { + Self { tag } + } + + fn submit_transaction_until_committed(&self, node: &Node, tx: &TransactionView) { + debug!( + "[{}] >>> >>> submit: submit transaction {:#x}.", + self.tag, + tx.hash() + ); + node.submit_transaction(tx); + mine_until_bool(node, || is_transaction_committed(node, tx)); + } +} + +// Convert Lock Script or Type Script +impl DuplicateCellDepsTestRunner { + fn create_initial_inputs( + &self, + node: &Node, + txs: Vec, + ) -> impl Iterator { + for tx in &txs { + node.rpc_client().send_transaction(tx.data().into()); + } + mine_until_bool(node, || { + txs.iter().all(|tx| is_transaction_committed(node, &tx)) + }); + txs.into_iter().map(|tx| { + let out_point = packed::OutPoint::new(tx.hash(), 0); + packed::CellInput::new(out_point, 0) + }) + } + + fn get_previous_output(node: &Node, cell_input: &packed::CellInput) -> rpc::CellOutput { + let previous_output = cell_input.previous_output(); + let previous_output_index: usize = previous_output.index().unpack(); + node.rpc_client() + .get_transaction(previous_output.tx_hash()) + .unwrap() + .transaction + .inner + .outputs[previous_output_index] + .clone() + } + + fn use_new_data_script_replace_lock_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs + .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = 
packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(new_script.as_data_script()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } + + fn add_new_data_script_as_type_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs + .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(node.always_success_script()) + .type_(Some(new_script.as_data_script()).pack()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } + + fn use_new_data_script_replace_type_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs + .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(new_script.as_type_script()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } + + fn add_new_type_script_as_type_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs + .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(node.always_success_script()) + .type_(Some(new_script.as_type_script()).pack()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } +} + +// Create All Cell Deps for Test +impl DuplicateCellDepsTestRunner { + fn create_cell_dep_set( + &self, + node: &Node, + inputs: &mut impl Iterator, + script: &NewScript, + type_script_opt: Option<&NewScript>, + ) -> CellDepSet { + let code_txs = { + let tx_template = { + let script_output = if let Some(type_script) = type_script_opt { + packed::CellOutput::new_builder() + .type_(Some(type_script.as_data_script()).pack()) + } else { + packed::CellOutput::new_builder() + } + .build_exact_capacity(Capacity::bytes(script.data().len()).unwrap()) + .unwrap(); + if let Some(type_script) = type_script_opt { + TransactionView::new_advanced_builder().cell_dep(type_script.cell_dep()) + } else { + TransactionView::new_advanced_builder() + } + .output(script_output) + .output_data(script.data()) + }; + self.create_transactions_as_code_type_cell_deps(node, inputs, &tx_template) + }; + + let dep_group_txs = { + let tx_template = 
TransactionView::new_advanced_builder(); + self.create_transactions_as_depgroup_type_cell_deps( + node, + inputs, + &tx_template, + &code_txs, + ) + }; + let incorrect_opt = type_script_opt.map(|type_script| { + self.create_transactions_as_incorrect_cell_deps(node, inputs, type_script) + }); + self.combine_cell_deps(code_txs, dep_group_txs, incorrect_opt) + } + + fn create_transactions_as_code_type_cell_deps( + &self, + node: &Node, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + ) -> (TransactionView, TransactionView) { + info!( + "[{}] >>> warm up: create 2 transactions as code-type cell deps.", + self.tag + ); + let tx_template = tx_template.clone().cell_dep(node.always_success_cell_dep()); + let dep1_tx = tx_template.clone().input(inputs.next().unwrap()).build(); + let dep2_tx = tx_template.input(inputs.next().unwrap()).build(); + self.submit_transaction_until_committed(node, &dep1_tx); + self.submit_transaction_until_committed(node, &dep2_tx); + (dep1_tx, dep2_tx) + } + + fn create_transactions_as_depgroup_type_cell_deps( + &self, + node: &Node, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + code_txs: &(TransactionView, TransactionView), + ) -> (TransactionView, TransactionView, TransactionView) { + info!( + "[{}] >>> warm up: create 3 transactions as depgroup-type cell deps.", + self.tag + ); + let (ref dep1_tx, ref dep2_tx) = code_txs; + let tx_template = tx_template.clone().cell_dep(node.always_success_cell_dep()); + let dep1_op = packed::OutPoint::new(dep1_tx.hash(), 0); + let dep2_op = packed::OutPoint::new(dep2_tx.hash(), 0); + let dep3_data = vec![dep1_op].pack().as_bytes().pack(); + let dep4_data = vec![dep2_op].pack().as_bytes().pack(); + let dep3_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::bytes(dep3_data.len()).unwrap()) + .unwrap(); + let dep4_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::bytes(dep4_data.len()).unwrap()) + .unwrap(); + let dep3_tx = tx_template + .clone() + .input(inputs.next().unwrap()) + .output(dep3_output) + .output_data(dep3_data) + .build(); + let dep4_tx = tx_template + .clone() + .input(inputs.next().unwrap()) + .output(dep4_output.clone()) + .output_data(dep4_data.clone()) + .build(); + let dep4b_tx = tx_template + .input(inputs.next().unwrap()) + .output(dep4_output) + .output_data(dep4_data) + .build(); + self.submit_transaction_until_committed(node, &dep3_tx); + self.submit_transaction_until_committed(node, &dep4_tx); + self.submit_transaction_until_committed(node, &dep4b_tx); + (dep3_tx, dep4_tx, dep4b_tx) + } + + fn create_transactions_as_incorrect_cell_deps( + &self, + node: &Node, + inputs: &mut impl Iterator, + type_script: &NewScript, + ) -> (TransactionView, TransactionView) { + info!( + "[{}] >>> warm up: create 2 transactions as incorrect cell deps.", + self.tag + ); + let original_data = node.always_success_raw_data(); + let dep5_data = packed::Bytes::new_builder() + .extend(original_data.as_ref().iter().map(|x| (*x).into())) + .build(); + let dep5_output = packed::CellOutput::new_builder() + .type_(Some(type_script.as_data_script()).pack()) + .build_exact_capacity(Capacity::bytes(dep5_data.len()).unwrap()) + .unwrap(); + let dep5_tx = TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(type_script.cell_dep()) + .input(inputs.next().unwrap()) + .output(dep5_output) + .output_data(dep5_data) + .build(); + let dep5_op = packed::OutPoint::new(dep5_tx.hash(), 0); + let dep6_data = 
vec![dep5_op].pack().as_bytes().pack(); + let dep6_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::bytes(dep6_data.len()).unwrap()) + .unwrap(); + let dep6_tx = TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .input(inputs.next().unwrap()) + .output(dep6_output) + .output_data(dep6_data) + .build(); + self.submit_transaction_until_committed(node, &dep5_tx); + self.submit_transaction_until_committed(node, &dep6_tx); + (dep5_tx, dep6_tx) + } + + fn combine_cell_deps( + &self, + code_txs: (TransactionView, TransactionView), + dep_group_txs: (TransactionView, TransactionView, TransactionView), + incorrect_opt: Option<(TransactionView, TransactionView)>, + ) -> CellDepSet { + info!("[{}] >>> warm up: create all cell deps for test.", self.tag); + let (dep1_tx, dep2_tx) = code_txs; + let dep1_op = packed::OutPoint::new(dep1_tx.hash(), 0); + let dep2_op = packed::OutPoint::new(dep2_tx.hash(), 0); + let code1 = packed::CellDep::new_builder() + .out_point(dep1_op) + .dep_type(DepType::Code.into()) + .build(); + let code2 = packed::CellDep::new_builder() + .out_point(dep2_op) + .dep_type(DepType::Code.into()) + .build(); + let (dep3_tx, dep4_tx, dep4b_tx) = dep_group_txs; + let dep_group1 = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep3_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + let dep_group2 = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep4_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + let dep_group2_copy = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep4b_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + let (code3, dep_group3) = if let Some((dep5_tx, dep6_tx)) = incorrect_opt { + let dep3_op = packed::OutPoint::new(dep5_tx.hash(), 0); + let code3 = packed::CellDep::new_builder() + .out_point(dep3_op) + .dep_type(DepType::Code.into()) + .build(); + let dep_group3 = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep6_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + (code3, dep_group3) + } else { + (Default::default(), Default::default()) + }; + CellDepSet { + code1, + code2, + dep_group1, + dep_group2, + dep_group2_copy, + code3, + dep_group3, + } + } +} + +// Implementation All Test Cases +impl DuplicateCellDepsTestRunner { + fn test_result( + &self, + node: &Node, + inputs: &mut impl Iterator, + tx_builder: TransactionBuilder, + expected: ExpectedResult, + ) { + let empty_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::shannons(0)) + .unwrap(); + let tx = tx_builder + .input(inputs.next().unwrap()) + .output(empty_output) + .output_data(Default::default()) + .build(); + if let Some(errmsg) = expected.error_message() { + assert_send_transaction_fail(node, &tx, &errmsg); + } else { + self.submit_transaction_until_committed(node, &tx); + } + } + + fn test_single_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate code-type cell deps is {}.", + self.tag, expected + ); + let tx = tx_template.clone().cell_dep(deps.code1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_single_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate code-type 
cell deps is {}.", + self.tag, expected + ); + let tx = tx_template.clone().cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_duplicate_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate code-type cell deps is {}.", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.code1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_data_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two code-type cell deps have same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.code2.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_data_hybrid_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: hybrid-type cell deps have same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_duplicate_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate dep_group-type cell deps is {}.", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group1.clone()) + .cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_data_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two dep_group-type cell deps have same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group1.clone()) + .cell_dep(deps.dep_group2.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_point_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two dep_group-type cell deps have a same point is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group2.clone()) + .cell_dep(deps.dep_group2_copy.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two code-type cell deps have same type but not same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.code3.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_hybrid_type_v1( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two hybrid-type cell deps have same type but not same data v1 is {}", + 
self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.dep_group3.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_hybrid_type_v2( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two hybrid-type cell deps have same type but not same data v2 is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code3.clone()) + .cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two dep_group-type cell deps have same type but not same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group1.clone()) + .cell_dep(deps.dep_group3.clone()); + self.test_result(node, inputs, tx, expected); + } +} diff --git a/test/src/specs/hardfork/v2021/mod.rs b/test/src/specs/hardfork/v2021/mod.rs index d820330f6a..920756f286 100644 --- a/test/src/specs/hardfork/v2021/mod.rs +++ b/test/src/specs/hardfork/v2021/mod.rs @@ -1,5 +1,10 @@ +mod cell_deps; mod since; mod version; +pub use cell_deps::{ + DuplicateCellDepsForDataHashTypeLockScript, DuplicateCellDepsForDataHashTypeTypeScript, + DuplicateCellDepsForTypeHashTypeLockScript, DuplicateCellDepsForTypeHashTypeTypeScript, +}; pub use since::{CheckAbsoluteEpochSince, CheckRelativeEpochSince}; pub use version::{CheckBlockVersion, CheckTxVersion}; diff --git a/test/src/specs/tx_pool/duplicate_cell_deps.rs b/test/src/specs/tx_pool/duplicate_cell_deps.rs deleted file mode 100644 index e0593b648d..0000000000 --- a/test/src/specs/tx_pool/duplicate_cell_deps.rs +++ /dev/null @@ -1,192 +0,0 @@ -use crate::{ - util::{cell::gen_spendable, check::is_transaction_committed, mining::mine_until_bool}, - utils::assert_send_transaction_fail, - Node, Spec, -}; -use ckb_logger::info; -use ckb_types::{ - core::{Capacity, DepType, TransactionBuilder}, - packed, - prelude::*, -}; - -pub struct DuplicateCellDeps; - -impl Spec for DuplicateCellDeps { - fn run(&self, nodes: &mut Vec) { - let node0 = &nodes[0]; - let always_success_bytes: packed::Bytes = node0.always_success_raw_data().pack(); - let always_success_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::bytes(always_success_bytes.len()).unwrap()) - .unwrap(); - let empty_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::shannons(0)) - .unwrap(); - - let mut initial_inputs = gen_spendable(node0, 2 + 3 + 6) - .into_iter() - .map(|input| packed::CellInput::new(input.out_point, 0)); - - info!("warm up: create 2 transactions as code-type cell deps."); - let dep1_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(always_success_output.clone()) - .output_data(always_success_bytes.clone()) - .build(); - let dep2_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(always_success_output) - .output_data(always_success_bytes) - .build(); - node0.submit_transaction(&dep1_tx); - node0.submit_transaction(&dep2_tx); - mine_until_bool(node0, || is_transaction_committed(node0, &dep1_tx)); - 
mine_until_bool(node0, || is_transaction_committed(node0, &dep2_tx)); - - info!("warm up: create 3 transactions as depgroup-type cell deps."); - let dep1_op = packed::OutPoint::new(dep1_tx.hash(), 0); - let dep2_op = packed::OutPoint::new(dep2_tx.hash(), 0); - let dep3_data = vec![dep1_op.clone()].pack().as_bytes().pack(); - let dep4_data = vec![dep2_op.clone()].pack().as_bytes().pack(); - let dep3_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::bytes(dep3_data.len()).unwrap()) - .unwrap(); - let dep4_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::bytes(dep4_data.len()).unwrap()) - .unwrap(); - let dep3_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(dep3_output) - .output_data(dep3_data) - .build(); - let dep4_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(dep4_output.clone()) - .output_data(dep4_data.clone()) - .build(); - let dep4b_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(dep4_output) - .output_data(dep4_data) - .build(); - node0.submit_transaction(&dep3_tx); - node0.submit_transaction(&dep4_tx); - node0.submit_transaction(&dep4b_tx); - mine_until_bool(node0, || is_transaction_committed(node0, &dep3_tx)); - mine_until_bool(node0, || is_transaction_committed(node0, &dep4_tx)); - mine_until_bool(node0, || is_transaction_committed(node0, &dep4b_tx)); - - info!("warm up: create all cell deps for test."); - let dep1 = packed::CellDep::new_builder() - .out_point(dep1_op) - .dep_type(DepType::Code.into()) - .build(); - let dep2 = packed::CellDep::new_builder() - .out_point(dep2_op) - .dep_type(DepType::Code.into()) - .build(); - let dep3 = packed::CellDep::new_builder() - .out_point(packed::OutPoint::new(dep3_tx.hash(), 0)) - .dep_type(DepType::DepGroup.into()) - .build(); - let dep4 = packed::CellDep::new_builder() - .out_point(packed::OutPoint::new(dep4_tx.hash(), 0)) - .dep_type(DepType::DepGroup.into()) - .build(); - let dep4b = packed::CellDep::new_builder() - .out_point(packed::OutPoint::new(dep4b_tx.hash(), 0)) - .dep_type(DepType::DepGroup.into()) - .build(); - - { - info!("test: duplicate code-type cell deps is not allowed."); - let tx = TransactionBuilder::default() - .cell_dep(dep1.clone()) - .cell_dep(dep1.clone()) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - assert_send_transaction_fail( - node0, - &tx, - "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ - Verification failed Transaction(DuplicateCellDeps(", - ); - } - - { - info!("test: two code-type cell deps have same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep1.clone()) - .cell_dep(dep2) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - - { - info!("test: hybrid types cell deps have same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep1) - .cell_dep(dep3.clone()) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - - { - 
info!("test: duplicate depgroup-type cell deps is not allowed."); - let tx = TransactionBuilder::default() - .cell_dep(dep3.clone()) - .cell_dep(dep3.clone()) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - assert_send_transaction_fail( - node0, - &tx, - "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ - Verification failed Transaction(DuplicateCellDeps(", - ); - } - - { - info!("test: two depgroup-type cell deps have same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep4.clone()) - .cell_dep(dep4b) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - - { - info!("test: two depgroup-type cell deps point to same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep3) - .cell_dep(dep4) - .input(initial_inputs.next().unwrap()) - .output(empty_output) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - } -} diff --git a/test/src/specs/tx_pool/mod.rs b/test/src/specs/tx_pool/mod.rs index 938d8cebbb..29edb92d6c 100644 --- a/test/src/specs/tx_pool/mod.rs +++ b/test/src/specs/tx_pool/mod.rs @@ -4,7 +4,6 @@ mod dead_cell_deps; mod depend_tx_in_same_block; mod descendant; mod different_txs_with_same_input; -mod duplicate_cell_deps; mod limit; mod pool_reconcile; mod pool_resurrect; @@ -27,7 +26,6 @@ pub use dead_cell_deps::*; pub use depend_tx_in_same_block::*; pub use descendant::*; pub use different_txs_with_same_input::*; -pub use duplicate_cell_deps::*; pub use limit::*; pub use pool_reconcile::*; pub use pool_resurrect::*; diff --git a/test/src/util/check.rs b/test/src/util/check.rs index a52e6bb4bb..36e05e5c6b 100644 --- a/test/src/util/check.rs +++ b/test/src/util/check.rs @@ -40,6 +40,18 @@ pub fn assert_epoch_should_be(node: &Node, number: u64, index: u64, length: u64) ); } +pub fn assert_epoch_should_less_than(node: &Node, number: u64, index: u64, length: u64) { + let tip_header: HeaderView = node.rpc_client().get_tip_header().into(); + let tip_epoch = tip_header.epoch(); + let target_epoch = EpochNumberWithFraction::new(number, index, length); + assert!( + tip_epoch < target_epoch, + "current tip epoch is {}, but expect epoch less than {}", + tip_epoch, + target_epoch + ); +} + pub fn assert_submit_block_fail(node: &Node, block: &BlockView, message: &str) { let result = node .rpc_client() diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index be25857be0..4af7be6449 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -70,6 +70,7 @@ genesis_epoch_length = 1000 [params.hardfork] rfc_pr_0221 = 9_223_372_036_854_775_807 +rfc_pr_0222 = 9_223_372_036_854_775_807 rfc_pr_0223 = 9_223_372_036_854_775_807 rfc_pr_0230 = 9_223_372_036_854_775_807 diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs index e56a056398..78fafd8a6b 100644 --- a/util/types/src/core/hardfork.rs +++ b/util/types/src/core/hardfork.rs @@ -95,6 +95,7 @@ macro_rules! 
define_methods { #[derive(Debug, Clone)] pub struct HardForkSwitch { rfc_pr_0221: EpochNumber, + rfc_pr_0222: EpochNumber, rfc_pr_0223: EpochNumber, rfc_pr_0230: EpochNumber, } @@ -110,6 +111,10 @@ pub struct HardForkSwitchBuilder { /// /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0221: Option, + /// Allow script multiple matches on identical data for type hash-type scripts. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0222: Option, /// In the "since epoch", the index should be less than length and /// the length should be greater than zero. /// @@ -131,6 +136,7 @@ impl HardForkSwitch { pub fn as_builder(&self) -> HardForkSwitchBuilder { Self::new_builder() .rfc_pr_0221(self.rfc_pr_0221()) + .rfc_pr_0222(self.rfc_pr_0222()) .rfc_pr_0223(self.rfc_pr_0223()) .rfc_pr_0230(self.rfc_pr_0230()) } @@ -140,6 +146,7 @@ impl HardForkSwitch { // Use a builder to ensure all features are set manually. Self::new_builder() .disable_rfc_pr_0221() + .disable_rfc_pr_0222() .disable_rfc_pr_0223() .disable_rfc_pr_0230() .build() @@ -154,6 +161,13 @@ define_methods!( disable_rfc_pr_0221, "RFC PR 0221" ); +define_methods!( + rfc_pr_0222, + allow_multiple_matches_on_identical_data, + is_allow_multiple_matches_on_identical_data_enabled, + disable_rfc_pr_0222, + "RFC PR 0222" +); define_methods!( rfc_pr_0223, check_length_in_epoch_since, @@ -185,10 +199,12 @@ impl HardForkSwitchBuilder { }; } let rfc_pr_0221 = try_find!(rfc_pr_0221); + let rfc_pr_0222 = try_find!(rfc_pr_0222); let rfc_pr_0223 = try_find!(rfc_pr_0223); let rfc_pr_0230 = try_find!(rfc_pr_0230); Ok(HardForkSwitch { rfc_pr_0221, + rfc_pr_0222, rfc_pr_0223, rfc_pr_0230, }) diff --git a/verification/src/lib.rs b/verification/src/lib.rs index 851e7d0f3f..3edcaa1dcb 100644 --- a/verification/src/lib.rs +++ b/verification/src/lib.rs @@ -8,7 +8,6 @@ mod error; mod genesis_verifier; mod header_verifier; mod transaction_verifier; -mod tx_verify_env; #[cfg(test)] mod tests; @@ -25,7 +24,7 @@ pub use crate::transaction_verifier::{ ContextualTransactionVerifier, NonContextualTransactionVerifier, ScriptVerifier, Since, SinceMetric, TimeRelativeTransactionVerifier, TransactionVerifier, }; -pub use crate::tx_verify_env::TxVerifyEnv; +pub use ckb_script::TxVerifyEnv; /// Maximum amount of time that a block timestamp is allowed to exceed the /// current time before the block will be accepted. 
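(For reference, a minimal sketch of how the new rfc_pr_0222 switch could be driven from calling code, using only the builder methods visible in the hardfork.rs hunks above. The ckb_types::core::hardfork import path, the fn main wrapper, and the epoch-number argument of the generated is_*_enabled predicate are assumptions for illustration, not taken from this patch.)

use ckb_types::core::hardfork::HardForkSwitch;

fn main() {
    // Enable only RFC PR 0222 starting from epoch 200; the other switches are
    // disabled explicitly because HardForkSwitchBuilder::build() expects every
    // switch to be set before it succeeds (see the try_find! checks above).
    let switch = HardForkSwitch::new_builder()
        .disable_rfc_pr_0221()
        .rfc_pr_0222(200)
        .disable_rfc_pr_0223()
        .disable_rfc_pr_0230()
        .build()
        .expect("every hardfork switch has been set");

    // Predicate name generated by the define_methods! invocation above;
    // assumed to take the epoch number being checked.
    assert!(switch.is_allow_multiple_matches_on_identical_data_enabled(200));
    assert!(!switch.is_allow_multiple_matches_on_identical_data_enabled(199));
}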
diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 21a9df1aaa..30e388a1ae 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -115,7 +115,7 @@ where ContextualTransactionVerifier { version: VersionVerifier::new(&rtx, consensus, tx_env), maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), - script: ScriptVerifier::new(rtx, data_loader), + script: ScriptVerifier::new(rtx, consensus, data_loader, tx_env), capacity: CapacityVerifier::new(rtx, consensus.dao_type_hash()), since: SinceVerifier::new(rtx, consensus, data_loader, tx_env), fee_calculator: FeeCalculator::new(rtx, consensus, data_loader), @@ -282,24 +282,38 @@ impl<'a> SizeVerifier<'a> { /// - [ckb-vm](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0003-ckb-vm/0003-ckb-vm.md) /// - [vm-cycle-limits](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0014-vm-cycle-limits/0014-vm-cycle-limits.md) pub struct ScriptVerifier<'a, DL> { - data_loader: &'a DL, resolved_transaction: &'a ResolvedTransaction, + consensus: &'a Consensus, + data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, } impl<'a, DL: CellDataProvider + HeaderProvider> ScriptVerifier<'a, DL> { /// Creates a new ScriptVerifier - pub fn new(resolved_transaction: &'a ResolvedTransaction, data_loader: &'a DL) -> Self { + pub fn new( + resolved_transaction: &'a ResolvedTransaction, + consensus: &'a Consensus, + data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, + ) -> Self { ScriptVerifier { - data_loader, resolved_transaction, + consensus, + data_loader, + tx_env, } } /// Perform script verification pub fn verify(&self, max_cycles: Cycle) -> Result { let timer = Timer::start(); - let cycle = TransactionScriptsVerifier::new(&self.resolved_transaction, self.data_loader) - .verify(max_cycles)?; + let cycle = TransactionScriptsVerifier::new( + &self.resolved_transaction, + self.consensus, + self.data_loader, + self.tx_env, + ) + .verify(max_cycles)?; metrics!(timing, "ckb.verified_script", timer.stop()); Ok(cycle) } From 5a7efe7a0b720de79ff3761dc6e8424b8d5b22ea Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Wed, 26 May 2021 07:23:13 +0800 Subject: [PATCH 09/18] feat(hardfork): reuse the uncles hash in the header as the extra hash --- db-schema/src/lib.rs | 5 +- freezer/src/freezer.rs | 10 +- spec/src/hardfork.rs | 4 + spec/src/lib.rs | 2 +- store/src/cache.rs | 5 +- store/src/store.rs | 56 ++- store/src/transaction.rs | 16 +- store/src/write_batch.rs | 5 +- sync/src/relayer/mod.rs | 50 ++- sync/src/synchronizer/mod.rs | 48 ++- test/src/main.rs | 1 + test/src/specs/hardfork/v2021/extension.rs | 185 +++++++++ test/src/specs/hardfork/v2021/mod.rs | 2 + test/template/specs/integration.toml | 1 + tx-pool/src/process.rs | 1 + util/app-config/src/configs/store.rs | 8 + util/jsonrpc-types/src/block_template.rs | 73 +++- util/jsonrpc-types/src/blockchain.rs | 51 ++- util/types/schemas/blockchain.mol | 10 +- util/types/schemas/extensions.mol | 9 + util/types/src/core/advanced_builders.rs | 55 ++- util/types/src/core/hardfork.rs | 16 + util/types/src/core/mod.rs | 4 +- util/types/src/core/views.rs | 143 ++++++- util/types/src/extension/calc_hash.rs | 43 +- util/types/src/extension/shortcuts.rs | 132 +++++- util/types/src/generated/blockchain.rs | 365 ++++++++++++++++- util/types/src/generated/extensions.rs | 382 ++++++++++++++++++ .../contextual/src/tests/uncle_verifier.rs | 2 +- .../contextual/src/uncles_verifier.rs | 18 +- 
verification/src/block_verifier.rs | 53 +++ verification/src/error.rs | 12 + 32 files changed, 1647 insertions(+), 120 deletions(-) create mode 100644 test/src/specs/hardfork/v2021/extension.rs diff --git a/db-schema/src/lib.rs b/db-schema/src/lib.rs index f17c9cc96e..78dfe13811 100644 --- a/db-schema/src/lib.rs +++ b/db-schema/src/lib.rs @@ -3,7 +3,7 @@ /// Column families alias type pub type Col = &'static str; /// Total column number -pub const COLUMNS: u32 = 14; +pub const COLUMNS: u32 = 15; /// Column store chain index pub const COLUMN_INDEX: Col = "0"; /// Column store block's header @@ -35,6 +35,9 @@ pub const COLUMN_CELL_DATA: Col = "12"; /// Column store block number-hash pair pub const COLUMN_NUMBER_HASH: Col = "13"; +/// Column store block extension data +pub const COLUMN_BLOCK_EXTENSION: Col = "14"; + /// META_TIP_HEADER_KEY tracks the latest known best block header pub const META_TIP_HEADER_KEY: &[u8] = b"TIP_HEADER"; /// META_CURRENT_EPOCH_KEY tracks the latest known epoch diff --git a/freezer/src/freezer.rs b/freezer/src/freezer.rs index 0390e43418..6bec32b882 100644 --- a/freezer/src/freezer.rs +++ b/freezer/src/freezer.rs @@ -55,9 +55,12 @@ impl Freezer { .retrieve(freezer_number - 1) .map_err(internal_error)? .ok_or_else(|| internal_error("freezer inconsistent"))?; - let block = packed::BlockReader::from_slice(&raw_block) + let block = packed::BlockReader::from_compatible_slice(&raw_block) .map_err(internal_error)? .to_entity(); + if block.count_extra_fields() > 1 { + return Err(internal_error("block has more than one extra fields")); + } tip = Some(block.header().into_view()); } @@ -147,9 +150,12 @@ impl Freezer { .retrieve(item) .map_err(internal_error)? .expect("frozen number sync with files"); - let block = packed::BlockReader::from_slice(&raw_block) + let block = packed::BlockReader::from_compatible_slice(&raw_block) .map_err(internal_error)? 
.to_entity(); + if block.count_extra_fields() > 1 { + return Err(internal_error("block has more than one extra fields")); + } inner.tip = Some(block.header().into_view()); } Ok(()) diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 0fe60706d2..c3f05e51d5 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -19,6 +19,8 @@ pub struct HardForkConfig { /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0223: Option, /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0224: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0230: Option, } @@ -66,6 +68,7 @@ impl HardForkConfig { .rfc_pr_0221(check_default!(self, rfc_pr_0221, ckb2021)) .rfc_pr_0222(check_default!(self, rfc_pr_0222, ckb2021)) .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)) + .rfc_pr_0224(check_default!(self, rfc_pr_0224, ckb2021)) .rfc_pr_0230(check_default!(self, rfc_pr_0230, ckb2021)); Ok(builder) } @@ -78,6 +81,7 @@ impl HardForkConfig { .rfc_pr_0221(self.rfc_pr_0221.unwrap_or(default)) .rfc_pr_0222(self.rfc_pr_0222.unwrap_or(default)) .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) + .rfc_pr_0224(self.rfc_pr_0224.unwrap_or(default)) .rfc_pr_0230(self.rfc_pr_0230.unwrap_or(default)) .build() } diff --git a/spec/src/lib.rs b/spec/src/lib.rs index 74a87a71d6..aad021eab3 100644 --- a/spec/src/lib.rs +++ b/spec/src/lib.rs @@ -563,7 +563,7 @@ impl ChainSpec { .parent_hash(self.genesis.parent_hash.pack()) .timestamp(self.genesis.timestamp.pack()) .compact_target(self.genesis.compact_target.pack()) - .uncles_hash(self.genesis.uncles_hash.pack()) + .extra_hash(self.genesis.uncles_hash.pack()) .dao(dao) .nonce(u128::from_le_bytes(self.genesis.nonce.to_le_bytes()).pack()) .transaction(cellbase_transaction) diff --git a/store/src/cache.rs b/store/src/cache.rs index 07d3156b0e..c3e138b512 100644 --- a/store/src/cache.rs +++ b/store/src/cache.rs @@ -2,7 +2,7 @@ use ckb_app_config::StoreConfig; use ckb_types::{ bytes::Bytes, core::{HeaderView, TransactionView, UncleBlockVecView}, - packed::{Byte32, ProposalShortIdVec}, + packed::{self, Byte32, ProposalShortIdVec}, }; use ckb_util::Mutex; use lru::LruCache; @@ -19,6 +19,8 @@ pub struct StoreCache { pub block_tx_hashes: Mutex>>, /// TODO(doc): @quake pub block_uncles: Mutex>, + /// The cache of block extension sections. 
+ pub block_extensions: Mutex>>, /// TODO(doc): @quake pub cellbase: Mutex>, } @@ -38,6 +40,7 @@ impl StoreCache { block_proposals: Mutex::new(LruCache::new(config.block_proposals_cache_size)), block_tx_hashes: Mutex::new(LruCache::new(config.block_tx_hashes_cache_size)), block_uncles: Mutex::new(LruCache::new(config.block_uncles_cache_size)), + block_extensions: Mutex::new(LruCache::new(config.block_extensions_cache_size)), cellbase: Mutex::new(LruCache::new(config.cellbase_cache_size)), } } diff --git a/store/src/store.rs b/store/src/store.rs index a82b02cd57..dcc7e08d6a 100644 --- a/store/src/store.rs +++ b/store/src/store.rs @@ -2,10 +2,10 @@ use crate::cache::StoreCache; use crate::data_loader_wrapper::DataLoaderWrapper; use ckb_db::iter::{DBIter, Direction, IteratorMode}; use ckb_db_schema::{ - Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_HEADER, - COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_EPOCH, - COLUMN_INDEX, COLUMN_META, COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, - META_TIP_HEADER_KEY, + Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_EXTENSION, + COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, + COLUMN_CELL_DATA, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, COLUMN_TRANSACTION_INFO, + COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, META_TIP_HEADER_KEY, }; use ckb_freezer::Freezer; use ckb_types::{ @@ -48,8 +48,9 @@ pub trait ChainStore<'a>: Send + Sync + Sized { if let Some(freezer) = self.freezer() { if header.number() > 0 && header.number() < freezer.number() { let raw_block = freezer.retrieve(header.number()).expect("block frozen")?; - let raw_block = - packed::BlockReader::from_slice_should_be_ok(&raw_block).to_entity(); + let raw_block = packed::BlockReader::from_compatible_slice(&raw_block) + .expect("checked data") + .to_entity(); return Some(raw_block.into_view()); } } @@ -60,7 +61,14 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let proposals = self .get_block_proposal_txs_ids(h) .expect("block proposal_ids must be stored"); - Some(BlockView::new_unchecked(header, uncles, body, proposals)) + let extension_opt = self.get_block_extension(h); + + let block = if let Some(extension) = extension_opt { + BlockView::new_unchecked_with_extension(header, uncles, body, proposals, extension) + } else { + BlockView::new_unchecked(header, uncles, body, proposals) + }; + Some(block) } /// Get header by block header hash @@ -127,7 +135,18 @@ pub trait ChainStore<'a>: Send + Sync + Sized { .to_entity() }) .expect("block proposal_ids must be stored"); - Some(BlockView::new_unchecked(header, uncles, body, proposals)) + + let extension_opt = self + .get(COLUMN_BLOCK_EXTENSION, hash.as_slice()) + .map(|slice| packed::BytesReader::from_slice_should_be_ok(&slice.as_ref()).to_entity()); + + let block = if let Some(extension) = extension_opt { + BlockView::new_unchecked_with_extension(header, uncles, body, proposals, extension) + } else { + BlockView::new_unchecked(header, uncles, body, proposals) + }; + + Some(block) } /// Get all transaction-hashes in block body by block header hash @@ -210,6 +229,24 @@ pub trait ChainStore<'a>: Send + Sync + Sized { } } + /// Get block extension by block header hash + fn get_block_extension(&'a self, hash: &packed::Byte32) -> Option { + if let Some(cache) = self.cache() { + if let Some(data) = cache.block_extensions.lock().get(hash) { + return data.clone(); + } + }; + + let ret = self + .get(COLUMN_BLOCK_EXTENSION, 
hash.as_slice()) + .map(|slice| packed::BytesReader::from_slice_should_be_ok(&slice.as_ref()).to_entity()); + + if let Some(cache) = self.cache() { + cache.block_extensions.lock().put(hash.clone(), ret.clone()); + } + ret + } + /// Get block ext by block header hash fn get_block_ext(&'a self, block_hash: &packed::Byte32) -> Option { self.get(COLUMN_BLOCK_EXT, block_hash.as_slice()) @@ -283,7 +320,8 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let raw_block = freezer .retrieve(tx_info.block_number) .expect("block frozen")?; - let raw_block_reader = packed::BlockReader::from_slice_should_be_ok(&raw_block); + let raw_block_reader = + packed::BlockReader::from_compatible_slice(&raw_block).expect("checked data"); let tx_reader = raw_block_reader.transactions().get(tx_info.index)?; return Some((tx_reader.to_entity().into_view(), tx_info)); } diff --git a/store/src/transaction.rs b/store/src/transaction.rs index 1d0be49008..8aa64db61f 100644 --- a/store/src/transaction.rs +++ b/store/src/transaction.rs @@ -5,10 +5,10 @@ use ckb_db::{ DBVector, RocksDBTransaction, RocksDBTransactionSnapshot, }; use ckb_db_schema::{ - Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_HEADER, - COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_EPOCH, - COLUMN_INDEX, COLUMN_META, COLUMN_NUMBER_HASH, COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, - META_CURRENT_EPOCH_KEY, META_TIP_HEADER_KEY, + Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_EXTENSION, + COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, + COLUMN_CELL_DATA, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, COLUMN_NUMBER_HASH, + COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, META_TIP_HEADER_KEY, }; use ckb_error::Error; use ckb_freezer::Freezer; @@ -126,6 +126,13 @@ impl StoreTransaction { let txs_len: packed::Uint32 = (block.transactions().len() as u32).pack(); self.insert_raw(COLUMN_BLOCK_HEADER, hash.as_slice(), header.as_slice())?; self.insert_raw(COLUMN_BLOCK_UNCLE, hash.as_slice(), uncles.as_slice())?; + if let Some(extension) = block.extension() { + self.insert_raw( + COLUMN_BLOCK_EXTENSION, + hash.as_slice(), + &extension.as_slice(), + )?; + } self.insert_raw( COLUMN_NUMBER_HASH, packed::NumberHash::new_builder() @@ -157,6 +164,7 @@ impl StoreTransaction { let txs_len = block.transactions().len(); self.delete(COLUMN_BLOCK_HEADER, hash.as_slice())?; self.delete(COLUMN_BLOCK_UNCLE, hash.as_slice())?; + self.delete(COLUMN_BLOCK_EXTENSION, hash.as_slice())?; self.delete(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice())?; self.delete( COLUMN_NUMBER_HASH, diff --git a/store/src/write_batch.rs b/store/src/write_batch.rs index 2c5b1076b2..792aa8cf43 100644 --- a/store/src/write_batch.rs +++ b/store/src/write_batch.rs @@ -1,7 +1,7 @@ use ckb_db::RocksDBWriteBatch; use ckb_db_schema::{ - Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, - COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_NUMBER_HASH, + Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EXTENSION, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, + COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_NUMBER_HASH, }; use ckb_error::Error; use ckb_types::{core::BlockNumber, packed, prelude::*}; @@ -87,6 +87,7 @@ impl StoreWriteBatch { txs_len: u32, ) -> Result<(), Error> { self.inner.delete(COLUMN_BLOCK_UNCLE, hash.as_slice())?; + self.inner.delete(COLUMN_BLOCK_EXTENSION, hash.as_slice())?; self.inner .delete(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice())?; 
self.inner.delete( diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 3fc785de37..72b2fe1efb 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -641,8 +641,52 @@ impl CKBProtocolHandler for Relayer { return; } - let msg = match packed::RelayMessage::from_slice(&data) { - Ok(msg) => msg.to_enum(), + let msg = match packed::RelayMessageReader::from_compatible_slice(&data) { + Ok(msg) => { + let item = msg.to_enum(); + if let packed::RelayMessageUnionReader::CompactBlock(ref reader) = item { + if reader.count_extra_fields() > 1 { + info_target!( + crate::LOG_TARGET_RELAY, + "Peer {} sends us a malformed message: \ + too many fields in CompactBlock", + peer_index + ); + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message: \ + too many fields in CompactBlock", + ), + ); + return; + } else { + item + } + } else { + match packed::RelayMessageReader::from_slice(&data) { + Ok(msg) => msg.to_enum(), + _ => { + info_target!( + crate::LOG_TARGET_RELAY, + "Peer {} sends us a malformed message: \ + too many fields", + peer_index + ); + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message \ + too many fields", + ), + ); + return; + } + } + } + } _ => { info_target!( crate::LOG_TARGET_RELAY, @@ -675,7 +719,7 @@ impl CKBProtocolHandler for Relayer { } let start_time = Instant::now(); - self.process(nc, peer_index, msg.as_reader()); + self.process(nc, peer_index, msg); debug_target!( crate::LOG_TARGET_RELAY, "process message={}, peer={}, cost={:?}", diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 2f01723cd9..bc665402d7 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -677,8 +677,50 @@ impl CKBProtocolHandler for Synchronizer { peer_index: PeerIndex, data: Bytes, ) { - let msg = match packed::SyncMessage::from_slice(&data) { - Ok(msg) => msg.to_enum(), + let msg = match packed::SyncMessageReader::from_compatible_slice(&data) { + Ok(msg) => { + let item = msg.to_enum(); + if let packed::SyncMessageUnionReader::SendBlock(ref reader) = item { + if reader.count_extra_fields() > 1 { + info!( + "Peer {} sends us a malformed message: \ + too many fields in SendBlock", + peer_index + ); + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message: \ + too many fields in SendBlock", + ), + ); + return; + } else { + item + } + } else { + match packed::SyncMessageReader::from_slice(&data) { + Ok(msg) => msg.to_enum(), + _ => { + info!( + "Peer {} sends us a malformed message: \ + too many fields", + peer_index + ); + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message: \ + too many fields", + ), + ); + return; + } + } + } + } _ => { info!("Peer {} sends us a malformed message", peer_index); nc.ban_peer( @@ -702,7 +744,7 @@ impl CKBProtocolHandler for Synchronizer { } let start_time = Instant::now(); - self.process(nc.as_ref(), peer_index, msg.as_reader()); + self.process(nc.as_ref(), peer_index, msg); debug!( "process message={}, peer={}, cost={:?}", msg.item_name(), diff --git a/test/src/main.rs b/test/src/main.rs index 72686eac96..5ed659efdd 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -490,6 +490,7 @@ fn all_specs() -> Vec> { // Test hard fork features Box::new(CheckAbsoluteEpochSince), Box::new(CheckRelativeEpochSince), + Box::new(CheckBlockExtension), Box::new(CheckBlockVersion), Box::new(CheckTxVersion), 
Box::new(DuplicateCellDepsForDataHashTypeLockScript), diff --git a/test/src/specs/hardfork/v2021/extension.rs b/test/src/specs/hardfork/v2021/extension.rs new file mode 100644 index 0000000000..3359b2f5da --- /dev/null +++ b/test/src/specs/hardfork/v2021/extension.rs @@ -0,0 +1,185 @@ +use crate::{ + node::waiting_for_sync, + util::{ + check::{assert_epoch_should_be, assert_submit_block_fail, assert_submit_block_ok}, + mining::{mine, mine_until_epoch, mine_until_out_bootstrap_period}, + }, + utils::wait_until, +}; +use crate::{Node, Spec}; +use ckb_logger::{info, trace}; +use ckb_types::prelude::*; + +const GENESIS_EPOCH_LENGTH: u64 = 10; + +const ERROR_UNKNOWN_FIELDS: &str = "Invalid: Block(UnknownFields("; +const ERROR_EMPTY_EXT: &str = "Invalid: Block(EmptyBlockExtension("; +const ERROR_MAX_LIMIT: &str = "Invalid: Block(ExceededMaximumBlockExtensionBytes("; + +pub struct CheckBlockExtension; + +impl Spec for CheckBlockExtension { + crate::setup!(num_nodes: 3); + + fn run(&self, nodes: &mut Vec) { + { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, empty extension field is failed"); + test_extension_via_size(node, Some(0), Err(ERROR_UNKNOWN_FIELDS)); + } + { + info!("CKB v2019, overlength extension field is failed"); + test_extension_via_size(node, Some(97), Err(ERROR_UNKNOWN_FIELDS)); + } + for size in &[1, 16, 32, 64, 96] { + info!("CKB v2019, {}-bytes extension field is failed", size); + test_extension_via_size(node, Some(*size), Err(ERROR_UNKNOWN_FIELDS)); + } + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, no extension field is passed"); + test_extension_via_size(node, None, Ok(())); + } + assert_epoch_should_be(node, 1, 3, epoch_length); + + mine_until_epoch(node, 1, epoch_length - 2, epoch_length); + { + info!("CKB v2019, empty extension field is failed (boundary)"); + test_extension_via_size(node, Some(0), Err(ERROR_UNKNOWN_FIELDS)); + } + { + info!("CKB v2019, overlength extension field is failed (boundary)"); + test_extension_via_size(node, Some(97), Err(ERROR_UNKNOWN_FIELDS)); + } + for size in &[1, 16, 32, 64, 96] { + info!( + "CKB v2019, {}-bytes extension field is failed (boundary)", + size + ); + test_extension_via_size(node, Some(*size), Err(ERROR_UNKNOWN_FIELDS)); + } + { + info!("CKB v2019, no extension field is passed (boundary)"); + test_extension_via_size(node, None, Ok(())); + } + assert_epoch_should_be(node, 1, epoch_length - 1, epoch_length); + + { + info!("CKB v2021, empty extension field is failed (boundary)"); + test_extension_via_size(node, Some(0), Err(ERROR_EMPTY_EXT)); + } + { + info!("CKB v2021, overlength extension field is failed (boundary)"); + test_extension_via_size(node, Some(97), Err(ERROR_MAX_LIMIT)); + } + assert_epoch_should_be(node, 1, epoch_length - 1, epoch_length); + for size in &[1, 16, 32, 64, 96] { + info!( + "CKB v2021, {}-bytes extension field is passed (boundary)", + size + ); + test_extension_via_size(node, Some(*size), Ok(())); + } + { + info!("CKB v2021, no extension field is passed (boundary)"); + test_extension_via_size(node, None, Ok(())); + } + assert_epoch_should_be(node, 2, 5, epoch_length); + + mine_until_epoch(node, 4, 0, epoch_length); + { + info!("CKB v2021, empty extension field is failed"); + test_extension_via_size(node, Some(0), Err(ERROR_EMPTY_EXT)); + } + { + info!("CKB v2021, overlength extension field is failed"); + test_extension_via_size(node, 
Some(97), Err(ERROR_MAX_LIMIT)); + } + assert_epoch_should_be(node, 4, 0, epoch_length); + for size in &[1, 16, 32, 64, 96] { + info!("CKB v2021, {}-bytes extension field is passed", size); + test_extension_via_size(node, Some(*size), Ok(())); + } + { + info!("CKB v2021, no extension field is passed"); + test_extension_via_size(node, None, Ok(())); + } + assert_epoch_should_be(node, 4, 6, epoch_length); + } + + { + info!("test sync blocks for two nodes"); + let node0 = &nodes[0]; + let node1 = &nodes[1]; + + let rpc_client0 = node0.rpc_client(); + let rpc_client1 = node1.rpc_client(); + + node1.connect(node0); + let ret = wait_until(30, || { + let number0 = rpc_client0.get_tip_block_number(); + let number1 = rpc_client1.get_tip_block_number(); + trace!("block number: node0: {}, node1: {}", number0, number1); + number0 == number1 + }); + assert!(ret, "node1 should get same tip header with node0"); + } + + { + info!("test reload data from store after restart the node"); + let node0 = &mut nodes[0]; + node0.stop(); + node0.start(); + } + + { + info!("test sync blocks for all nodes"); + let node0 = &nodes[0]; + let node1 = &nodes[1]; + let node2 = &nodes[2]; + + let rpc_client0 = node0.rpc_client(); + let rpc_client2 = node2.rpc_client(); + + node1.connect(node0); + node2.connect(node0); + let ret = wait_until(30, || { + let header0 = rpc_client0.get_tip_header(); + let header2 = rpc_client2.get_tip_header(); + header0 == header2 + }); + assert!(ret, "node2 should get same tip header with node0"); + + mine(node2, 5); + + info!("test sync blocks"); + waiting_for_sync(nodes); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0224 = Some(2); + } + } +} + +fn test_extension_via_size(node: &Node, size: Option, result: Result<(), &'static str>) { + let block = node + .new_block_builder(None, None, None) + .extension(size.map(|s| vec![0u8; s].pack())) + .build(); + if let Err(errmsg) = result { + assert_submit_block_fail(node, &block, errmsg); + } else { + assert_submit_block_ok(node, &block); + } +} diff --git a/test/src/specs/hardfork/v2021/mod.rs b/test/src/specs/hardfork/v2021/mod.rs index 920756f286..f2d42d9e7f 100644 --- a/test/src/specs/hardfork/v2021/mod.rs +++ b/test/src/specs/hardfork/v2021/mod.rs @@ -1,4 +1,5 @@ mod cell_deps; +mod extension; mod since; mod version; @@ -6,5 +7,6 @@ pub use cell_deps::{ DuplicateCellDepsForDataHashTypeLockScript, DuplicateCellDepsForDataHashTypeTypeScript, DuplicateCellDepsForTypeHashTypeLockScript, DuplicateCellDepsForTypeHashTypeTypeScript, }; +pub use extension::CheckBlockExtension; pub use since::{CheckAbsoluteEpochSince, CheckRelativeEpochSince}; pub use version::{CheckBlockVersion, CheckTxVersion}; diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index 4af7be6449..d674cbea64 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -72,6 +72,7 @@ genesis_epoch_length = 1000 rfc_pr_0221 = 9_223_372_036_854_775_807 rfc_pr_0222 = 9_223_372_036_854_775_807 rfc_pr_0223 = 9_223_372_036_854_775_807 +rfc_pr_0224 = 9_223_372_036_854_775_807 rfc_pr_0230 = 9_223_372_036_854_775_807 [pow] diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index de025e5994..793e61e305 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -256,6 +256,7 @@ 
impl TxPoolService { cellbase: BlockAssembler::transform_cellbase(&cellbase, None), work_id: work_id.into(), dao: dao.into(), + extension: None, }) } diff --git a/util/app-config/src/configs/store.rs b/util/app-config/src/configs/store.rs index fa3471562d..88cf1164be 100644 --- a/util/app-config/src/configs/store.rs +++ b/util/app-config/src/configs/store.rs @@ -13,6 +13,9 @@ pub struct Config { pub block_tx_hashes_cache_size: usize, /// The maximum number of blocks which uncles section is cached. pub block_uncles_cache_size: usize, + /// The maximum number of blocks which extension section is cached. + #[serde(default = "default_block_extensions_cache_size")] + pub block_extensions_cache_size: usize, /// The maximum number of blocks which cellbase transaction is cached. pub cellbase_cache_size: usize, /// whether enable freezer @@ -20,6 +23,10 @@ pub struct Config { pub freezer_enable: bool, } +const fn default_block_extensions_cache_size() -> usize { + 30 +} + fn default_freezer_enable() -> bool { false } @@ -32,6 +39,7 @@ impl Default for Config { block_proposals_cache_size: 30, block_tx_hashes_cache_size: 30, block_uncles_cache_size: 30, + block_extensions_cache_size: default_block_extensions_cache_size(), cellbase_cache_size: 30, freezer_enable: false, } diff --git a/util/jsonrpc-types/src/block_template.rs b/util/jsonrpc-types/src/block_template.rs index b53715d78e..53c962ef64 100644 --- a/util/jsonrpc-types/src/block_template.rs +++ b/util/jsonrpc-types/src/block_template.rs @@ -1,6 +1,6 @@ use crate::{ - BlockNumber, Byte32, Cycle, EpochNumberWithFraction, Header, ProposalShortId, Timestamp, - Transaction, Uint32, Uint64, Version, + BlockNumber, Byte32, Cycle, EpochNumberWithFraction, Header, JsonBytes, ProposalShortId, + Timestamp, Transaction, Uint32, Uint64, Version, }; use ckb_types::{packed, prelude::*, H256}; use serde::{Deserialize, Serialize}; @@ -84,6 +84,12 @@ pub struct BlockTemplate { /// /// See RFC [Deposit and Withdraw in Nervos DAO](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0023-dao-deposit-withdraw/0023-dao-deposit-withdraw.md#calculation). pub dao: Byte32, + /// The extension for the new block. + /// + /// This field is optional. It a reserved field, please leave it blank. + #[doc(hidden)] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extension: Option, } impl From for packed::Block { @@ -100,6 +106,7 @@ impl From for packed::Block { proposals, cellbase, dao, + extension, .. 
} = block_template; let raw = packed::RawHeader::new_builder() @@ -116,25 +123,49 @@ impl From for packed::Block { .push(cellbase.into()) .extend(transactions.into_iter().map(|tx| tx.into())) .build(); - packed::Block::new_builder() - .header(header) - .uncles( - uncles - .into_iter() - .map(|u| u.into()) - .collect::>() - .pack(), - ) - .transactions(txs) - .proposals( - proposals - .into_iter() - .map(|p| p.into()) - .collect::>() - .pack(), - ) - .build() - .reset_header() + if let Some(extension) = extension { + let extension: packed::Bytes = extension.into(); + packed::BlockV1::new_builder() + .header(header) + .uncles( + uncles + .into_iter() + .map(|u| u.into()) + .collect::>() + .pack(), + ) + .transactions(txs) + .proposals( + proposals + .into_iter() + .map(|p| p.into()) + .collect::>() + .pack(), + ) + .extension(extension) + .build() + .as_v0() + } else { + packed::Block::new_builder() + .header(header) + .uncles( + uncles + .into_iter() + .map(|u| u.into()) + .collect::>() + .pack(), + ) + .transactions(txs) + .proposals( + proposals + .into_iter() + .map(|p| p.into()) + .collect::>() + .pack(), + ) + .build() + } + .reset_header() } } diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index d1ded04ae3..f04095c86f 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -637,6 +637,12 @@ pub struct Header { /// /// It is all zeros when `proposals` is empty, or the hash on all the bytes concatenated together. pub proposals_hash: H256, + // TODO ckb2021 Returns the extra hash as uncles hash directly since no extension now. + // The hash on `uncles` and extension in the block body. + // + // The uncles hash is all zeros when `uncles` is empty, or the hash on all the uncle header hashes concatenated together. + // The extension hash is the hash of the extension. + // The extra hash is the hash on uncles hash and extension hash concatenated together. /// The hash on `uncles` in the block body. /// /// It is all zeros when `uncles` is empty, or the hash on all the uncle header hashes concatenated together. @@ -696,7 +702,7 @@ impl From for Header { transactions_root: raw.transactions_root().unpack(), proposals_hash: raw.proposals_hash().unpack(), compact_target: raw.compact_target().unpack(), - uncles_hash: raw.uncles_hash().unpack(), + uncles_hash: raw.extra_hash().unpack(), dao: raw.dao().into(), nonce: input.nonce().unpack(), } @@ -743,7 +749,7 @@ impl From
for packed::Header { .transactions_root(transactions_root.pack()) .proposals_hash(proposals_hash.pack()) .compact_target(compact_target.pack()) - .uncles_hash(uncles_hash.pack()) + .extra_hash(uncles_hash.pack()) .dao(dao.into()) .build(); packed::Header::new_builder() @@ -843,6 +849,12 @@ pub struct Block { pub transactions: Vec, /// The proposal IDs in the block body. pub proposals: Vec, + /// The extension in the block body. + /// + /// This field is optional. It a reserved field, please leave it blank. + #[doc(hidden)] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extension: Option, } /// The JSON view of a Block including header and body. @@ -856,6 +868,12 @@ pub struct BlockView { pub transactions: Vec, /// The proposal IDs in the block body. pub proposals: Vec, + /// The extension in the block body. + /// + /// This field is optional. It a reserved field, please leave it blank. + #[doc(hidden)] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extension: Option, } impl From for Block { @@ -865,6 +883,7 @@ impl From for Block { uncles: input.uncles().into_iter().map(Into::into).collect(), transactions: input.transactions().into_iter().map(Into::into).collect(), proposals: input.proposals().into_iter().map(Into::into).collect(), + extension: input.extension().map(Into::into), } } } @@ -905,6 +924,7 @@ impl From for BlockView { uncles, transactions, proposals: block.proposals().into_iter().map(Into::into).collect(), + extension: block.extension().map(Into::into), } } } @@ -916,13 +936,26 @@ impl From for packed::Block { uncles, transactions, proposals, + extension, } = json; - packed::Block::new_builder() - .header(header.into()) - .uncles(uncles.into_iter().map(Into::into).pack()) - .transactions(transactions.into_iter().map(Into::into).pack()) - .proposals(proposals.into_iter().map(Into::into).pack()) - .build() + if let Some(extension) = extension { + let extension: packed::Bytes = extension.into(); + packed::BlockV1::new_builder() + .header(header.into()) + .uncles(uncles.into_iter().map(Into::into).pack()) + .transactions(transactions.into_iter().map(Into::into).pack()) + .proposals(proposals.into_iter().map(Into::into).pack()) + .extension(extension) + .build() + .as_v0() + } else { + packed::Block::new_builder() + .header(header.into()) + .uncles(uncles.into_iter().map(Into::into).pack()) + .transactions(transactions.into_iter().map(Into::into).pack()) + .proposals(proposals.into_iter().map(Into::into).pack()) + .build() + } } } @@ -933,6 +966,7 @@ impl From for core::BlockView { uncles, transactions, proposals, + extension, } = input; let block = Block { header: header.inner, @@ -948,6 +982,7 @@ impl From for core::BlockView { .collect(), transactions: transactions.into_iter().map(|tx| tx.inner).collect(), proposals, + extension, }; let block: packed::Block = block.into(); block.into_view() diff --git a/util/types/schemas/blockchain.mol b/util/types/schemas/blockchain.mol index 842872007a..3ab343d856 100644 --- a/util/types/schemas/blockchain.mol +++ b/util/types/schemas/blockchain.mol @@ -77,7 +77,7 @@ struct RawHeader { parent_hash: Byte32, transactions_root: Byte32, proposals_hash: Byte32, - uncles_hash: Byte32, + extra_hash: Byte32, dao: Byte32, } @@ -98,6 +98,14 @@ table Block { proposals: ProposalShortIdVec, } +table BlockV1 { + header: Header, + uncles: UncleBlockVec, + transactions: TransactionVec, + proposals: ProposalShortIdVec, + extension: Bytes, +} + table CellbaseWitness { lock: Script, message: Bytes, diff --git 
a/util/types/schemas/extensions.mol b/util/types/schemas/extensions.mol index 78c15227f6..dbaf7fb470 100644 --- a/util/types/schemas/extensions.mol +++ b/util/types/schemas/extensions.mol @@ -110,6 +110,15 @@ table CompactBlock { proposals: ProposalShortIdVec, } +table CompactBlockV1 { + header: Header, + short_ids: ProposalShortIdVec, + prefilled_transactions: IndexTransactionVec, + uncles: Byte32Vec, + proposals: ProposalShortIdVec, + extension: Bytes, +} + table RelayTransaction { cycles: Uint64, transaction: Transaction, diff --git a/util/types/src/core/advanced_builders.rs b/util/types/src/core/advanced_builders.rs index 6024c7208d..e42697c1a2 100644 --- a/util/types/src/core/advanced_builders.rs +++ b/util/types/src/core/advanced_builders.rs @@ -43,7 +43,7 @@ pub struct HeaderBuilder { pub(crate) transactions_root: packed::Byte32, pub(crate) proposals_hash: packed::Byte32, pub(crate) compact_target: packed::Uint32, - pub(crate) uncles_hash: packed::Byte32, + pub(crate) extra_hash: packed::Byte32, pub(crate) epoch: packed::Uint64, pub(crate) dao: packed::Byte32, // Nonce @@ -63,6 +63,7 @@ pub struct BlockBuilder { pub(crate) uncles: Vec, pub(crate) transactions: Vec, pub(crate) proposals: Vec, + pub(crate) extension: Option, } /* @@ -79,7 +80,7 @@ impl ::std::default::Default for HeaderBuilder { transactions_root: Default::default(), proposals_hash: Default::default(), compact_target: DIFF_TWO.pack(), - uncles_hash: Default::default(), + extra_hash: Default::default(), epoch: Default::default(), dao: Default::default(), nonce: Default::default(), @@ -240,7 +241,7 @@ impl HeaderBuilder { def_setter_simple!(transactions_root, Byte32); def_setter_simple!(proposals_hash, Byte32); def_setter_simple!(compact_target, Uint32); - def_setter_simple!(uncles_hash, Byte32); + def_setter_simple!(extra_hash, Byte32); def_setter_simple!(epoch, Uint64); def_setter_simple!(dao, Byte32); def_setter_simple!(nonce, Uint128); @@ -255,7 +256,7 @@ impl HeaderBuilder { transactions_root, proposals_hash, compact_target, - uncles_hash, + extra_hash, epoch, dao, nonce, @@ -272,7 +273,7 @@ impl HeaderBuilder { .transactions_root(transactions_root) .proposals_hash(proposals_hash) .compact_target(compact_target) - .uncles_hash(uncles_hash) + .extra_hash(extra_hash) .epoch(epoch) .dao(dao) .build(); @@ -290,7 +291,7 @@ impl BlockBuilder { def_setter_simple!(header, transactions_root, Byte32); def_setter_simple!(header, proposals_hash, Byte32); def_setter_simple!(header, compact_target, Uint32); - def_setter_simple!(header, uncles_hash, Byte32); + def_setter_simple!(header, extra_hash, Byte32); def_setter_simple!(header, epoch, Uint64); def_setter_simple!(header, dao, Byte32); def_setter_simple!(header, nonce, Uint128); @@ -316,12 +317,20 @@ impl BlockBuilder { self } + /// Set `extension`. 
+ #[doc(hidden)] + pub fn extension(mut self, extension: Option) -> Self { + self.extension = extension; + self + } + fn build_internal(self, reset_header: bool) -> core::BlockView { let Self { header, uncles, transactions, proposals, + extension, } = self; let (uncles, uncle_hashes) = { let len = uncles.len(); @@ -376,22 +385,37 @@ impl BlockBuilder { let witnesses_root = merkle_root(&tx_witness_hashes[..]); let transactions_root = merkle_root(&[raw_transactions_root, witnesses_root]); let proposals_hash = proposals.calc_proposals_hash(); - let uncles_hash = uncles.calc_uncles_hash(); + let extra_hash_view = core::ExtraHashView::new( + uncles.calc_uncles_hash(), + extension.as_ref().map(packed::Bytes::calc_raw_data_hash), + ); + let extra_hash = extra_hash_view.extra_hash(); header .transactions_root(transactions_root) .proposals_hash(proposals_hash) - .uncles_hash(uncles_hash) + .extra_hash(extra_hash) .build() } else { header.build() }; - let block = packed::Block::new_builder() - .header(data) - .uncles(uncles) - .transactions(transactions.pack()) - .proposals(proposals) - .build(); + let block = if let Some(extension) = extension { + packed::BlockV1::new_builder() + .header(data) + .uncles(uncles) + .transactions(transactions.pack()) + .proposals(proposals) + .extension(extension) + .build() + .as_v0() + } else { + packed::Block::new_builder() + .header(data) + .uncles(uncles) + .transactions(transactions.pack()) + .proposals(proposals) + .build() + }; core::BlockView { data: block, hash, @@ -448,7 +472,7 @@ impl packed::Header { .transactions_root(self.raw().transactions_root()) .proposals_hash(self.raw().proposals_hash()) .compact_target(self.raw().compact_target()) - .uncles_hash(self.raw().uncles_hash()) + .extra_hash(self.raw().extra_hash()) .epoch(self.raw().epoch()) .dao(self.raw().dao()) .nonce(self.nonce()) @@ -538,5 +562,6 @@ impl core::BlockView { .collect::>(), ) .proposals(data.proposals().into_iter().collect::>()) + .extension(data.extension()) } } diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs index 78fafd8a6b..0049b79404 100644 --- a/util/types/src/core/hardfork.rs +++ b/util/types/src/core/hardfork.rs @@ -97,6 +97,7 @@ pub struct HardForkSwitch { rfc_pr_0221: EpochNumber, rfc_pr_0222: EpochNumber, rfc_pr_0223: EpochNumber, + rfc_pr_0224: EpochNumber, rfc_pr_0230: EpochNumber, } @@ -120,6 +121,10 @@ pub struct HardForkSwitchBuilder { /// /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0223: Option, + /// Reuse `uncles_hash` in the header as `extra_hash`. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0224: Option, /// Allow unknown block versions and transactions versions. 
/// /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) @@ -138,6 +143,7 @@ impl HardForkSwitch { .rfc_pr_0221(self.rfc_pr_0221()) .rfc_pr_0222(self.rfc_pr_0222()) .rfc_pr_0223(self.rfc_pr_0223()) + .rfc_pr_0224(self.rfc_pr_0224()) .rfc_pr_0230(self.rfc_pr_0230()) } @@ -148,6 +154,7 @@ impl HardForkSwitch { .disable_rfc_pr_0221() .disable_rfc_pr_0222() .disable_rfc_pr_0223() + .disable_rfc_pr_0224() .disable_rfc_pr_0230() .build() .unwrap() @@ -175,6 +182,13 @@ define_methods!( disable_rfc_pr_0223, "RFC PR 0223" ); +define_methods!( + rfc_pr_0224, + reuse_uncles_hash_as_extra_hash, + is_reuse_uncles_hash_as_extra_hash_enabled, + disable_rfc_pr_0224, + "RFC PR 0224" +); define_methods!( rfc_pr_0230, allow_unknown_versions, @@ -201,11 +215,13 @@ impl HardForkSwitchBuilder { let rfc_pr_0221 = try_find!(rfc_pr_0221); let rfc_pr_0222 = try_find!(rfc_pr_0222); let rfc_pr_0223 = try_find!(rfc_pr_0223); + let rfc_pr_0224 = try_find!(rfc_pr_0224); let rfc_pr_0230 = try_find!(rfc_pr_0230); Ok(HardForkSwitch { rfc_pr_0221, rfc_pr_0222, rfc_pr_0223, + rfc_pr_0224, rfc_pr_0230, }) } diff --git a/util/types/src/core/mod.rs b/util/types/src/core/mod.rs index babf79a4b3..5eb1d142fe 100644 --- a/util/types/src/core/mod.rs +++ b/util/types/src/core/mod.rs @@ -30,7 +30,9 @@ pub use extras::{BlockExt, EpochExt, EpochNumberWithFraction, TransactionInfo}; pub use fee_rate::FeeRate; pub use reward::{BlockEconomicState, BlockIssuance, BlockReward, MinerReward}; pub use transaction_meta::{TransactionMeta, TransactionMetaBuilder}; -pub use views::{BlockView, HeaderView, TransactionView, UncleBlockVecView, UncleBlockView}; +pub use views::{ + BlockView, ExtraHashView, HeaderView, TransactionView, UncleBlockVecView, UncleBlockView, +}; pub use ckb_occupied_capacity::{capacity_bytes, Capacity, Ratio, Result as CapacityResult}; pub use ckb_rational::RationalU256; diff --git a/util/types/src/core/views.rs b/util/types/src/core/views.rs index 1b6fa6fed8..00aede05ef 100644 --- a/util/types/src/core/views.rs +++ b/util/types/src/core/views.rs @@ -2,6 +2,7 @@ use std::collections::HashSet; +use ckb_hash::new_blake2b; use ckb_occupied_capacity::Result as CapacityResult; use crate::{ @@ -37,6 +38,17 @@ pub struct TransactionView { pub(crate) witness_hash: packed::Byte32, } +/// A readonly and immutable struct which includes extra hash and the decoupled +/// parts of it. +#[derive(Debug, Clone)] +pub struct ExtraHashView { + /// The uncles hash which is used to combine to the extra hash. + pub(crate) uncles_hash: packed::Byte32, + /// The first item is the new filed hash, which is used to combine to the extra hash. + /// The second item is the extra hash. + pub(crate) extension_hash_and_extra_hash: Option<(packed::Byte32, packed::Byte32)>, +} + /// A readonly and immutable struct which includes [`Header`] and its hash. 
/// /// # Notice @@ -114,6 +126,24 @@ impl ::std::fmt::Display for TransactionView { } } +impl ::std::fmt::Display for ExtraHashView { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + if let Some((ref extension_hash, ref extra_hash)) = self.extension_hash_and_extra_hash { + write!( + f, + "uncles_hash: {}, extension_hash: {}, extra_hash: {}", + self.uncles_hash, extension_hash, extra_hash + ) + } else { + write!( + f, + "uncles_hash: {}, extension_hash: None, extra_hash: uncles_hash", + self.uncles_hash + ) + } + } +} + impl ::std::fmt::Display for HeaderView { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!( @@ -394,6 +424,44 @@ impl TransactionView { } } +impl ExtraHashView { + /// Creates `ExtraHashView` with `uncles_hash` and optional `extension_hash`. + pub fn new(uncles_hash: packed::Byte32, extension_hash_opt: Option) -> Self { + let extension_hash_and_extra_hash = extension_hash_opt.map(|extension_hash| { + let mut ret = [0u8; 32]; + let mut blake2b = new_blake2b(); + blake2b.update(&uncles_hash.as_slice()); + blake2b.update(&extension_hash.as_slice()); + blake2b.finalize(&mut ret); + (extension_hash, ret.pack()) + }); + Self { + uncles_hash, + extension_hash_and_extra_hash, + } + } + + /// Gets `uncles_hash`. + pub fn uncles_hash(&self) -> packed::Byte32 { + self.uncles_hash.clone() + } + + /// Gets `extension_hash`. + pub fn extension_hash(&self) -> Option { + self.extension_hash_and_extra_hash + .as_ref() + .map(|(ref extension_hash, _)| extension_hash.clone()) + } + + /// Gets `extra_hash`. + pub fn extra_hash(&self) -> packed::Byte32 { + self.extension_hash_and_extra_hash + .as_ref() + .map(|(_, ref extra_hash)| extra_hash.clone()) + .unwrap_or_else(|| self.uncles_hash.clone()) + } +} + impl HeaderView { define_data_getter!(Header); define_cache_getter!(hash, Byte32); @@ -407,12 +475,8 @@ impl HeaderView { define_inner_getter!(header, packed, parent_hash, Byte32); define_inner_getter!(header, packed, transactions_root, Byte32); define_inner_getter!(header, packed, proposals_hash, Byte32); - define_inner_getter!(header, packed, uncles_hash, Byte32); - - /// Gets `raw.dao`. - pub fn dao(&self) -> packed::Byte32 { - self.data().raw().dao() - } + define_inner_getter!(header, packed, extra_hash, Byte32); + define_inner_getter!(header, packed, dao, Byte32); /// Gets `raw.difficulty`. pub fn difficulty(&self) -> U256 { @@ -449,12 +513,8 @@ impl UncleBlockView { define_inner_getter!(uncle, packed, parent_hash, Byte32); define_inner_getter!(uncle, packed, transactions_root, Byte32); define_inner_getter!(uncle, packed, proposals_hash, Byte32); - define_inner_getter!(uncle, packed, uncles_hash, Byte32); - - /// Gets `header.raw.dao`. - pub fn dao(&self) -> packed::Byte32 { - self.data().header().raw().dao() - } + define_inner_getter!(uncle, packed, extra_hash, Byte32); + define_inner_getter!(uncle, packed, dao, Byte32); /// Gets `header.raw.difficulty`. pub fn difficulty(&self) -> U256 { @@ -560,12 +620,8 @@ impl BlockView { define_inner_getter!(block, packed, parent_hash, Byte32); define_inner_getter!(block, packed, transactions_root, Byte32); define_inner_getter!(block, packed, proposals_hash, Byte32); - define_inner_getter!(block, packed, uncles_hash, Byte32); - - /// Gets `header.raw.dao`. - pub fn dao(&self) -> packed::Byte32 { - self.data().header().raw().dao() - } + define_inner_getter!(block, packed, extra_hash, Byte32); + define_inner_getter!(block, packed, dao, Byte32); /// Gets `header.nonce`. 
pub fn nonce(&self) -> u128 { @@ -593,6 +649,15 @@ impl BlockView { } } + /// Gets `extension`. + /// + /// # Panics + /// + /// Panics if the extension exists but not a valid [`Bytes`](../packed/struct.Bytes.html). + pub fn extension(&self) -> Option { + self.data.extension() + } + /// Converts into an uncle block. pub fn as_uncle(&self) -> UncleBlockView { UncleBlockView { @@ -668,6 +733,16 @@ impl BlockView { self.data().as_reader().calc_uncles_hash() } + /// Calculates the hash for extension. + pub fn calc_extension_hash(&self) -> Option { + self.data().as_reader().calc_extension_hash() + } + + /// Calculates the extra hash. + pub fn calc_extra_hash(&self) -> ExtraHashView { + self.data().as_reader().calc_extra_hash() + } + /// Calculates the hash for proposals. pub fn calc_proposals_hash(&self) -> packed::Byte32 { self.data().as_reader().calc_proposals_hash() @@ -751,6 +826,38 @@ impl BlockView { tx_witness_hashes, } } + + /// Creates a new `BlockView` with a extension. + /// + /// # Notice + /// + /// [`BlockView`] created by this method could have invalid hashes or + /// invalid merkle roots in the header. + pub fn new_unchecked_with_extension( + header: HeaderView, + uncles: UncleBlockVecView, + body: Vec, + proposals: packed::ProposalShortIdVec, + extension: packed::Bytes, + ) -> Self { + let block = packed::BlockV1::new_builder() + .header(header.data()) + .transactions(body.iter().map(|tx| tx.data()).pack()) + .uncles(uncles.data()) + .proposals(proposals) + .extension(extension) + .build() + .as_v0(); + let tx_hashes = body.iter().map(|tx| tx.hash()).collect::>(); + let tx_witness_hashes = body.iter().map(|tx| tx.witness_hash()).collect::>(); + Self { + data: block, + hash: header.hash(), + uncle_hashes: uncles.hashes(), + tx_hashes, + tx_witness_hashes, + } + } } /* diff --git a/util/types/src/extension/calc_hash.rs b/util/types/src/extension/calc_hash.rs index 5aa536bc6a..c69f68f5fb 100644 --- a/util/types/src/extension/calc_hash.rs +++ b/util/types/src/extension/calc_hash.rs @@ -1,6 +1,6 @@ use ckb_hash::{blake2b_256, new_blake2b}; -use crate::{packed, prelude::*}; +use crate::{core, packed, prelude::*}; /* * Calculate simple hash for packed bytes wrappers. @@ -70,6 +70,16 @@ impl packed::CellOutput { } } +impl<'r> packed::BytesReader<'r> { + /// Calculates the hash for raw data in `Bytes`. + /// + /// Returns the empty hash if no data, otherwise, calculates the hash of the data and returns it. + pub fn calc_raw_data_hash(&self) -> packed::Byte32 { + blake2b_256(self.raw_data()).pack() + } +} +impl_calc_special_hash_for_entity!(Bytes, calc_raw_data_hash); + impl<'r> packed::ScriptReader<'r> { /// Calculates the hash for [self.as_slice()] as the script hash. /// @@ -236,6 +246,24 @@ impl<'r> packed::BlockReader<'r> { self.uncles().calc_uncles_hash() } + /// Calculates the hash for the extension. + /// + /// If there is an extension (unknown for now), calculate the hash of its data. + pub fn calc_extension_hash(&self) -> Option { + self.extension() + .map(|extension| extension.calc_raw_data_hash()) + } + + /// Calculates the extra hash, which is a combination of the uncles hash and + /// the extension hash. + /// + /// - If there is no extension, extra hash is the same as the uncles hash. + /// - If there is a extension, then extra hash it the hash of the combination + /// of uncles hash and the extension hash. 
+ pub fn calc_extra_hash(&self) -> core::ExtraHashView { + core::ExtraHashView::new(self.calc_uncles_hash(), self.calc_extension_hash()) + } + /// Calculates transaction hashes for all transactions in the block. pub fn calc_tx_hashes(&self) -> Vec { self.transactions() @@ -256,6 +284,8 @@ impl<'r> packed::BlockReader<'r> { impl_calc_special_hash_for_entity!(Block, calc_header_hash); impl_calc_special_hash_for_entity!(Block, calc_proposals_hash); impl_calc_special_hash_for_entity!(Block, calc_uncles_hash); +impl_calc_special_hash_for_entity!(Block, calc_extension_hash, Option); +impl_calc_special_hash_for_entity!(Block, calc_extra_hash, core::ExtraHashView); impl_calc_special_hash_for_entity!(Block, calc_tx_hashes, Vec); impl_calc_special_hash_for_entity!(Block, calc_tx_witness_hashes, Vec); @@ -329,7 +359,7 @@ mod tests { .proposals_hash( h256!("0xd1670e45af1deb9cc00951d71c09ce80932e7ddf9fb151d744436bd04ac4a562").pack(), ) - .uncles_hash( + .extra_hash( h256!("0x0000000000000000000000000000000000000000000000000000000000000000").pack(), ) .dao(h256!("0xb54bdd7f6be90000bb52f392d41cd70024f7ef29b437000000febffacf030000").pack()) @@ -359,7 +389,7 @@ mod tests { .proposals_hash( h256!("0x0000000000000000000000000000000000000000000000000000000000000000").pack(), ) - .uncles_hash( + .extra_hash( h256!("0x0000000000000000000000000000000000000000000000000000000000000000").pack(), ) .dao(h256!("0xb54bdd7f6be90000bb52f392d41cd70024f7ef29b437000000febffacf030000").pack()) @@ -384,6 +414,13 @@ mod tests { assert_eq!(uncles.calc_uncles_hash(), expect.pack()); } + #[test] + fn empty_extra_hash() { + let block = packed::Block::new_builder().build(); + let expect = h256!("0x0"); + assert_eq!(block.calc_extra_hash().extra_hash(), expect.pack()); + } + #[test] fn empty_script_hash() { let script = packed::Script::new_builder().build(); diff --git a/util/types/src/extension/shortcuts.rs b/util/types/src/extension/shortcuts.rs index 5a6a33ed60..a2d37ebf83 100644 --- a/util/types/src/extension/shortcuts.rs +++ b/util/types/src/extension/shortcuts.rs @@ -1,6 +1,7 @@ use std::collections::HashSet; use crate::{ + bytes, core::{self, BlockNumber}, packed, prelude::*, @@ -192,17 +193,102 @@ impl packed::Block { let witnesses_root = merkle_root(tx_witness_hashes); let transactions_root = merkle_root(&[raw_transactions_root, witnesses_root]); let proposals_hash = self.as_reader().calc_proposals_hash(); - let uncles_hash = self.as_reader().calc_uncles_hash(); + let extra_hash = self.as_reader().calc_extra_hash().extra_hash(); let raw_header = self .header() .raw() .as_builder() .transactions_root(transactions_root) .proposals_hash(proposals_hash) - .uncles_hash(uncles_hash) + .extra_hash(extra_hash) .build(); let header = self.header().as_builder().raw(raw_header).build(); - self.as_builder().header(header).build() + if let Some(extension) = self.extension() { + packed::BlockV1::new_builder() + .header(header) + .uncles(self.uncles()) + .transactions(self.transactions()) + .proposals(self.proposals()) + .extension(extension) + .build() + .as_v0() + } else { + self.as_builder().header(header).build() + } + } + + /// Gets the i-th extra field if it exists; i started from 0. 
+ pub fn extra_field(&self, index: usize) -> Option { + let count = self.count_extra_fields(); + if count > index { + let slice = self.as_slice(); + let i = (1 + Self::FIELD_COUNT + index) * molecule::NUMBER_SIZE; + let start = molecule::unpack_number(&slice[i..]) as usize; + if count == index + 1 { + Some(self.as_bytes().slice(start..)) + } else { + let j = i + molecule::NUMBER_SIZE; + let end = molecule::unpack_number(&slice[j..]) as usize; + Some(self.as_bytes().slice(start..end)) + } + } else { + None + } + } + + /// Gets the extension field if it existed. + /// + /// # Panics + /// + /// Panics if the first extra field exists but not a valid [`Bytes`](struct.Bytes.html). + pub fn extension(&self) -> Option { + self.extra_field(0) + .map(|data| packed::Bytes::from_slice(&data).unwrap()) + } +} + +impl packed::BlockV1 { + /// Converts to a compatible [`Block`](struct.Block.html) with an extra field. + pub fn as_v0(&self) -> packed::Block { + packed::Block::new_unchecked(self.as_bytes()) + } +} + +impl<'r> packed::BlockReader<'r> { + /// Gets the i-th extra field if it exists; i started from 0. + pub fn extra_field(&self, index: usize) -> Option<&[u8]> { + let count = self.count_extra_fields(); + if count > index { + let slice = self.as_slice(); + let i = (1 + Self::FIELD_COUNT + index) * molecule::NUMBER_SIZE; + let start = molecule::unpack_number(&slice[i..]) as usize; + if count == index + 1 { + Some(&self.as_slice()[start..]) + } else { + let j = i + molecule::NUMBER_SIZE; + let end = molecule::unpack_number(&slice[j..]) as usize; + Some(&self.as_slice()[start..end]) + } + } else { + None + } + } + + /// Gets the extension field if it existed. + /// + /// # Panics + /// + /// Panics if the first extra field exists but not a valid [`BytesReader`](struct.BytesReader.html). + pub fn extension(&self) -> Option { + self.extra_field(0) + .map(|data| packed::BytesReader::from_slice(&data).unwrap()) + } +} + +impl<'r> packed::BlockV1Reader<'r> { + /// Converts to a compatible [`BlockReader`](struct.BlockReader.html) with an extra field. + pub fn as_v0(&self) -> packed::BlockReader { + packed::BlockReader::new_unchecked(self.as_slice()) } } @@ -237,13 +323,25 @@ impl packed::CompactBlock { } } - packed::CompactBlock::new_builder() - .header(block.data().header()) - .short_ids(short_ids.pack()) - .prefilled_transactions(prefilled_transactions.pack()) - .uncles(block.uncle_hashes.clone()) - .proposals(block.data().proposals()) - .build() + if let Some(extension) = block.data().extension() { + packed::CompactBlockV1::new_builder() + .header(block.data().header()) + .short_ids(short_ids.pack()) + .prefilled_transactions(prefilled_transactions.pack()) + .uncles(block.uncle_hashes.clone()) + .proposals(block.data().proposals()) + .extension(extension) + .build() + .as_v0() + } else { + packed::CompactBlock::new_builder() + .header(block.data().header()) + .short_ids(short_ids.pack()) + .prefilled_transactions(prefilled_transactions.pack()) + .uncles(block.uncle_hashes.clone()) + .proposals(block.data().proposals()) + .build() + } } /// Takes proposal short ids for the transactions which are not prefilled. @@ -288,3 +386,17 @@ impl packed::CompactBlock { .collect() } } + +impl packed::CompactBlockV1 { + /// Converts to a compatible [`CompactBlock`](struct.CompactBlock.html) with an extra field. 
+ pub fn as_v0(&self) -> packed::CompactBlock { + packed::CompactBlock::new_unchecked(self.as_bytes()) + } +} + +impl<'r> packed::CompactBlockV1Reader<'r> { + /// Converts to a compatible [`CompactBlockReader`](struct.CompactBlockReader.html) with an extra field. + pub fn as_v0(&self) -> packed::CompactBlockReader { + packed::CompactBlockReader::new_unchecked(self.as_slice()) + } +} diff --git a/util/types/src/generated/blockchain.rs b/util/types/src/generated/blockchain.rs index d4bb90835b..0e0223a845 100644 --- a/util/types/src/generated/blockchain.rs +++ b/util/types/src/generated/blockchain.rs @@ -6961,7 +6961,7 @@ impl ::core::fmt::Display for RawHeader { write!(f, ", {}: {}", "parent_hash", self.parent_hash())?; write!(f, ", {}: {}", "transactions_root", self.transactions_root())?; write!(f, ", {}: {}", "proposals_hash", self.proposals_hash())?; - write!(f, ", {}: {}", "uncles_hash", self.uncles_hash())?; + write!(f, ", {}: {}", "extra_hash", self.extra_hash())?; write!(f, ", {}: {}", "dao", self.dao())?; write!(f, " }}") } @@ -7008,7 +7008,7 @@ impl RawHeader { pub fn proposals_hash(&self) -> Byte32 { Byte32::new_unchecked(self.0.slice(96..128)) } - pub fn uncles_hash(&self) -> Byte32 { + pub fn extra_hash(&self) -> Byte32 { Byte32::new_unchecked(self.0.slice(128..160)) } pub fn dao(&self) -> Byte32 { @@ -7049,7 +7049,7 @@ impl molecule::prelude::Entity for RawHeader { .parent_hash(self.parent_hash()) .transactions_root(self.transactions_root()) .proposals_hash(self.proposals_hash()) - .uncles_hash(self.uncles_hash()) + .extra_hash(self.extra_hash()) .dao(self.dao()) } } @@ -7080,7 +7080,7 @@ impl<'r> ::core::fmt::Display for RawHeaderReader<'r> { write!(f, ", {}: {}", "parent_hash", self.parent_hash())?; write!(f, ", {}: {}", "transactions_root", self.transactions_root())?; write!(f, ", {}: {}", "proposals_hash", self.proposals_hash())?; - write!(f, ", {}: {}", "uncles_hash", self.uncles_hash())?; + write!(f, ", {}: {}", "extra_hash", self.extra_hash())?; write!(f, ", {}: {}", "dao", self.dao())?; write!(f, " }}") } @@ -7113,7 +7113,7 @@ impl<'r> RawHeaderReader<'r> { pub fn proposals_hash(&self) -> Byte32Reader<'r> { Byte32Reader::new_unchecked(&self.as_slice()[96..128]) } - pub fn uncles_hash(&self) -> Byte32Reader<'r> { + pub fn extra_hash(&self) -> Byte32Reader<'r> { Byte32Reader::new_unchecked(&self.as_slice()[128..160]) } pub fn dao(&self) -> Byte32Reader<'r> { @@ -7151,7 +7151,7 @@ pub struct RawHeaderBuilder { pub(crate) parent_hash: Byte32, pub(crate) transactions_root: Byte32, pub(crate) proposals_hash: Byte32, - pub(crate) uncles_hash: Byte32, + pub(crate) extra_hash: Byte32, pub(crate) dao: Byte32, } impl RawHeaderBuilder { @@ -7190,8 +7190,8 @@ impl RawHeaderBuilder { self.proposals_hash = v; self } - pub fn uncles_hash(mut self, v: Byte32) -> Self { - self.uncles_hash = v; + pub fn extra_hash(mut self, v: Byte32) -> Self { + self.extra_hash = v; self } pub fn dao(mut self, v: Byte32) -> Self { @@ -7214,7 +7214,7 @@ impl molecule::prelude::Builder for RawHeaderBuilder { writer.write_all(self.parent_hash.as_slice())?; writer.write_all(self.transactions_root.as_slice())?; writer.write_all(self.proposals_hash.as_slice())?; - writer.write_all(self.uncles_hash.as_slice())?; + writer.write_all(self.extra_hash.as_slice())?; writer.write_all(self.dao.as_slice())?; Ok(()) } @@ -7989,6 +7989,353 @@ impl molecule::prelude::Builder for BlockBuilder { } } #[derive(Clone)] +pub struct BlockV1(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for BlockV1 { + fn fmt(&self, f: &mut 
::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for BlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for BlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: {}", "transactions", self.transactions())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. ({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl ::core::default::Default for BlockV1 { + fn default() -> Self { + let v: Vec = vec![ + 248, 0, 0, 0, 24, 0, 0, 0, 232, 0, 0, 0, 236, 0, 0, 0, 240, 0, 0, 0, 244, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + BlockV1::new_unchecked(v.into()) + } +} +impl BlockV1 { + pub const FIELD_COUNT: usize = 5; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> Header { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + Header::new_unchecked(self.0.slice(start..end)) + } + pub fn uncles(&self) -> UncleBlockVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + UncleBlockVec::new_unchecked(self.0.slice(start..end)) + } + pub fn transactions(&self) -> TransactionVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + TransactionVec::new_unchecked(self.0.slice(start..end)) + } + pub fn proposals(&self) -> ProposalShortIdVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + ProposalShortIdVec::new_unchecked(self.0.slice(start..end)) + } + pub fn extension(&self) -> Bytes { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + if self.has_extra_fields() { + let end = 
molecule::unpack_number(&slice[24..]) as usize; + Bytes::new_unchecked(self.0.slice(start..end)) + } else { + Bytes::new_unchecked(self.0.slice(start..)) + } + } + pub fn as_reader<'r>(&'r self) -> BlockV1Reader<'r> { + BlockV1Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for BlockV1 { + type Builder = BlockV1Builder; + const NAME: &'static str = "BlockV1"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + BlockV1(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BlockV1Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BlockV1Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder() + .header(self.header()) + .uncles(self.uncles()) + .transactions(self.transactions()) + .proposals(self.proposals()) + .extension(self.extension()) + } +} +#[derive(Clone, Copy)] +pub struct BlockV1Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for BlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for BlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for BlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: {}", "transactions", self.transactions())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. 
({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl<'r> BlockV1Reader<'r> { + pub const FIELD_COUNT: usize = 5; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> HeaderReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + HeaderReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn uncles(&self) -> UncleBlockVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + UncleBlockVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn transactions(&self) -> TransactionVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + TransactionVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn proposals(&self) -> ProposalShortIdVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + ProposalShortIdVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn extension(&self) -> BytesReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + if self.has_extra_fields() { + let end = molecule::unpack_number(&slice[24..]) as usize; + BytesReader::new_unchecked(&self.as_slice()[start..end]) + } else { + BytesReader::new_unchecked(&self.as_slice()[start..]) + } + } +} +impl<'r> molecule::prelude::Reader<'r> for BlockV1Reader<'r> { + type Entity = BlockV1; + const NAME: &'static str = "BlockV1Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + BlockV1Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len < molecule::NUMBER_SIZE { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE, slice_len); + } + let total_size = molecule::unpack_number(slice) as usize; + if slice_len != total_size { + return ve!(Self, TotalSizeNotMatch, total_size, slice_len); + } + if slice_len == molecule::NUMBER_SIZE && Self::FIELD_COUNT == 0 { + return Ok(()); + } + if slice_len < molecule::NUMBER_SIZE * 2 { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE * 2, slice_len); + } + let offset_first = molecule::unpack_number(&slice[molecule::NUMBER_SIZE..]) as usize; + if offset_first % molecule::NUMBER_SIZE != 0 || offset_first < molecule::NUMBER_SIZE * 2 { + return ve!(Self, OffsetsNotMatch); + } + if slice_len < offset_first { + return ve!(Self, HeaderIsBroken, offset_first, slice_len); + } + let field_count = offset_first / molecule::NUMBER_SIZE - 1; + if field_count < Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, 
Self::FIELD_COUNT, field_count); + } else if !compatible && field_count > Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, Self::FIELD_COUNT, field_count); + }; + let mut offsets: Vec = slice[molecule::NUMBER_SIZE..offset_first] + .chunks_exact(molecule::NUMBER_SIZE) + .map(|x| molecule::unpack_number(x) as usize) + .collect(); + offsets.push(total_size); + if offsets.windows(2).any(|i| i[0] > i[1]) { + return ve!(Self, OffsetsNotMatch); + } + HeaderReader::verify(&slice[offsets[0]..offsets[1]], compatible)?; + UncleBlockVecReader::verify(&slice[offsets[1]..offsets[2]], compatible)?; + TransactionVecReader::verify(&slice[offsets[2]..offsets[3]], compatible)?; + ProposalShortIdVecReader::verify(&slice[offsets[3]..offsets[4]], compatible)?; + BytesReader::verify(&slice[offsets[4]..offsets[5]], compatible)?; + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct BlockV1Builder { + pub(crate) header: Header, + pub(crate) uncles: UncleBlockVec, + pub(crate) transactions: TransactionVec, + pub(crate) proposals: ProposalShortIdVec, + pub(crate) extension: Bytes, +} +impl BlockV1Builder { + pub const FIELD_COUNT: usize = 5; + pub fn header(mut self, v: Header) -> Self { + self.header = v; + self + } + pub fn uncles(mut self, v: UncleBlockVec) -> Self { + self.uncles = v; + self + } + pub fn transactions(mut self, v: TransactionVec) -> Self { + self.transactions = v; + self + } + pub fn proposals(mut self, v: ProposalShortIdVec) -> Self { + self.proposals = v; + self + } + pub fn extension(mut self, v: Bytes) -> Self { + self.extension = v; + self + } +} +impl molecule::prelude::Builder for BlockV1Builder { + type Entity = BlockV1; + const NAME: &'static str = "BlockV1Builder"; + fn expected_length(&self) -> usize { + molecule::NUMBER_SIZE * (Self::FIELD_COUNT + 1) + + self.header.as_slice().len() + + self.uncles.as_slice().len() + + self.transactions.as_slice().len() + + self.proposals.as_slice().len() + + self.extension.as_slice().len() + } + fn write(&self, writer: &mut W) -> ::molecule::io::Result<()> { + let mut total_size = molecule::NUMBER_SIZE * (Self::FIELD_COUNT + 1); + let mut offsets = Vec::with_capacity(Self::FIELD_COUNT); + offsets.push(total_size); + total_size += self.header.as_slice().len(); + offsets.push(total_size); + total_size += self.uncles.as_slice().len(); + offsets.push(total_size); + total_size += self.transactions.as_slice().len(); + offsets.push(total_size); + total_size += self.proposals.as_slice().len(); + offsets.push(total_size); + total_size += self.extension.as_slice().len(); + writer.write_all(&molecule::pack_number(total_size as molecule::Number))?; + for offset in offsets.into_iter() { + writer.write_all(&molecule::pack_number(offset as molecule::Number))?; + } + writer.write_all(self.header.as_slice())?; + writer.write_all(self.uncles.as_slice())?; + writer.write_all(self.transactions.as_slice())?; + writer.write_all(self.proposals.as_slice())?; + writer.write_all(self.extension.as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + BlockV1::new_unchecked(inner.into()) + } +} +#[derive(Clone)] pub struct CellbaseWitness(molecule::bytes::Bytes); impl ::core::fmt::LowerHex for CellbaseWitness { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { diff --git a/util/types/src/generated/extensions.rs b/util/types/src/generated/extensions.rs index b537a795ee..3b182362e1 100644 
--- a/util/types/src/generated/extensions.rs +++ b/util/types/src/generated/extensions.rs @@ -5567,6 +5567,388 @@ impl molecule::prelude::Builder for CompactBlockBuilder { } } #[derive(Clone)] +pub struct CompactBlockV1(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for CompactBlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for CompactBlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for CompactBlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: {}", "short_ids", self.short_ids())?; + write!( + f, + ", {}: {}", + "prefilled_transactions", + self.prefilled_transactions() + )?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. ({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl ::core::default::Default for CompactBlockV1 { + fn default() -> Self { + let v: Vec = vec![ + 0, 1, 0, 0, 28, 0, 0, 0, 236, 0, 0, 0, 240, 0, 0, 0, 244, 0, 0, 0, 248, 0, 0, 0, 252, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + CompactBlockV1::new_unchecked(v.into()) + } +} +impl CompactBlockV1 { + pub const FIELD_COUNT: usize = 6; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> Header { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + Header::new_unchecked(self.0.slice(start..end)) + } + pub fn short_ids(&self) -> ProposalShortIdVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + ProposalShortIdVec::new_unchecked(self.0.slice(start..end)) + } + pub fn prefilled_transactions(&self) -> IndexTransactionVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + 
IndexTransactionVec::new_unchecked(self.0.slice(start..end)) + } + pub fn uncles(&self) -> Byte32Vec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + Byte32Vec::new_unchecked(self.0.slice(start..end)) + } + pub fn proposals(&self) -> ProposalShortIdVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + let end = molecule::unpack_number(&slice[24..]) as usize; + ProposalShortIdVec::new_unchecked(self.0.slice(start..end)) + } + pub fn extension(&self) -> Bytes { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[24..]) as usize; + if self.has_extra_fields() { + let end = molecule::unpack_number(&slice[28..]) as usize; + Bytes::new_unchecked(self.0.slice(start..end)) + } else { + Bytes::new_unchecked(self.0.slice(start..)) + } + } + pub fn as_reader<'r>(&'r self) -> CompactBlockV1Reader<'r> { + CompactBlockV1Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for CompactBlockV1 { + type Builder = CompactBlockV1Builder; + const NAME: &'static str = "CompactBlockV1"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + CompactBlockV1(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + CompactBlockV1Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + CompactBlockV1Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder() + .header(self.header()) + .short_ids(self.short_ids()) + .prefilled_transactions(self.prefilled_transactions()) + .uncles(self.uncles()) + .proposals(self.proposals()) + .extension(self.extension()) + } +} +#[derive(Clone, Copy)] +pub struct CompactBlockV1Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for CompactBlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for CompactBlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for CompactBlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: {}", "short_ids", self.short_ids())?; + write!( + f, + ", {}: {}", + "prefilled_transactions", + self.prefilled_transactions() + )?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. 
({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl<'r> CompactBlockV1Reader<'r> { + pub const FIELD_COUNT: usize = 6; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> HeaderReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + HeaderReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn short_ids(&self) -> ProposalShortIdVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + ProposalShortIdVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn prefilled_transactions(&self) -> IndexTransactionVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + IndexTransactionVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn uncles(&self) -> Byte32VecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + Byte32VecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn proposals(&self) -> ProposalShortIdVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + let end = molecule::unpack_number(&slice[24..]) as usize; + ProposalShortIdVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn extension(&self) -> BytesReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[24..]) as usize; + if self.has_extra_fields() { + let end = molecule::unpack_number(&slice[28..]) as usize; + BytesReader::new_unchecked(&self.as_slice()[start..end]) + } else { + BytesReader::new_unchecked(&self.as_slice()[start..]) + } + } +} +impl<'r> molecule::prelude::Reader<'r> for CompactBlockV1Reader<'r> { + type Entity = CompactBlockV1; + const NAME: &'static str = "CompactBlockV1Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + CompactBlockV1Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len < molecule::NUMBER_SIZE { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE, slice_len); + } + let total_size = molecule::unpack_number(slice) as usize; + if slice_len != total_size { + return ve!(Self, TotalSizeNotMatch, total_size, slice_len); + } + if slice_len == molecule::NUMBER_SIZE && Self::FIELD_COUNT == 0 { + return Ok(()); + } + if slice_len < molecule::NUMBER_SIZE * 2 { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE * 2, slice_len); + } + let offset_first = molecule::unpack_number(&slice[molecule::NUMBER_SIZE..]) as usize; + if offset_first % molecule::NUMBER_SIZE != 
0 || offset_first < molecule::NUMBER_SIZE * 2 { + return ve!(Self, OffsetsNotMatch); + } + if slice_len < offset_first { + return ve!(Self, HeaderIsBroken, offset_first, slice_len); + } + let field_count = offset_first / molecule::NUMBER_SIZE - 1; + if field_count < Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, Self::FIELD_COUNT, field_count); + } else if !compatible && field_count > Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, Self::FIELD_COUNT, field_count); + }; + let mut offsets: Vec = slice[molecule::NUMBER_SIZE..offset_first] + .chunks_exact(molecule::NUMBER_SIZE) + .map(|x| molecule::unpack_number(x) as usize) + .collect(); + offsets.push(total_size); + if offsets.windows(2).any(|i| i[0] > i[1]) { + return ve!(Self, OffsetsNotMatch); + } + HeaderReader::verify(&slice[offsets[0]..offsets[1]], compatible)?; + ProposalShortIdVecReader::verify(&slice[offsets[1]..offsets[2]], compatible)?; + IndexTransactionVecReader::verify(&slice[offsets[2]..offsets[3]], compatible)?; + Byte32VecReader::verify(&slice[offsets[3]..offsets[4]], compatible)?; + ProposalShortIdVecReader::verify(&slice[offsets[4]..offsets[5]], compatible)?; + BytesReader::verify(&slice[offsets[5]..offsets[6]], compatible)?; + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct CompactBlockV1Builder { + pub(crate) header: Header, + pub(crate) short_ids: ProposalShortIdVec, + pub(crate) prefilled_transactions: IndexTransactionVec, + pub(crate) uncles: Byte32Vec, + pub(crate) proposals: ProposalShortIdVec, + pub(crate) extension: Bytes, +} +impl CompactBlockV1Builder { + pub const FIELD_COUNT: usize = 6; + pub fn header(mut self, v: Header) -> Self { + self.header = v; + self + } + pub fn short_ids(mut self, v: ProposalShortIdVec) -> Self { + self.short_ids = v; + self + } + pub fn prefilled_transactions(mut self, v: IndexTransactionVec) -> Self { + self.prefilled_transactions = v; + self + } + pub fn uncles(mut self, v: Byte32Vec) -> Self { + self.uncles = v; + self + } + pub fn proposals(mut self, v: ProposalShortIdVec) -> Self { + self.proposals = v; + self + } + pub fn extension(mut self, v: Bytes) -> Self { + self.extension = v; + self + } +} +impl molecule::prelude::Builder for CompactBlockV1Builder { + type Entity = CompactBlockV1; + const NAME: &'static str = "CompactBlockV1Builder"; + fn expected_length(&self) -> usize { + molecule::NUMBER_SIZE * (Self::FIELD_COUNT + 1) + + self.header.as_slice().len() + + self.short_ids.as_slice().len() + + self.prefilled_transactions.as_slice().len() + + self.uncles.as_slice().len() + + self.proposals.as_slice().len() + + self.extension.as_slice().len() + } + fn write(&self, writer: &mut W) -> ::molecule::io::Result<()> { + let mut total_size = molecule::NUMBER_SIZE * (Self::FIELD_COUNT + 1); + let mut offsets = Vec::with_capacity(Self::FIELD_COUNT); + offsets.push(total_size); + total_size += self.header.as_slice().len(); + offsets.push(total_size); + total_size += self.short_ids.as_slice().len(); + offsets.push(total_size); + total_size += self.prefilled_transactions.as_slice().len(); + offsets.push(total_size); + total_size += self.uncles.as_slice().len(); + offsets.push(total_size); + total_size += self.proposals.as_slice().len(); + offsets.push(total_size); + total_size += self.extension.as_slice().len(); + writer.write_all(&molecule::pack_number(total_size as molecule::Number))?; + for offset in offsets.into_iter() { + writer.write_all(&molecule::pack_number(offset as molecule::Number))?; + } + writer.write_all(self.header.as_slice())?; + 
writer.write_all(self.short_ids.as_slice())?; + writer.write_all(self.prefilled_transactions.as_slice())?; + writer.write_all(self.uncles.as_slice())?; + writer.write_all(self.proposals.as_slice())?; + writer.write_all(self.extension.as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + CompactBlockV1::new_unchecked(inner.into()) + } +} +#[derive(Clone)] pub struct RelayTransaction(molecule::bytes::Bytes); impl ::core::fmt::LowerHex for RelayTransaction { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index 3f956d2a30..4d563007ef 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -174,7 +174,7 @@ fn test_invalid_uncle_hash_case2() { .cloned() .unwrap() .as_advanced_builder() - .uncles_hash(uncles_hash.clone()) + .extra_hash(uncles_hash.clone()) .build_unchecked(); let epoch = epoch(&shared, &chain1, chain1.len() - 2); diff --git a/verification/contextual/src/uncles_verifier.rs b/verification/contextual/src/uncles_verifier.rs index 0333f03179..f7523a27e7 100644 --- a/verification/contextual/src/uncles_verifier.rs +++ b/verification/contextual/src/uncles_verifier.rs @@ -44,14 +44,18 @@ where pub fn verify(&self) -> Result<(), Error> { let uncles_count = self.block.data().uncles().len() as u32; - // verify uncles_hash - let actual_uncles_hash = self.block.calc_uncles_hash(); - if actual_uncles_hash != self.block.uncles_hash() { - return Err(UnclesError::InvalidHash { - expected: self.block.uncles_hash(), - actual: actual_uncles_hash, + let epoch_number = self.block.epoch().number(); + let hardfork_switch = self.provider.consensus().hardfork_switch(); + if !hardfork_switch.is_reuse_uncles_hash_as_extra_hash_enabled(epoch_number) { + // verify uncles_hash + let actual_uncles_hash = self.block.calc_uncles_hash(); + if actual_uncles_hash != self.block.extra_hash() { + return Err(UnclesError::InvalidHash { + expected: self.block.extra_hash(), + actual: actual_uncles_hash, + } + .into()); } - .into()); } // if self.block.uncles is empty, return diff --git a/verification/src/block_verifier.rs b/verification/src/block_verifier.rs index dbab3c36c4..4c5490c490 100644 --- a/verification/src/block_verifier.rs +++ b/verification/src/block_verifier.rs @@ -16,6 +16,7 @@ use std::collections::HashSet; /// Contains: /// - [`CellbaseVerifier`](./struct.CellbaseVerifier.html) /// - [`BlockBytesVerifier`](./struct.BlockBytesVerifier.html) +/// - [`BlockExtensionVerifier`](./struct.BlockExtensionVerifier.html) /// - [`BlockProposalsLimitVerifier`](./struct.BlockProposalsLimitVerifier.html) /// - [`DuplicateVerifier`](./struct.DuplicateVerifier.html) /// - [`MerkleRootVerifier`](./struct.MerkleRootVerifier.html) @@ -39,6 +40,7 @@ impl<'a> Verifier for BlockVerifier<'a> { let max_block_bytes = self.consensus.max_block_bytes(); BlockProposalsLimitVerifier::new(max_block_proposals_limit).verify(target)?; BlockBytesVerifier::new(max_block_bytes).verify(target)?; + BlockExtensionVerifier::new(self.consensus).verify(target)?; CellbaseVerifier::new().verify(target)?; DuplicateVerifier::new().verify(target)?; MerkleRootVerifier::new().verify(target) @@ -236,6 +238,57 @@ impl BlockBytesVerifier { } } +/// BlockExtensionVerifier. 
+/// +/// Check block extension. +#[derive(Clone)] +pub struct BlockExtensionVerifier<'a> { + consensus: &'a Consensus, +} + +impl<'a> BlockExtensionVerifier<'a> { + pub fn new(consensus: &'a Consensus) -> Self { + BlockExtensionVerifier { consensus } + } + + pub fn verify(&self, block: &BlockView) -> Result<(), Error> { + let epoch_number = block.epoch().number(); + let hardfork_switch = self.consensus.hardfork_switch(); + let extra_fields_count = block.data().count_extra_fields(); + + if hardfork_switch.is_reuse_uncles_hash_as_extra_hash_enabled(epoch_number) { + match extra_fields_count { + 0 => {} + 1 => { + let extension = if let Some(data) = block.extension() { + data + } else { + return Err(BlockErrorKind::UnknownFields.into()); + }; + if extension.is_empty() { + return Err(BlockErrorKind::EmptyBlockExtension.into()); + } + if extension.len() > 96 { + return Err(BlockErrorKind::ExceededMaximumBlockExtensionBytes.into()); + } + } + _ => { + return Err(BlockErrorKind::UnknownFields.into()); + } + } + + let actual_extra_hash = block.calc_extra_hash().extra_hash(); + if actual_extra_hash != block.extra_hash() { + return Err(BlockErrorKind::InvalidExtraHash.into()); + } + } else if extra_fields_count > 0 { + return Err(BlockErrorKind::UnknownFields.into()); + } + + Ok(()) + } +} + /// Context-independent verification checks for block transactions /// /// Basic checks that don't depend on any context diff --git a/verification/src/error.rs b/verification/src/error.rs index 72badedd05..0ea443d7de 100644 --- a/verification/src/error.rs +++ b/verification/src/error.rs @@ -102,6 +102,18 @@ pub enum BlockErrorKind { /// Total bytes of block exceeds limit. ExceededMaximumBlockBytes, + + /// Empty block extension. + EmptyBlockExtension, + + /// Total bytes of block extension exceeds limit. + ExceededMaximumBlockExtensionBytes, + + /// The block has unknown field. + UnknownFields, + + /// The calculated extra-hash does not match with the one in the header. 
+ InvalidExtraHash, } def_error_base_on_kind!( From 40847cad75f4967b4723286927533dd90431d33c Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Thu, 27 May 2021 06:55:12 +0800 Subject: [PATCH 10/18] refactor: remove useless parameter "with_data" because it always be true (tricky) --- util/types/src/core/cell.rs | 53 +++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index 4098c7321d..cf826c3f98 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -600,11 +600,11 @@ fn parse_dep_group_data(slice: &[u8]) -> Result { } } -fn resolve_dep_group Result>( +fn resolve_dep_group Result>( out_point: &OutPoint, mut cell_resolver: F, ) -> Result<(CellMeta, Vec), OutPointError> { - let dep_group_cell = cell_resolver(out_point, true)?; + let dep_group_cell = cell_resolver(out_point)?; let data = dep_group_cell .mem_cell_data .clone() @@ -614,7 +614,7 @@ fn resolve_dep_group Result( ); let mut current_inputs = HashSet::new(); - let mut resolve_cell = - |out_point: &OutPoint, with_data: bool| -> Result { - if seen_inputs.contains(out_point) { - return Err(OutPointError::Dead(out_point.clone())); - } + let resolve_cell = |out_point: &OutPoint, with_data: bool| -> Result { + if seen_inputs.contains(out_point) { + return Err(OutPointError::Dead(out_point.clone())); + } - let cell_status = cell_provider.cell(out_point, with_data); - match cell_status { - CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())), - CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())), - CellStatus::Live(cell_meta) => Ok(cell_meta), - } - }; + let cell_status = cell_provider.cell(out_point, with_data); + match cell_status { + CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())), + CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())), + CellStatus::Live(cell_meta) => Ok(cell_meta), + } + }; // skip resolve input of cellbase if !transaction.is_cellbase() { @@ -657,9 +656,12 @@ pub fn resolve_transaction( } } + let mut resolve_cell_alway_with_data = + |out_point: &OutPoint| -> Result { resolve_cell(out_point, true) }; + resolve_transaction_deps_with_system_cell_cache( &transaction, - &mut resolve_cell, + &mut resolve_cell_alway_with_data, &mut resolved_cell_deps, &mut resolved_dep_groups, )?; @@ -678,7 +680,7 @@ pub fn resolve_transaction( } fn resolve_transaction_deps_with_system_cell_cache< - F: FnMut(&OutPoint, bool) -> Result, + F: FnMut(&OutPoint) -> Result, >( transaction: &TransactionView, cell_resolver: &mut F, @@ -718,7 +720,7 @@ fn resolve_transaction_deps_with_system_cell_cache< Ok(()) } -fn resolve_transaction_dep Result>( +fn resolve_transaction_dep Result>( cell_dep: &CellDep, cell_resolver: &mut F, resolved_cell_deps: &mut Vec, @@ -729,7 +731,7 @@ fn resolve_transaction_dep Result Result( cell_provider: &CP, out_point: &OutPoint, - with_data: bool, ) -> Result { - let cell_status = cell_provider.cell(out_point, with_data); + let cell_status = cell_provider.cell(out_point, true); match cell_status { CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())), CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())), @@ -781,21 +782,21 @@ pub fn setup_system_cell_cache( let mut cell_deps = HashMap::new(); let secp_code_dep_cell = - build_cell_meta_from_out_point(cell_provider, &secp_code_dep.out_point(), true) + build_cell_meta_from_out_point(cell_provider, &secp_code_dep.out_point()) .expect("resolve secp_code_dep_cell"); 
cell_deps.insert(secp_code_dep, ResolvedDep::Cell(secp_code_dep_cell)); - let dao_dep_cell = build_cell_meta_from_out_point(cell_provider, &dao_dep.out_point(), true) + let dao_dep_cell = build_cell_meta_from_out_point(cell_provider, &dao_dep.out_point()) .expect("resolve dao_dep_cell"); cell_deps.insert(dao_dep, ResolvedDep::Cell(dao_dep_cell)); let secp_data_dep_cell = - build_cell_meta_from_out_point(cell_provider, &secp_data_dep.out_point(), true) + build_cell_meta_from_out_point(cell_provider, &secp_data_dep.out_point()) .expect("resolve secp_data_dep_cell"); cell_deps.insert(secp_data_dep, ResolvedDep::Cell(secp_data_dep_cell)); - let resolve_cell = |out_point: &OutPoint, with_data: bool| -> Result { - build_cell_meta_from_out_point(cell_provider, out_point, with_data) + let resolve_cell = |out_point: &OutPoint| -> Result { + build_cell_meta_from_out_point(cell_provider, out_point) }; let secp_group_dep_cell = resolve_dep_group(&secp_group_dep.out_point(), resolve_cell) From 10ffedf61e1edefe6a4e3da910ed5903b4066226 Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Thu, 27 May 2021 07:13:20 +0800 Subject: [PATCH 11/18] feat(hardfork): allow loading uncommitted cell data hashes from tx pool --- benches/benches/benchmarks/resolve.rs | 6 +- benches/benches/benchmarks/util.rs | 8 +- chain/src/chain.rs | 4 + chain/src/tests/basic.rs | 14 ++-- chain/src/tests/cell.rs | 16 ++-- chain/src/tests/load_input_data_hash_cell.rs | 69 +++++++++++++---- .../tests/non_contextual_block_txs_verify.rs | 1 - chain/src/tests/util.rs | 8 +- rpc/src/module/chain.rs | 5 +- rpc/src/module/experiment.rs | 30 ++++++-- rpc/src/module/test.rs | 8 +- rpc/src/test.rs | 10 ++- script/src/syscalls/load_cell.rs | 14 +++- script/src/syscalls/mod.rs | 7 ++ script/src/verify.rs | 16 +++- spec/src/hardfork.rs | 4 + store/src/store.rs | 2 +- sync/src/synchronizer/mod.rs | 11 ++- sync/src/tests/synchronizer.rs | 1 + sync/src/tests/util.rs | 1 + test/template/specs/integration.toml | 1 + tx-pool/src/component/pending.rs | 7 +- tx-pool/src/component/proposed.rs | 7 +- tx-pool/src/pool.rs | 30 +++++++- util/snapshot/src/lib.rs | 6 +- util/test-chain-utils/src/mock_store.rs | 7 +- util/types/src/core/cell.rs | 77 +++++++++++++------ util/types/src/core/hardfork.rs | 17 ++++ 28 files changed, 306 insertions(+), 81 deletions(-) diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 514ad01e99..1373252a11 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -132,7 +132,8 @@ fn bench(c: &mut Criterion) { let mut seen_inputs = HashSet::new(); for tx in txs.clone() { - resolve_transaction(tx, &mut seen_inputs, &provider, snapshot).unwrap(); + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot, false) + .unwrap(); } i -= 1; @@ -158,7 +159,8 @@ fn bench(c: &mut Criterion) { let rtxs: Vec<_> = txs .into_iter() .map(|tx| { - resolve_transaction(tx, &mut seen_inputs, &provider, snapshot).unwrap() + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot, false) + .unwrap() }) .collect(); diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index d99f76f20a..97498bd8bf 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -487,7 +487,13 @@ pub fn dao_data(shared: &Shared, parent: &HeaderView, txs: &[TransactionView]) - let snapshot: &Snapshot = &shared.snapshot(); let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, snapshot); let rtxs = 
txs.iter().cloned().try_fold(vec![], |mut rtxs, tx| { - let rtx = resolve_transaction(tx, &mut seen_inputs, &overlay_cell_provider, snapshot); + let rtx = resolve_transaction( + tx, + &mut seen_inputs, + &overlay_cell_provider, + snapshot, + false, + ); match rtx { Ok(rtx) => { rtxs.push(rtx); diff --git a/chain/src/chain.rs b/chain/src/chain.rs index b88efe00e0..993c287fd6 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -684,6 +684,7 @@ impl ChainService { let verify_context = VerifyContext::new(txn, self.shared.consensus()); let async_handle = self.shared.tx_pool_controller().handle(); + let hardfork_switch = self.shared.consensus().hardfork_switch(); let mut found_error = None; for (ext, b) in fork @@ -704,6 +705,8 @@ impl ChainService { }; let transactions = b.transactions(); + let allow_in_txpool = hardfork_switch + .is_allow_cell_data_hash_in_txpool_enabled(b.epoch().number()); let resolved = { let txn_cell_provider = txn.cell_provider(); let cell_provider = OverlayCellProvider::new(&block_cp, &txn_cell_provider); @@ -716,6 +719,7 @@ impl ChainService { &mut seen_inputs, &cell_provider, &verify_context, + allow_in_txpool, ) }) .collect::, _>>() diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index 292c1207d5..3fff143171 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -115,7 +115,7 @@ fn test_genesis_transaction_spend() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(genesis_tx_hash, 0), false), + .cell(&OutPoint::new(genesis_tx_hash, 0), false, false), CellStatus::Unknown ); } @@ -142,7 +142,7 @@ fn test_transaction_spend_in_same_block() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(hash.to_owned().to_owned(), 0), false), + .cell(&OutPoint::new(hash.to_owned().to_owned(), 0), false, false), CellStatus::Unknown ); } @@ -171,12 +171,14 @@ fn test_transaction_spend_in_same_block() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(last_cellbase_hash, 0), false), + .cell(&OutPoint::new(last_cellbase_hash, 0), false, false), CellStatus::Unknown ); assert_eq!( - shared.snapshot().cell(&OutPoint::new(tx1_hash, 0), false), + shared + .snapshot() + .cell(&OutPoint::new(tx1_hash, 0), false, false), CellStatus::Unknown ); @@ -189,7 +191,7 @@ fn test_transaction_spend_in_same_block() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(tx2_hash.clone(), 0), false), + .cell(&OutPoint::new(tx2_hash.clone(), 0), false, false), CellStatus::live_cell(CellMeta { cell_output: tx2_output, data_bytes: tx2_output_data.len() as u64, @@ -375,7 +377,7 @@ fn test_genesis_transaction_fetch() { let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); let out_point = OutPoint::new(root_hash, 0); - let state = shared.snapshot().cell(&out_point, false); + let state = shared.snapshot().cell(&out_point, false, false); assert!(state.is_live()); } diff --git a/chain/src/tests/cell.rs b/chain/src/tests/cell.rs index 41ca204d4f..68ed98a8f1 100644 --- a/chain/src/tests/cell.rs +++ b/chain/src/tests/cell.rs @@ -144,18 +144,21 @@ fn test_block_cells_update() { for tx in block.transactions()[1..4].iter() { for pt in tx.output_pts() { // full spent - assert_eq!(txn_cell_provider.cell(&pt, false), CellStatus::Unknown); + assert_eq!( + txn_cell_provider.cell(&pt, false, false), + CellStatus::Unknown + ); } } // ensure tx3 outputs is unspent after attach_block_cell for pt in block.transactions()[4].output_pts() { - assert!(txn_cell_provider.cell(&pt, false).is_live()); + assert!(txn_cell_provider.cell(&pt, false, false).is_live()); } 
// ensure issue_tx outputs is spent after attach_block_cell
 assert_eq!(
- txn_cell_provider.cell(&issue_tx.output_pts()[0], false),
+ txn_cell_provider.cell(&issue_tx.output_pts()[0], false, false),
 CellStatus::Unknown
 );
@@ -164,12 +167,15 @@ fn test_block_cells_update() {
 // ensure tx0-3 outputs is unknown after detach_block_cell
 for tx in block.transactions()[1..=4].iter() {
 for pt in tx.output_pts() {
- assert_eq!(txn_cell_provider.cell(&pt, false), CellStatus::Unknown);
+ assert_eq!(
+ txn_cell_provider.cell(&pt, false, false),
+ CellStatus::Unknown
+ );
 }
 }
 // ensure issue_tx outputs is back to live after detach_block_cell
 assert!(txn_cell_provider
- .cell(&issue_tx.output_pts()[0], false)
+ .cell(&issue_tx.output_pts()[0], false, false)
 .is_live());
 }
diff --git a/chain/src/tests/load_input_data_hash_cell.rs b/chain/src/tests/load_input_data_hash_cell.rs
index f43e7770a5..65fb76663e 100644
--- a/chain/src/tests/load_input_data_hash_cell.rs
+++ b/chain/src/tests/load_input_data_hash_cell.rs
@@ -4,12 +4,13 @@ use crate::tests::util::{
 use ckb_chain_spec::consensus::ConsensusBuilder;
 use ckb_dao_utils::genesis_dao_data;
 use ckb_test_chain_utils::load_input_data_hash_cell;
+use ckb_tx_pool::{PlugTarget, TxEntry};
 use ckb_types::prelude::*;
 use ckb_types::{
 bytes::Bytes,
 core::{
- capacity_bytes, BlockBuilder, Capacity, EpochNumberWithFraction, TransactionBuilder,
- TransactionView,
+ capacity_bytes, hardfork::HardForkSwitch, BlockBuilder, Capacity, EpochNumberWithFraction,
+ TransactionBuilder, TransactionView,
 },
 packed::{CellDep, CellInput, CellOutputBuilder, OutPoint},
 utilities::DIFF_TWO,
@@ -48,7 +49,8 @@ pub(crate) fn create_load_input_data_hash_transaction(
 .build()
 }
-// Ensure tx-pool accept tx which calls syscall load_cell_data_hash from input
+// Ensure the tx-pool rejects or accepts a tx which calls syscall load_cell_data_hash from an input, based on
+// hardfork features.
#[test] fn test_load_input_data_hash_cell() { let (_, _, load_input_data_hash_script) = load_input_data_hash_cell(); @@ -74,20 +76,57 @@ fn test_load_input_data_hash_cell() { .dao(dao) .build(); - let consensus = ConsensusBuilder::default() - .cellbase_maturity(EpochNumberWithFraction::new(0, 0, 1)) - .genesis_block(genesis_block) - .build(); + { + // Test CKB v2019 reject + let hardfork_switch = HardForkSwitch::new_without_any_enabled(); + let consensus = ConsensusBuilder::default() + .cellbase_maturity(EpochNumberWithFraction::new(0, 0, 1)) + .genesis_block(genesis_block.clone()) + .hardfork_switch(hardfork_switch) + .build(); + + let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); + + let tx0 = create_load_input_data_hash_transaction(&issue_tx, 0); + let tx1 = create_load_input_data_hash_transaction(&tx0, 0); + + let tx_pool = shared.tx_pool_controller(); + let ret = tx_pool.submit_local_tx(tx0.clone()).unwrap(); + assert!(ret.is_err()); + //ValidationFailure(2) missing item + assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)")); + + let entry0 = vec![TxEntry::dummy_resolve(tx0, 0, Capacity::shannons(0), 100)]; + tx_pool.plug_entry(entry0, PlugTarget::Proposed).unwrap(); + + // Ensure tx which calls syscall load_cell_data_hash will got reject even previous tx is already in tx-pool + let ret = tx_pool.submit_local_tx(tx1).unwrap(); + assert!(ret.is_err()); + assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)")); + } + { + // Test CKB v2021 accept + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0228(0) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .cellbase_maturity(EpochNumberWithFraction::new(0, 0, 1)) + .genesis_block(genesis_block) + .hardfork_switch(hardfork_switch) + .build(); - let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); + let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); - let tx0 = create_load_input_data_hash_transaction(&issue_tx, 0); - let tx1 = create_load_input_data_hash_transaction(&tx0, 0); + let tx0 = create_load_input_data_hash_transaction(&issue_tx, 0); + let tx1 = create_load_input_data_hash_transaction(&tx0, 0); - let tx_pool = shared.tx_pool_controller(); - let ret = tx_pool.submit_local_tx(tx0).unwrap(); - assert!(ret.is_ok()); + let tx_pool = shared.tx_pool_controller(); + let ret = tx_pool.submit_local_tx(tx0).unwrap(); + assert!(ret.is_ok()); - let ret = tx_pool.submit_local_tx(tx1).unwrap(); - assert!(ret.is_ok()); + let ret = tx_pool.submit_local_tx(tx1).unwrap(); + assert!(ret.is_ok()); + } } diff --git a/chain/src/tests/non_contextual_block_txs_verify.rs b/chain/src/tests/non_contextual_block_txs_verify.rs index 1266f4c3f6..8201fb36d9 100644 --- a/chain/src/tests/non_contextual_block_txs_verify.rs +++ b/chain/src/tests/non_contextual_block_txs_verify.rs @@ -50,7 +50,6 @@ pub(crate) fn create_cellbase( } } -#[allow(clippy::too_many_arguments)] pub(crate) fn gen_block( parent_header: &HeaderView, transactions: Vec, diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index f3acf25321..5506e9bdc7 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -531,7 +531,13 @@ pub fn dao_data( let transactions_provider = TransactionsProvider::new(txs.iter()); let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, store); let rtxs = txs.iter().try_fold(vec![], |mut rtxs, tx| { - let rtx = resolve_transaction(tx.clone(), &mut seen_inputs, 
&overlay_cell_provider, store); + let rtx = resolve_transaction( + tx.clone(), + &mut seen_inputs, + &overlay_cell_provider, + store, + false, + ); match rtx { Ok(rtx) => { rtxs.push(rtx); diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 8361c1911d..2a43c508a5 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1393,7 +1393,10 @@ impl ChainRpc for ChainRpcImpl { } fn get_live_cell(&self, out_point: OutPoint, with_data: bool) -> Result { - let cell_status = self.shared.snapshot().cell(&out_point.into(), with_data); + let cell_status = self + .shared + .snapshot() + .cell(&out_point.into(), with_data, true); Ok(cell_status.into()) } diff --git a/rpc/src/module/experiment.rs b/rpc/src/module/experiment.rs index b187462dfa..814d5ef0af 100644 --- a/rpc/src/module/experiment.rs +++ b/rpc/src/module/experiment.rs @@ -214,7 +214,12 @@ pub(crate) struct DryRunner<'a> { } impl<'a> CellProvider for DryRunner<'a> { - fn cell(&self, out_point: &packed::OutPoint, with_data: bool) -> CellStatus { + fn cell( + &self, + out_point: &packed::OutPoint, + with_data: bool, + _allow_in_txpool: bool, + ) -> CellStatus { let snapshot = self.shared.snapshot(); snapshot .get_cell(out_point) @@ -244,12 +249,27 @@ impl<'a> DryRunner<'a> { pub(crate) fn run(&self, tx: packed::Transaction) -> Result { let snapshot: &Snapshot = &self.shared.snapshot(); - match resolve_transaction(tx.into_view(), &mut HashSet::new(), self, self) { + let consensus = snapshot.consensus(); + let tx_env = { + let tip_header = snapshot.tip_header(); + TxVerifyEnv::new_submit(&tip_header) + }; + let allow_in_txpool = { + let proposal_window = consensus.tx_proposal_window(); + let epoch_number = tx_env.epoch_number(proposal_window); + consensus + .hardfork_switch() + .is_allow_cell_data_hash_in_txpool_enabled(epoch_number) + }; + match resolve_transaction( + tx.into_view(), + &mut HashSet::new(), + self, + self, + allow_in_txpool, + ) { Ok(resolved) => { - let consensus = snapshot.consensus(); let max_cycles = consensus.max_block_cycles; - let tip_header = snapshot.tip_header(); - let tx_env = TxVerifyEnv::new_submit(&tip_header); match ScriptVerifier::new( &resolved, consensus, diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index ea364525c9..93641af36e 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -2,7 +2,9 @@ use crate::error::RPCError; use ckb_app_config::BlockAssemblerConfig; use ckb_chain::chain::ChainController; use ckb_dao::DaoCalculator; -use ckb_jsonrpc_types::{Block, BlockTemplate, Cycle, JsonBytes, Script, Transaction}; +use ckb_jsonrpc_types::{ + AsEpochNumberWithFraction, Block, BlockTemplate, Cycle, JsonBytes, Script, Transaction, +}; use ckb_logger::error; use ckb_network::{NetworkController, SupportProtocols}; use ckb_shared::{shared::Shared, Snapshot}; @@ -179,12 +181,16 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl { let transactions_provider = TransactionsProvider::new(txs.as_slice().iter()); let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, snapshot); + let allow_in_txpool = consensus + .hardfork_switch() + .is_allow_cell_data_hash_in_txpool_enabled(block_template.epoch.epoch_number()); let rtxs = txs.iter().map(|tx| { resolve_transaction( tx.clone(), &mut seen_inputs, &overlay_cell_provider, snapshot, + allow_in_txpool, ).map_err(|err| { error!( "resolve transactions error when generating block with block template, error: {:?}", diff --git a/rpc/src/test.rs b/rpc/src/test.rs index e166d74e0e..d0639cd5b1 100644 --- 
a/rpc/src/test.rs +++ b/rpc/src/test.rs @@ -110,8 +110,14 @@ fn next_block(shared: &Shared, parent: &HeaderView) -> BlockView { let cellbase = always_success_cellbase(parent.number() + 1, reward.total, shared.consensus()); let dao = { - let resolved_cellbase = - resolve_transaction(cellbase.clone(), &mut HashSet::new(), snapshot, snapshot).unwrap(); + let resolved_cellbase = resolve_transaction( + cellbase.clone(), + &mut HashSet::new(), + snapshot, + snapshot, + false, + ) + .unwrap(); let data_loader = shared.store().as_data_provider(); DaoCalculator::new(shared.consensus(), &data_loader) .dao_field(&[resolved_cellbase], parent) diff --git a/script/src/syscalls/load_cell.rs b/script/src/syscalls/load_cell.rs index 9a2d85710d..3f19738496 100644 --- a/script/src/syscalls/load_cell.rs +++ b/script/src/syscalls/load_cell.rs @@ -24,6 +24,7 @@ pub struct LoadCell<'a, DL> { resolved_cell_deps: &'a [CellMeta], group_inputs: &'a [usize], group_outputs: &'a [usize], + allow_cell_data_hash_in_txpool: bool, } impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { @@ -34,6 +35,7 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { resolved_cell_deps: &'a [CellMeta], group_inputs: &'a [usize], group_outputs: &'a [usize], + allow_cell_data_hash_in_txpool: bool, ) -> LoadCell<'a, DL> { LoadCell { data_loader, @@ -42,6 +44,7 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { resolved_cell_deps, group_inputs, group_outputs, + allow_cell_data_hash_in_txpool, } } @@ -102,8 +105,15 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { (SUCCESS, store_data(machine, &buffer)?) } CellField::DataHash => { - if let Some(bytes) = self.data_loader.load_cell_data_hash(cell) { - (SUCCESS, store_data(machine, &bytes.as_bytes())?) + if self.allow_cell_data_hash_in_txpool { + if let Some(bytes) = self.data_loader.load_cell_data_hash(cell) { + (SUCCESS, store_data(machine, &bytes.as_bytes())?) + } else { + (ITEM_MISSING, 0) + } + } else if let Some(data_hash) = &cell.mem_cell_data_hash { + let bytes = data_hash.raw_data(); + (SUCCESS, store_data(machine, &bytes)?) 
} else { (ITEM_MISSING, 0) } diff --git a/script/src/syscalls/mod.rs b/script/src/syscalls/mod.rs index f3864aa036..dfe6543811 100644 --- a/script/src/syscalls/mod.rs +++ b/script/src/syscalls/mod.rs @@ -253,6 +253,7 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, + false, ); prop_assert!(load_cell.ecall(&mut machine).is_ok()); @@ -297,6 +298,7 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, + false, ); let input_correct_data = input_cell.cell_output.as_slice(); @@ -389,6 +391,7 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, + false, ); let input_correct_data = input_cell.cell_output.as_slice(); @@ -443,6 +446,7 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, + false, ); let input_correct_data = input_cell.cell_output.as_slice(); @@ -510,6 +514,7 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, + false, ); prop_assert!(machine.memory_mut().store64(&size_addr, &16).is_ok()); @@ -564,6 +569,7 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, + false, ); assert!(machine.memory_mut().store64(&size_addr, &100).is_ok()); @@ -905,6 +911,7 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, + false, ); prop_assert!(machine.memory_mut().store64(&size_addr, &64).is_ok()); diff --git a/script/src/verify.rs b/script/src/verify.rs index b6b8f4e892..9c10d5d857 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -240,6 +240,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D &'a self, group_inputs: &'a [usize], group_outputs: &'a [usize], + allow_cell_data_hash_in_txpool: bool, ) -> LoadCell<'a, DL> { LoadCell::new( &self.data_loader, @@ -248,6 +249,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D self.resolved_cell_deps(), group_inputs, group_outputs, + allow_cell_data_hash_in_txpool, ) } @@ -428,12 +430,20 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D script_group: &'a ScriptGroup, ) -> Vec + 'a)>> { let current_script_hash = script_group.script.calc_script_hash(); + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let allow_cell_data_hash_in_txpool = self + .consensus + .hardfork_switch() + .is_allow_cell_data_hash_in_txpool_enabled(epoch_number); vec![ Box::new(self.build_load_script_hash(current_script_hash.clone())), Box::new(self.build_load_tx()), - Box::new( - self.build_load_cell(&script_group.input_indices, &script_group.output_indices), - ), + Box::new(self.build_load_cell( + &script_group.input_indices, + &script_group.output_indices, + allow_cell_data_hash_in_txpool, + )), Box::new(self.build_load_input(&script_group.input_indices)), Box::new(self.build_load_header(&script_group.input_indices)), Box::new( diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index c3f05e51d5..731d0714a6 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -21,6 +21,8 @@ pub struct HardForkConfig { /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0224: Option, /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0228: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0230: Option, } @@ -69,6 +71,7 @@ impl HardForkConfig { .rfc_pr_0222(check_default!(self, rfc_pr_0222, ckb2021)) 
.rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)) .rfc_pr_0224(check_default!(self, rfc_pr_0224, ckb2021)) + .rfc_pr_0228(check_default!(self, rfc_pr_0228, ckb2021)) .rfc_pr_0230(check_default!(self, rfc_pr_0230, ckb2021)); Ok(builder) } @@ -82,6 +85,7 @@ impl HardForkConfig { .rfc_pr_0222(self.rfc_pr_0222.unwrap_or(default)) .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) .rfc_pr_0224(self.rfc_pr_0224.unwrap_or(default)) + .rfc_pr_0228(self.rfc_pr_0228.unwrap_or(default)) .rfc_pr_0230(self.rfc_pr_0230.unwrap_or(default)) .build() } diff --git a/store/src/store.rs b/store/src/store.rs index dcc7e08d6a..18729aa7e9 100644 --- a/store/src/store.rs +++ b/store/src/store.rs @@ -530,7 +530,7 @@ impl<'a, S> CellProvider for CellProviderWrapper<'a, S> where S: ChainStore<'a>, { - fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, with_data: bool, _allow_in_txpool: bool) -> CellStatus { match self.0.get_cell(out_point) { Some(mut cell_meta) => { if with_data { diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index bc665402d7..62ec74a7c7 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -935,9 +935,14 @@ mod tests { let cellbase = create_cellbase(shared, parent_header, number); let dao = { let snapshot: &Snapshot = &shared.snapshot(); - let resolved_cellbase = - resolve_transaction(cellbase.clone(), &mut HashSet::new(), snapshot, snapshot) - .unwrap(); + let resolved_cellbase = resolve_transaction( + cellbase.clone(), + &mut HashSet::new(), + snapshot, + snapshot, + false, + ) + .unwrap(); let data_loader = shared.store().as_data_provider(); DaoCalculator::new(shared.consensus(), &data_loader) .dao_field(&[resolved_cellbase], parent_header) diff --git a/sync/src/tests/synchronizer.rs b/sync/src/tests/synchronizer.rs index 1d47bbb4c4..c0c3856e23 100644 --- a/sync/src/tests/synchronizer.rs +++ b/sync/src/tests/synchronizer.rs @@ -146,6 +146,7 @@ fn setup_node(height: u64) -> (TestNode, Shared) { &mut HashSet::new(), snapshot.as_ref(), snapshot.as_ref(), + false, ) .unwrap(); let data_loader = snapshot.as_data_provider(); diff --git a/sync/src/tests/util.rs b/sync/src/tests/util.rs index 7ca67116ce..b551e78ca0 100644 --- a/sync/src/tests/util.rs +++ b/sync/src/tests/util.rs @@ -63,6 +63,7 @@ pub fn inherit_block(shared: &Shared, parent_hash: &Byte32) -> BlockBuilder { &mut HashSet::new(), snapshot.as_ref(), snapshot.as_ref(), + false, ) .unwrap(); let data_loader = snapshot.as_data_provider(); diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index d674cbea64..84a5a3a407 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -73,6 +73,7 @@ rfc_pr_0221 = 9_223_372_036_854_775_807 rfc_pr_0222 = 9_223_372_036_854_775_807 rfc_pr_0223 = 9_223_372_036_854_775_807 rfc_pr_0224 = 9_223_372_036_854_775_807 +rfc_pr_0228 = 9_223_372_036_854_775_807 rfc_pr_0230 = 9_223_372_036_854_775_807 [pow] diff --git a/tx-pool/src/component/pending.rs b/tx-pool/src/component/pending.rs index 0e76fc6348..4c5a576176 100644 --- a/tx-pool/src/component/pending.rs +++ b/tx-pool/src/component/pending.rs @@ -73,7 +73,7 @@ impl PendingQueue { } impl CellProvider for PendingQueue { - fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus { let tx_hash = out_point.tx_hash(); if let Some(entry) = 
self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) { match entry @@ -81,9 +81,12 @@ impl CellProvider for PendingQueue { .output_with_data(out_point.index().unpack()) { Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) + let mut cell_meta = CellMetaBuilder::from_cell_output(output, data) .out_point(out_point.to_owned()) .build(); + if !allow_in_txpool && !with_data { + cell_meta.mem_cell_data_hash = None; + } CellStatus::live_cell(cell_meta) } None => CellStatus::Unknown, diff --git a/tx-pool/src/component/proposed.rs b/tx-pool/src/component/proposed.rs index c365ccadf9..2929d18004 100644 --- a/tx-pool/src/component/proposed.rs +++ b/tx-pool/src/component/proposed.rs @@ -97,16 +97,19 @@ pub struct ProposedPool { } impl CellProvider for ProposedPool { - fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus { if let Some(x) = self.edges.get_output_ref(out_point) { // output consumed if x.is_some() { CellStatus::Dead } else { let (output, data) = self.get_output_with_data(out_point).expect("output"); - let cell_meta = CellMetaBuilder::from_cell_output(output, data) + let mut cell_meta = CellMetaBuilder::from_cell_output(output, data) .out_point(out_point.to_owned()) .build(); + if !allow_in_txpool && !with_data { + cell_meta.mem_cell_data_hash = None; + } CellStatus::live_cell(cell_meta) } } else if self.edges.get_input_ref(out_point).is_some() { diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 2aa4a726af..14cad81f19 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -302,11 +302,22 @@ impl TxPool { let pending_and_proposed_provider = OverlayCellProvider::new(&self.pending, &gap_and_proposed_provider); let mut seen_inputs = HashSet::new(); + let allow_in_txpool = { + let tip_header = snapshot.tip_header(); + let consensus = snapshot.consensus(); + let proposal_window = consensus.tx_proposal_window(); + let tx_env = TxVerifyEnv::new_submit(tip_header); + let epoch_number = tx_env.epoch_number(proposal_window); + consensus + .hardfork_switch() + .is_allow_cell_data_hash_in_txpool_enabled(epoch_number) + }; resolve_transaction( tx, &mut seen_inputs, &pending_and_proposed_provider, snapshot, + allow_in_txpool, ) .map_err(Reject::Resolve) } @@ -332,7 +343,24 @@ impl TxPool { let snapshot = self.snapshot(); let cell_provider = OverlayCellProvider::new(&self.proposed, snapshot); let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &cell_provider, snapshot).map_err(Reject::Resolve) + let allow_in_txpool = { + let tip_header = snapshot.tip_header(); + let consensus = snapshot.consensus(); + let proposal_window = consensus.tx_proposal_window(); + let tx_env = TxVerifyEnv::new_proposed(tip_header, 1); + let epoch_number = tx_env.epoch_number(proposal_window); + consensus + .hardfork_switch() + .is_allow_cell_data_hash_in_txpool_enabled(epoch_number) + }; + resolve_transaction( + tx, + &mut seen_inputs, + &cell_provider, + snapshot, + allow_in_txpool, + ) + .map_err(Reject::Resolve) } pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { diff --git a/util/snapshot/src/lib.rs b/util/snapshot/src/lib.rs index 2fc1745061..8cea796808 100644 --- a/util/snapshot/src/lib.rs +++ b/util/snapshot/src/lib.rs @@ -176,8 +176,10 @@ impl<'a> ChainStore<'a> for Snapshot { } impl CellProvider for Snapshot { - fn cell(&self, out_point: &OutPoint, with_data: bool) -> 
CellStatus { - self.store.cell_provider().cell(out_point, with_data) + fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus { + self.store + .cell_provider() + .cell(out_point, with_data, allow_in_txpool) } } diff --git a/util/test-chain-utils/src/mock_store.rs b/util/test-chain-utils/src/mock_store.rs index 896e456b19..466aa4f943 100644 --- a/util/test-chain-utils/src/mock_store.rs +++ b/util/test-chain-utils/src/mock_store.rs @@ -71,7 +71,7 @@ impl MockStore { } impl CellProvider for MockStore { - fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus { match self.0.get_transaction(&out_point.tx_hash()) { Some((tx, _)) => tx .outputs() @@ -82,9 +82,12 @@ impl CellProvider for MockStore { .get(out_point.index().unpack()) .expect("output data"); - let cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack()) + let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack()) .out_point(out_point.to_owned()) .build(); + if !allow_in_txpool && !with_data { + cell_meta.mem_cell_data_hash = None; + } CellStatus::live_cell(cell_meta) }) diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index bdf0278df3..0850383c0a 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -372,7 +372,7 @@ where /// TODO(doc): @quake pub trait CellProvider { /// TODO(doc): @quake - fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus; + fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus; } /// TODO(doc): @quake @@ -400,11 +400,13 @@ where A: CellProvider, B: CellProvider, { - fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus { - match self.overlay.cell(out_point, with_data) { + fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus { + match self.overlay.cell(out_point, with_data, allow_in_txpool) { CellStatus::Live(cell_meta) => CellStatus::Live(cell_meta), CellStatus::Dead => CellStatus::Dead, - CellStatus::Unknown => self.cell_provider.cell(out_point, with_data), + CellStatus::Unknown => self + .cell_provider + .cell(out_point, with_data, allow_in_txpool), } } } @@ -452,7 +454,7 @@ impl<'a> BlockCellProvider<'a> { } impl<'a> CellProvider for BlockCellProvider<'a> { - fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, _with_data: bool, _allow_in_txpool: bool) -> CellStatus { self.output_indices .get(&out_point.tx_hash()) .and_then(|i| { @@ -532,7 +534,7 @@ impl<'a> TransactionsProvider<'a> { } impl<'a> CellProvider for TransactionsProvider<'a> { - fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus { match self.transactions.get(&out_point.tx_hash()) { Some(tx) => tx .outputs() @@ -543,7 +545,10 @@ impl<'a> CellProvider for TransactionsProvider<'a> { .get(out_point.index().unpack()) .expect("output data") .raw_data(); - let cell_meta = CellMetaBuilder::from_cell_output(cell, data).build(); + let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data).build(); + if !allow_in_txpool && !with_data { + cell_meta.mem_cell_data_hash = None; + } CellStatus::live_cell(cell_meta) }) .unwrap_or(CellStatus::Unknown), @@ -622,6 +627,7 @@ pub fn resolve_transaction( seen_inputs: &mut HashSet, cell_provider: &CP, header_checker: &HC, + 
allow_in_txpool: bool, ) -> Result { let (mut resolved_inputs, mut resolved_cell_deps, mut resolved_dep_groups) = ( Vec::with_capacity(transaction.inputs().len()), @@ -635,7 +641,7 @@ pub fn resolve_transaction( return Err(OutPointError::Dead(out_point.clone())); } - let cell_status = cell_provider.cell(out_point, with_data); + let cell_status = cell_provider.cell(out_point, with_data, allow_in_txpool); match cell_status { CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())), CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())), @@ -737,7 +743,7 @@ fn build_cell_meta_from_out_point( cell_provider: &CP, out_point: &OutPoint, ) -> Result { - let cell_status = cell_provider.cell(out_point, true); + let cell_status = cell_provider.cell(out_point, true, false); match cell_status { CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())), CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())), @@ -855,7 +861,7 @@ mod tests { cells: HashMap>, } impl CellProvider for CellMemoryDb { - fn cell(&self, o: &OutPoint, _with_data: bool) -> CellStatus { + fn cell(&self, o: &OutPoint, _with_data: bool, _allow_in_txpool: bool) -> CellStatus { match self.cells.get(o) { Some(&Some(ref cell_meta)) => CellStatus::live_cell(cell_meta.clone()), Some(&None) => CellStatus::Dead, @@ -912,9 +918,9 @@ mod tests { db.cells.insert(p1.clone(), Some(o.clone())); db.cells.insert(p2.clone(), None); - assert_eq!(CellStatus::Live(o), db.cell(&p1, false)); - assert_eq!(CellStatus::Dead, db.cell(&p2, false)); - assert_eq!(CellStatus::Unknown, db.cell(&p3, false)); + assert_eq!(CellStatus::Live(o), db.cell(&p1, false, false)); + assert_eq!(CellStatus::Dead, db.cell(&p2, false, false)); + assert_eq!(CellStatus::Unknown, db.cell(&p3, false, false)); } #[test] @@ -953,6 +959,7 @@ mod tests { &mut seen_inputs, &cell_provider, &header_checker, + false, ) .unwrap(); @@ -986,6 +993,7 @@ mod tests { &mut seen_inputs, &cell_provider, &header_checker, + false, ); assert_error_eq!(result.unwrap_err(), OutPointError::InvalidDepGroup(op_dep)); } @@ -1015,6 +1023,7 @@ mod tests { &mut seen_inputs, &cell_provider, &header_checker, + false, ); assert_error_eq!(result.unwrap_err(), OutPointError::Unknown(op_unknown),); } @@ -1041,6 +1050,7 @@ mod tests { &mut seen_inputs, &cell_provider, &header_checker, + false, ); assert!(result.is_ok()); @@ -1067,6 +1077,7 @@ mod tests { &mut seen_inputs, &cell_provider, &header_checker, + false, ); assert_error_eq!( @@ -1159,8 +1170,8 @@ mod tests { .build(); let mut seen_inputs = HashSet::new(); - let rtx = - resolve_transaction(tx, &mut seen_inputs, &cell_provider, &header_checker).unwrap(); + let rtx = resolve_transaction(tx, &mut seen_inputs, &cell_provider, &header_checker, false) + .unwrap(); assert_eq!(rtx.resolved_cell_deps[0], dummy_cell_meta,); } @@ -1187,12 +1198,22 @@ mod tests { .build(); let mut seen_inputs = HashSet::new(); - let result1 = - resolve_transaction(tx1, &mut seen_inputs, &cell_provider, &header_checker); + let result1 = resolve_transaction( + tx1, + &mut seen_inputs, + &cell_provider, + &header_checker, + false, + ); assert!(result1.is_ok()); - let result2 = - resolve_transaction(tx2, &mut seen_inputs, &cell_provider, &header_checker); + let result2 = resolve_transaction( + tx2, + &mut seen_inputs, + &cell_provider, + &header_checker, + false, + ); assert!(result2.is_ok()); } @@ -1208,13 +1229,23 @@ mod tests { let tx2 = TransactionBuilder::default().cell_dep(dep).build(); let mut seen_inputs = HashSet::new(); - let result1 = - 
resolve_transaction(tx1, &mut seen_inputs, &cell_provider, &header_checker); + let result1 = resolve_transaction( + tx1, + &mut seen_inputs, + &cell_provider, + &header_checker, + false, + ); assert!(result1.is_ok()); - let result2 = - resolve_transaction(tx2, &mut seen_inputs, &cell_provider, &header_checker); + let result2 = resolve_transaction( + tx2, + &mut seen_inputs, + &cell_provider, + &header_checker, + false, + ); assert_error_eq!(result2.unwrap_err(), OutPointError::Dead(out_point)); } diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs index 0049b79404..bb50c01c67 100644 --- a/util/types/src/core/hardfork.rs +++ b/util/types/src/core/hardfork.rs @@ -98,6 +98,7 @@ pub struct HardForkSwitch { rfc_pr_0222: EpochNumber, rfc_pr_0223: EpochNumber, rfc_pr_0224: EpochNumber, + rfc_pr_0228: EpochNumber, rfc_pr_0230: EpochNumber, } @@ -125,6 +126,11 @@ pub struct HardForkSwitchBuilder { /// /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0224: Option, + /// Let the syscall `load_cell_data_hash` return correct data hash + /// for cells which are still in the tx pool and not committed yet. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0228: Option, /// Allow unknown block versions and transactions versions. /// /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) @@ -144,6 +150,7 @@ impl HardForkSwitch { .rfc_pr_0222(self.rfc_pr_0222()) .rfc_pr_0223(self.rfc_pr_0223()) .rfc_pr_0224(self.rfc_pr_0224()) + .rfc_pr_0228(self.rfc_pr_0228()) .rfc_pr_0230(self.rfc_pr_0230()) } @@ -155,6 +162,7 @@ impl HardForkSwitch { .disable_rfc_pr_0222() .disable_rfc_pr_0223() .disable_rfc_pr_0224() + .disable_rfc_pr_0228() .disable_rfc_pr_0230() .build() .unwrap() @@ -189,6 +197,13 @@ define_methods!( disable_rfc_pr_0224, "RFC PR 0224" ); +define_methods!( + rfc_pr_0228, + allow_cell_data_hash_in_txpool, + is_allow_cell_data_hash_in_txpool_enabled, + disable_rfc_pr_0228, + "RFC PR 0228" +); define_methods!( rfc_pr_0230, allow_unknown_versions, @@ -216,12 +231,14 @@ impl HardForkSwitchBuilder { let rfc_pr_0222 = try_find!(rfc_pr_0222); let rfc_pr_0223 = try_find!(rfc_pr_0223); let rfc_pr_0224 = try_find!(rfc_pr_0224); + let rfc_pr_0228 = try_find!(rfc_pr_0228); let rfc_pr_0230 = try_find!(rfc_pr_0230); Ok(HardForkSwitch { rfc_pr_0221, rfc_pr_0222, rfc_pr_0223, rfc_pr_0224, + rfc_pr_0228, rfc_pr_0230, }) } From 4919ae351eadff723d3880bdba1943e920090f23 Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Tue, 8 Jun 2021 11:20:14 +0800 Subject: [PATCH 12/18] fix(hardfork): a mistake when use block timestamp of input cells as relative since start timestamp --- verification/src/transaction_verifier.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 30e388a1ae..2b00831e78 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -714,9 +714,9 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { let base_timestamp = if hardfork_switch .is_block_ts_as_relative_since_start_enabled(epoch_number) { - self.parent_median_time(&info.block_hash) - } else { self.parent_block_time(&info.block_hash) + } else { + self.parent_median_time(&info.block_hash) }; let current_median_time = self.block_median_time(&parent_hash); if current_median_time < base_timestamp + timestamp { From 
f7818724cd480565e2d9445e4726e4d51823ae4d Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Tue, 8 Jun 2021 11:30:31 +0800 Subject: [PATCH 13/18] chore(hardfork): apply review suggestions about the extra hash verification and add a unit test --- .../contextual/src/tests/uncle_verifier.rs | 59 +------- .../contextual/src/uncles_verifier.rs | 15 -- verification/src/block_verifier.rs | 14 +- verification/src/error.rs | 9 -- verification/src/tests/block_verifier.rs | 134 +++++++++++++++++- 5 files changed, 139 insertions(+), 92 deletions(-) diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index 4d563007ef..adc0a867c1 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -12,7 +12,7 @@ use ckb_types::{ BlockBuilder, BlockNumber, BlockView, EpochExt, HeaderView, TransactionBuilder, TransactionView, UncleBlockView, }, - packed::{Byte32, CellInput, ProposalShortId, Script, UncleBlockVec}, + packed::{CellInput, ProposalShortId, Script}, prelude::*, }; use ckb_verification::UnclesError; @@ -133,63 +133,6 @@ fn epoch(shared: &Shared, chain: &[BlockView], index: usize) -> EpochExt { .epoch() } -#[test] -fn test_invalid_uncle_hash_case1() { - let (shared, chain1, chain2) = prepare(); - let dummy_context = dummy_context(&shared); - - // header has uncle_count is 1 but uncles_hash is not Byte32::one() - // body has 1 uncles - let block = chain1 - .last() - .cloned() - .unwrap() - .as_advanced_builder() - .uncle(chain2.last().cloned().unwrap().as_uncle()) - .build_unchecked(); - - let epoch = epoch(&shared, &chain1, chain1.len() - 2); - let uncle_verifier_context = UncleVerifierContext::new(&dummy_context, &epoch); - let verifier = UnclesVerifier::new(uncle_verifier_context, &block); - - assert_error_eq!( - verifier.verify().unwrap_err(), - UnclesError::InvalidHash { - expected: Byte32::zero(), - actual: block.calc_uncles_hash(), - }, - ); -} - -#[test] -fn test_invalid_uncle_hash_case2() { - let (shared, chain1, chain2) = prepare(); - let dummy_context = dummy_context(&shared); - - // header has empty uncles, but the uncles hash is not matched - let uncles: UncleBlockVec = vec![chain2.last().cloned().unwrap().data().as_uncle()].pack(); - let uncles_hash = uncles.calc_uncles_hash(); - let block = chain1 - .last() - .cloned() - .unwrap() - .as_advanced_builder() - .extra_hash(uncles_hash.clone()) - .build_unchecked(); - - let epoch = epoch(&shared, &chain1, chain1.len() - 2); - let uncle_verifier_context = UncleVerifierContext::new(&dummy_context, &epoch); - let verifier = UnclesVerifier::new(uncle_verifier_context, &block); - - assert_error_eq!( - verifier.verify().unwrap_err(), - UnclesError::InvalidHash { - expected: uncles_hash, - actual: Byte32::zero(), - }, - ); -} - // Uncle is ancestor block #[test] fn test_double_inclusion() { diff --git a/verification/contextual/src/uncles_verifier.rs b/verification/contextual/src/uncles_verifier.rs index f7523a27e7..560e49e40a 100644 --- a/verification/contextual/src/uncles_verifier.rs +++ b/verification/contextual/src/uncles_verifier.rs @@ -36,7 +36,6 @@ where UnclesVerifier { provider, block } } - // - uncles_hash // - uncles_num // - depth // - uncle not in main chain @@ -44,20 +43,6 @@ where pub fn verify(&self) -> Result<(), Error> { let uncles_count = self.block.data().uncles().len() as u32; - let epoch_number = self.block.epoch().number(); - let hardfork_switch = self.provider.consensus().hardfork_switch(); - if 
!hardfork_switch.is_reuse_uncles_hash_as_extra_hash_enabled(epoch_number) { - // verify uncles_hash - let actual_uncles_hash = self.block.calc_uncles_hash(); - if actual_uncles_hash != self.block.extra_hash() { - return Err(UnclesError::InvalidHash { - expected: self.block.extra_hash(), - actual: actual_uncles_hash, - } - .into()); - } - } - // if self.block.uncles is empty, return if uncles_count == 0 { return Ok(()); diff --git a/verification/src/block_verifier.rs b/verification/src/block_verifier.rs index 4c5490c490..590d739854 100644 --- a/verification/src/block_verifier.rs +++ b/verification/src/block_verifier.rs @@ -255,8 +255,10 @@ impl<'a> BlockExtensionVerifier<'a> { let epoch_number = block.epoch().number(); let hardfork_switch = self.consensus.hardfork_switch(); let extra_fields_count = block.data().count_extra_fields(); + let is_reuse_uncles_hash_as_extra_hash_enabled = + hardfork_switch.is_reuse_uncles_hash_as_extra_hash_enabled(epoch_number); - if hardfork_switch.is_reuse_uncles_hash_as_extra_hash_enabled(epoch_number) { + if is_reuse_uncles_hash_as_extra_hash_enabled { match extra_fields_count { 0 => {} 1 => { @@ -276,15 +278,15 @@ impl<'a> BlockExtensionVerifier<'a> { return Err(BlockErrorKind::UnknownFields.into()); } } - - let actual_extra_hash = block.calc_extra_hash().extra_hash(); - if actual_extra_hash != block.extra_hash() { - return Err(BlockErrorKind::InvalidExtraHash.into()); - } } else if extra_fields_count > 0 { return Err(BlockErrorKind::UnknownFields.into()); } + let actual_extra_hash = block.calc_extra_hash().extra_hash(); + if actual_extra_hash != block.extra_hash() { + return Err(BlockErrorKind::InvalidExtraHash.into()); + } + Ok(()) } } diff --git a/verification/src/error.rs b/verification/src/error.rs index 0ea443d7de..915a0ce431 100644 --- a/verification/src/error.rs +++ b/verification/src/error.rs @@ -202,15 +202,6 @@ pub enum UnclesError { actual: u32, }, - /// The calculated uncle-hash does not match with the one in the header. - #[error("InvalidHash(expected: {expected}, actual: {actual})")] - InvalidHash { - /// The calculated uncle-hash - expected: Byte32, - /// The actual uncle-hash - actual: Byte32, - }, - /// There is an uncle whose number is greater than or equal to current block number. 
#[error("InvalidNumber")] InvalidNumber, diff --git a/verification/src/tests/block_verifier.rs b/verification/src/tests/block_verifier.rs index 4320e3b6b5..44472321cd 100644 --- a/verification/src/tests/block_verifier.rs +++ b/verification/src/tests/block_verifier.rs @@ -1,14 +1,15 @@ use super::super::block_verifier::{ - BlockBytesVerifier, BlockProposalsLimitVerifier, CellbaseVerifier, DuplicateVerifier, - MerkleRootVerifier, + BlockBytesVerifier, BlockExtensionVerifier, BlockProposalsLimitVerifier, CellbaseVerifier, + DuplicateVerifier, MerkleRootVerifier, }; use crate::{BlockErrorKind, CellbaseError}; +use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_error::assert_error_eq; use ckb_types::{ bytes::Bytes, core::{ - capacity_bytes, BlockBuilder, BlockNumber, Capacity, HeaderBuilder, TransactionBuilder, - TransactionView, + capacity_bytes, hardfork::HardForkSwitch, BlockBuilder, BlockNumber, Capacity, + EpochNumberWithFraction, HeaderBuilder, TransactionBuilder, TransactionView, }, h256, packed::{Byte32, CellInput, CellOutputBuilder, OutPoint, ProposalShortId, Script}, @@ -425,3 +426,128 @@ pub fn test_max_proposals_limit_verifier() { ); } } + +#[test] +fn test_block_extension_verifier() { + let fork_at = 10; + let epoch = EpochNumberWithFraction::new(fork_at, 0, 1); + + // normal block (no uncles) + let header = HeaderBuilder::default().epoch(epoch.pack()).build(); + let block = BlockBuilder::default().header(header).build(); + + // invalid extra hash (no extension) + let header1 = block + .header() + .as_advanced_builder() + .extra_hash(h256!("0x1").pack()) + .build(); + let block1 = BlockBuilder::default().header(header1).build_unchecked(); + + // empty extension + let block2 = block + .as_advanced_builder() + .extension(Some(Default::default())) + .build(); + // extension has only 1 byte + let block3 = block + .as_advanced_builder() + .extension(Some(vec![0u8].pack())) + .build(); + // extension has 96 bytes + let block4 = block + .as_advanced_builder() + .extension(Some(vec![0u8; 96].pack())) + .build(); + // extension has 97 bytes + let block5 = block + .as_advanced_builder() + .extension(Some(vec![0u8; 97].pack())) + .build(); + + // normal block (with uncles) + let block6 = block + .as_advanced_builder() + .uncle(BlockBuilder::default().build().as_uncle()) + .build(); + + // invalid extra hash (has extension but use uncles hash) + let block7 = block6 + .as_advanced_builder() + .extension(Some(vec![0u8; 32].pack())) + .build_unchecked(); + + { + // Test CKB v2019 + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0224(fork_at + 1) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block1); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::InvalidExtraHash); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block2); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block3); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block4); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block5); + 
assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block6); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block7); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + } + { + // Test CKB v2021 + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0224(fork_at) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block1); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::InvalidExtraHash); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block2); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::EmptyBlockExtension); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block3); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block4); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block5); + assert_error_eq!( + result.unwrap_err(), + BlockErrorKind::ExceededMaximumBlockExtensionBytes + ); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block6); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block7); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::InvalidExtraHash); + } +} From 8239d1d9620c96d8bd9d3ca2ba41fd52e8e9cad6 Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Thu, 10 Jun 2021 17:19:49 +0800 Subject: [PATCH 14/18] Revert "feat(hardfork): allow unknown block versions and transactions versions" This reverts commit d1c0bbece4ac1bebdac968dbe494936010b160c2. 
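For context, this revert restores the behavior from before the rfc_pr_0230 change: block and transaction versions must again exactly match the fixed constants (BLOCK_VERSION and TX_VERSION, both 0, re-added in util/types/src/constants.rs in the diff below) instead of being relaxed per epoch through the hardfork switch. A minimal, hypothetical sketch of that kind of check; check_block_version is an illustrative helper, not the actual code in verification/src/header_verifier.rs:

    // Sketch only, assuming the fixed version constants re-introduced below.
    pub const BLOCK_VERSION: u32 = 0;
    pub const TX_VERSION: u32 = 0;

    fn check_block_version(actual: u32) -> Result<(), String> {
        // After the revert there is no epoch-based exception: any version other
        // than the fixed constant is rejected outright.
        if actual != BLOCK_VERSION {
            return Err(format!(
                "BlockVersionError(expected: {}, actual: {})",
                BLOCK_VERSION, actual
            ));
        }
        Ok(())
    }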
--- rpc/src/module/chain.rs | 5 +- spec/src/consensus.rs | 96 +++++----- spec/src/hardfork.rs | 6 +- test/src/main.rs | 2 - test/src/specs/hardfork/v2021/mod.rs | 2 - test/src/specs/hardfork/v2021/version.rs | 181 ------------------ test/src/specs/rpc/get_block_template.rs | 2 +- test/template/specs/integration.toml | 1 - tx-pool/src/block_assembler/mod.rs | 24 +-- tx-pool/src/process.rs | 5 - util/types/src/constants.rs | 8 + util/types/src/core/advanced_builders.rs | 20 +- util/types/src/core/error.rs | 12 +- util/types/src/core/hardfork.rs | 16 -- util/types/src/lib.rs | 1 + verification/src/header_verifier.rs | 38 ++-- verification/src/tests/header_verifier.rs | 64 ++----- .../src/tests/transaction_verifier.rs | 69 ++----- verification/src/transaction_verifier.rs | 48 ++--- 19 files changed, 142 insertions(+), 458 deletions(-) delete mode 100644 test/src/specs/hardfork/v2021/version.rs create mode 100644 util/types/src/constants.rs diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 2a43c508a5..853a3a07a5 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1618,9 +1618,8 @@ impl ChainRpc for ChainRpcImpl { } fn get_consensus(&self) -> Result { - let consensus = self.shared.consensus(); - let epoch_number = self.shared.snapshot().tip_header().epoch().number(); - Ok(consensus.to_json(epoch_number)) + let consensus = self.shared.consensus().clone(); + Ok(consensus.into()) } fn get_block_median_time(&self, block_hash: H256) -> Result> { diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index ab54d5f98a..2bf066b662 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -14,6 +14,7 @@ use ckb_resource::Resource; use ckb_traits::{BlockEpoch, EpochProvider}; use ckb_types::{ bytes::Bytes, + constants::{BLOCK_VERSION, TX_VERSION}, core::{ hardfork::HardForkSwitch, BlockBuilder, BlockNumber, BlockView, Capacity, Cycle, EpochExt, EpochNumber, EpochNumberWithFraction, HeaderView, Ratio, TransactionBuilder, @@ -260,6 +261,8 @@ impl ConsensusBuilder { secp256k1_blake160_sighash_all_type_hash: None, secp256k1_blake160_multisig_all_type_hash: None, genesis_epoch_ext, + block_version: BLOCK_VERSION, + tx_version: TX_VERSION, type_id_code_hash: TYPE_ID_CODE_HASH, proposer_reward_ratio: PROPOSER_REWARD_RATIO, max_block_proposals_limit: MAX_BLOCK_PROPOSALS_LIMIT, @@ -510,6 +513,10 @@ pub struct Consensus { pub max_block_cycles: Cycle, /// Maximum number of bytes to use for the entire block pub max_block_bytes: u64, + /// The block version number supported + pub block_version: Version, + /// The tx version number supported + pub tx_version: Version, /// The "TYPE_ID" in hex pub type_id_code_hash: H256, /// The Limit to the number of proposals per block @@ -687,13 +694,13 @@ impl Consensus { } /// The current block version - pub fn block_version(&self, _epoch_number: EpochNumber) -> Version { - 0 + pub fn block_version(&self) -> Version { + self.block_version } /// The current transaction version - pub fn tx_version(&self, _epoch_number: EpochNumber) -> Version { - 0 + pub fn tx_version(&self) -> Version { + self.tx_version } /// The "TYPE_ID" in hex @@ -923,46 +930,6 @@ impl Consensus { pub fn hardfork_switch(&self) -> &HardForkSwitch { &self.hardfork_switch } - - /// Convert to a JSON type with an input epoch number as the tip epoch number. 
- pub fn to_json(&self, epoch_number: EpochNumber) -> ckb_jsonrpc_types::Consensus { - ckb_jsonrpc_types::Consensus { - id: self.id.clone(), - genesis_hash: self.genesis_hash.unpack(), - dao_type_hash: self.dao_type_hash().map(|h| h.unpack()), - secp256k1_blake160_sighash_all_type_hash: self - .secp256k1_blake160_sighash_all_type_hash() - .map(|h| h.unpack()), - secp256k1_blake160_multisig_all_type_hash: self - .secp256k1_blake160_multisig_all_type_hash() - .map(|h| h.unpack()), - initial_primary_epoch_reward: self.initial_primary_epoch_reward.into(), - secondary_epoch_reward: self.secondary_epoch_reward.into(), - max_uncles_num: (self.max_uncles_num as u64).into(), - orphan_rate_target: self.orphan_rate_target().to_owned(), - epoch_duration_target: self.epoch_duration_target.into(), - tx_proposal_window: ckb_jsonrpc_types::ProposalWindow { - closest: self.tx_proposal_window.0.into(), - farthest: self.tx_proposal_window.1.into(), - }, - proposer_reward_ratio: RationalU256::new_raw( - self.proposer_reward_ratio.numer().into(), - self.proposer_reward_ratio.denom().into(), - ), - cellbase_maturity: self.cellbase_maturity.into(), - median_time_block_count: (self.median_time_block_count as u64).into(), - max_block_cycles: self.max_block_cycles.into(), - max_block_bytes: self.max_block_bytes.into(), - block_version: self.block_version(epoch_number).into(), - tx_version: self.tx_version(epoch_number).into(), - type_id_code_hash: self.type_id_code_hash().to_owned(), - max_block_proposals_limit: self.max_block_proposals_limit.into(), - primary_epoch_reward_halving_interval: self - .primary_epoch_reward_halving_interval - .into(), - permanent_difficulty_in_dummy: self.permanent_difficulty_in_dummy, - } - } } /// Trait for consensus provider. @@ -994,6 +961,47 @@ impl NextBlockEpoch { } } +impl From for ckb_jsonrpc_types::Consensus { + fn from(consensus: Consensus) -> Self { + Self { + id: consensus.id, + genesis_hash: consensus.genesis_hash.unpack(), + dao_type_hash: consensus.dao_type_hash.map(|h| h.unpack()), + secp256k1_blake160_sighash_all_type_hash: consensus + .secp256k1_blake160_sighash_all_type_hash + .map(|h| h.unpack()), + secp256k1_blake160_multisig_all_type_hash: consensus + .secp256k1_blake160_multisig_all_type_hash + .map(|h| h.unpack()), + initial_primary_epoch_reward: consensus.initial_primary_epoch_reward.into(), + secondary_epoch_reward: consensus.secondary_epoch_reward.into(), + max_uncles_num: (consensus.max_uncles_num as u64).into(), + orphan_rate_target: consensus.orphan_rate_target, + epoch_duration_target: consensus.epoch_duration_target.into(), + tx_proposal_window: ckb_jsonrpc_types::ProposalWindow { + closest: consensus.tx_proposal_window.0.into(), + farthest: consensus.tx_proposal_window.1.into(), + }, + proposer_reward_ratio: RationalU256::new_raw( + consensus.proposer_reward_ratio.numer().into(), + consensus.proposer_reward_ratio.denom().into(), + ), + cellbase_maturity: consensus.cellbase_maturity.into(), + median_time_block_count: (consensus.median_time_block_count as u64).into(), + max_block_cycles: consensus.max_block_cycles.into(), + max_block_bytes: consensus.max_block_bytes.into(), + block_version: consensus.block_version.into(), + tx_version: consensus.tx_version.into(), + type_id_code_hash: consensus.type_id_code_hash, + max_block_proposals_limit: consensus.max_block_proposals_limit.into(), + primary_epoch_reward_halving_interval: consensus + .primary_epoch_reward_halving_interval + .into(), + permanent_difficulty_in_dummy: 
consensus.permanent_difficulty_in_dummy, + } + } +} + // most simple and efficient way for now fn u256_low_u64(u: U256) -> u64 { u.0[0] diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 731d0714a6..3a669f258f 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -22,8 +22,6 @@ pub struct HardForkConfig { pub rfc_pr_0224: Option, /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0228: Option, - /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) - pub rfc_pr_0230: Option, } macro_rules! check_default { @@ -71,8 +69,7 @@ impl HardForkConfig { .rfc_pr_0222(check_default!(self, rfc_pr_0222, ckb2021)) .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)) .rfc_pr_0224(check_default!(self, rfc_pr_0224, ckb2021)) - .rfc_pr_0228(check_default!(self, rfc_pr_0228, ckb2021)) - .rfc_pr_0230(check_default!(self, rfc_pr_0230, ckb2021)); + .rfc_pr_0228(check_default!(self, rfc_pr_0228, ckb2021)); Ok(builder) } @@ -86,7 +83,6 @@ impl HardForkConfig { .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) .rfc_pr_0224(self.rfc_pr_0224.unwrap_or(default)) .rfc_pr_0228(self.rfc_pr_0228.unwrap_or(default)) - .rfc_pr_0230(self.rfc_pr_0230.unwrap_or(default)) .build() } } diff --git a/test/src/main.rs b/test/src/main.rs index 9867d4afa3..d2dc560bec 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -492,8 +492,6 @@ fn all_specs() -> Vec> { Box::new(CheckAbsoluteEpochSince), Box::new(CheckRelativeEpochSince), Box::new(CheckBlockExtension), - Box::new(CheckBlockVersion), - Box::new(CheckTxVersion), Box::new(DuplicateCellDepsForDataHashTypeLockScript), Box::new(DuplicateCellDepsForDataHashTypeTypeScript), Box::new(DuplicateCellDepsForTypeHashTypeLockScript), diff --git a/test/src/specs/hardfork/v2021/mod.rs b/test/src/specs/hardfork/v2021/mod.rs index f2d42d9e7f..0b4de99a0b 100644 --- a/test/src/specs/hardfork/v2021/mod.rs +++ b/test/src/specs/hardfork/v2021/mod.rs @@ -1,7 +1,6 @@ mod cell_deps; mod extension; mod since; -mod version; pub use cell_deps::{ DuplicateCellDepsForDataHashTypeLockScript, DuplicateCellDepsForDataHashTypeTypeScript, @@ -9,4 +8,3 @@ pub use cell_deps::{ }; pub use extension::CheckBlockExtension; pub use since::{CheckAbsoluteEpochSince, CheckRelativeEpochSince}; -pub use version::{CheckBlockVersion, CheckTxVersion}; diff --git a/test/src/specs/hardfork/v2021/version.rs b/test/src/specs/hardfork/v2021/version.rs deleted file mode 100644 index f2451b8d36..0000000000 --- a/test/src/specs/hardfork/v2021/version.rs +++ /dev/null @@ -1,181 +0,0 @@ -use crate::util::{ - check::{assert_epoch_should_be, assert_submit_block_fail, assert_submit_block_ok}, - mining::{mine, mine_until_epoch, mine_until_out_bootstrap_period}, -}; -use crate::utils::assert_send_transaction_fail; -use crate::{Node, Spec}; -use ckb_logger::info; -use ckb_types::{ - core::{BlockView, TransactionView, Version}, - packed, - prelude::*, -}; - -const GENESIS_EPOCH_LENGTH: u64 = 10; - -const ERROR_BLOCK_VERSION: &str = "Invalid: Header(Version(BlockVersionError("; -const ERROR_TX_VERSION: &str = - "TransactionFailedToVerify: Verification failed Transaction(MismatchedVersion"; - -pub struct CheckBlockVersion; -pub struct CheckTxVersion; - -impl Spec for CheckBlockVersion { - fn run(&self, nodes: &mut Vec) { - let node = &nodes[0]; - let epoch_length = GENESIS_EPOCH_LENGTH; - - mine_until_out_bootstrap_period(node); - - assert_epoch_should_be(node, 1, 2, epoch_length); - { - info!("CKB v2019, submit block with 
version 1 is failed"); - let block = create_block_with_version(node, 1); - assert_submit_block_fail(node, &block, ERROR_BLOCK_VERSION); - } - assert_epoch_should_be(node, 1, 2, epoch_length); - { - info!("CKB v2019, submit block with version 0 is passed"); - let block = create_block_with_version(node, 0); - assert_submit_block_ok(node, &block); - } - assert_epoch_should_be(node, 1, 3, epoch_length); - mine_until_epoch(node, 1, epoch_length - 2, epoch_length); - { - info!("CKB v2019, submit block with version 1 is failed (boundary)"); - let block = create_block_with_version(node, 1); - assert_submit_block_fail(node, &block, ERROR_BLOCK_VERSION); - } - assert_epoch_should_be(node, 1, epoch_length - 2, epoch_length); - { - info!("CKB v2019, submit block with version 0 is passed (boundary)"); - let block = create_block_with_version(node, 0); - assert_submit_block_ok(node, &block); - } - assert_epoch_should_be(node, 1, epoch_length - 1, epoch_length); - { - info!("CKB v2021, submit block with version 1 is passed (boundary)"); - let block = create_block_with_version(node, 1); - assert_submit_block_ok(node, &block); - } - assert_epoch_should_be(node, 2, 0, epoch_length); - { - info!("CKB v2021, submit block with version 0 is passed (boundary)"); - let block = create_block_with_version(node, 0); - assert_submit_block_ok(node, &block); - } - assert_epoch_should_be(node, 2, 1, epoch_length); - } - - fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { - spec.params.permanent_difficulty_in_dummy = Some(true); - spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); - if let Some(mut switch) = spec.params.hardfork.as_mut() { - switch.rfc_pr_0230 = Some(2); - } - } -} - -impl Spec for CheckTxVersion { - fn run(&self, nodes: &mut Vec) { - let node = &nodes[0]; - let epoch_length = GENESIS_EPOCH_LENGTH; - - mine_until_out_bootstrap_period(node); - - assert_epoch_should_be(node, 1, 2, epoch_length); - { - let input_cell_hash = &node.get_tip_block().transactions()[0].hash(); - - info!("CKB v2019, submit transaction with version 1 is failed"); - let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 1); - assert_send_transaction_fail(node, &tx, ERROR_TX_VERSION); - - info!("CKB v2019, submit block with version 0 is passed"); - let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 0); - let res = node.rpc_client().send_transaction_result(tx.data().into()); - assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); - } - mine_until_epoch(node, 1, epoch_length - 4, epoch_length); - { - let input_cell_hash = &node.get_tip_block().transactions()[0].hash(); - - info!("CKB v2019, submit transaction with version 1 is failed (boundary)"); - let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 1); - assert_send_transaction_fail(node, &tx, ERROR_TX_VERSION); - - info!("CKB v2019, submit block with version 0 is passed (boundary)"); - let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 0); - let res = node.rpc_client().send_transaction_result(tx.data().into()); - assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); - } - mine(node, 1); - assert_epoch_should_be(node, 1, epoch_length - 3, epoch_length); - { - let input_cell_hash = &node.get_tip_block().transactions()[0].hash(); - info!("CKB v2021, submit transaction with version 1 is passed (boundary)"); - let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 1); - let res = node.rpc_client().send_transaction_result(tx.data().into()); - 
assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); - - let input_cell_hash = &tx.hash(); - info!("CKB v2021, submit block with version 0 is passed (boundary)"); - let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 0); - let res = node.rpc_client().send_transaction_result(tx.data().into()); - assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); - - let input_cell_hash = &tx.hash(); - info!("CKB v2021, submit transaction with version 100 is passed (boundary)"); - let tx = create_transaction_with_version(node, input_cell_hash.clone(), 0, 100); - let res = node.rpc_client().send_transaction_result(tx.data().into()); - assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); - } - } - - fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { - spec.params.permanent_difficulty_in_dummy = Some(true); - spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); - if let Some(mut switch) = spec.params.hardfork.as_mut() { - switch.rfc_pr_0230 = Some(2); - } - } -} - -fn create_block_with_version(node: &Node, version: Version) -> BlockView { - node.new_block_builder(None, None, None) - .version(version.pack()) - .build() -} - -fn create_transaction_with_version( - node: &Node, - hash: packed::Byte32, - index: u32, - version: Version, -) -> TransactionView { - let always_success_cell_dep = node.always_success_cell_dep(); - let always_success_script = node.always_success_script(); - - let input_cell = node - .rpc_client() - .get_transaction(hash.clone()) - .unwrap() - .transaction - .inner - .outputs[index as usize] - .to_owned(); - - let cell_input = packed::CellInput::new(packed::OutPoint::new(hash, index), 0); - let cell_output = packed::CellOutput::new_builder() - .capacity((input_cell.capacity.value() - 1).pack()) - .lock(always_success_script) - .build(); - - TransactionView::new_advanced_builder() - .version(version.pack()) - .cell_dep(always_success_cell_dep) - .input(cell_input) - .output(cell_output) - .output_data(Default::default()) - .build() -} diff --git a/test/src/specs/rpc/get_block_template.rs b/test/src/specs/rpc/get_block_template.rs index 50b58842b6..4b6070e695 100644 --- a/test/src/specs/rpc/get_block_template.rs +++ b/test/src/specs/rpc/get_block_template.rs @@ -12,7 +12,7 @@ impl Spec for RpcGetBlockTemplate { let node0 = &nodes[0]; let default_bytes_limit = node0.consensus().max_block_bytes; let default_cycles_limit = node0.consensus().max_block_cycles; - let default_block_version = node0.consensus().block_version(0); + let default_block_version = node0.consensus().block_version; let epoch_length = node0.consensus().genesis_epoch_ext().length(); // get block template when tip block is genesis diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index 84a5a3a407..b764e964f2 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -74,7 +74,6 @@ rfc_pr_0222 = 9_223_372_036_854_775_807 rfc_pr_0223 = 9_223_372_036_854_775_807 rfc_pr_0224 = 9_223_372_036_854_775_807 rfc_pr_0228 = 9_223_372_036_854_775_807 -rfc_pr_0230 = 9_223_372_036_854_775_807 [pow] func = "Dummy" diff --git a/tx-pool/src/block_assembler/mod.rs b/tx-pool/src/block_assembler/mod.rs index 0b28d2bf8a..2872dec7af 100644 --- a/tx-pool/src/block_assembler/mod.rs +++ b/tx-pool/src/block_assembler/mod.rs @@ -13,8 +13,8 @@ use ckb_store::ChainStore; use ckb_types::{ bytes::Bytes, core::{ - BlockNumber, Capacity, Cycle, EpochExt, EpochNumber, HeaderView, TransactionBuilder, - TransactionView, 
UncleBlockView, Version, + BlockNumber, Capacity, Cycle, EpochExt, HeaderView, TransactionBuilder, TransactionView, + UncleBlockView, Version, }, packed::{self, Byte32, CellInput, CellOutput, CellbaseWitness, ProposalShortId, Transaction}, prelude::*, @@ -73,22 +73,16 @@ impl BlockAssembler { bytes_limit: Option, proposals_limit: Option, max_version: Option, - epoch_number: EpochNumber, ) -> (u64, u64, Version) { - let bytes_limit = { - let default_bytes_limit = consensus.max_block_bytes(); - bytes_limit - .min(Some(default_bytes_limit)) - .unwrap_or(default_bytes_limit) - }; - let default_proposals_limit = consensus.max_block_proposals_limit(); + let bytes_limit = bytes_limit + .min(Some(consensus.max_block_bytes())) + .unwrap_or_else(|| consensus.max_block_bytes()); let proposals_limit = proposals_limit - .min(Some(default_proposals_limit)) - .unwrap_or(default_proposals_limit); - let default_block_version = consensus.block_version(epoch_number); + .min(Some(consensus.max_block_proposals_limit())) + .unwrap_or_else(|| consensus.max_block_proposals_limit()); let version = max_version - .min(Some(default_block_version)) - .unwrap_or(default_block_version); + .min(Some(consensus.block_version())) + .unwrap_or_else(|| consensus.block_version()); (bytes_limit, proposals_limit, version) } diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 92d1d1daf4..e881141032 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -297,16 +297,11 @@ impl TxPoolService { let snapshot = self.snapshot(); let consensus = snapshot.consensus(); let cycles_limit = consensus.max_block_cycles(); - let epoch_number_of_next_block = snapshot - .tip_header() - .epoch() - .minimum_epoch_number_after_n_blocks(1); let (bytes_limit, proposals_limit, version) = BlockAssembler::transform_params( consensus, bytes_limit, proposals_limit, max_version, - epoch_number_of_next_block, ); if let Some(cache) = self diff --git a/util/types/src/constants.rs b/util/types/src/constants.rs new file mode 100644 index 0000000000..44d5527897 --- /dev/null +++ b/util/types/src/constants.rs @@ -0,0 +1,8 @@ +//! All Constants. + +use crate::core::Version; + +/// Current transaction version. +pub const TX_VERSION: Version = 0; +/// Current block version. +pub const BLOCK_VERSION: Version = 0; diff --git a/util/types/src/core/advanced_builders.rs b/util/types/src/core/advanced_builders.rs index e42697c1a2..85b1a8eede 100644 --- a/util/types/src/core/advanced_builders.rs +++ b/util/types/src/core/advanced_builders.rs @@ -1,7 +1,7 @@ //! Advanced builders for Transaction(View), Header(View) and Block(View). use crate::{ - core, packed, + constants, core, packed, prelude::*, utilities::{merkle_root, DIFF_TWO}, }; @@ -16,7 +16,7 @@ use crate::{ /// /// [`TransactionView`]: struct.TransactionView.html /// [`packed::TransactionBuilder`]: ../packed/struct.TransactionBuilder.html -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] pub struct TransactionBuilder { pub(crate) version: packed::Uint32, pub(crate) cell_deps: Vec, @@ -70,10 +70,24 @@ pub struct BlockBuilder { * Implement std traits. 
*/ +impl ::std::default::Default for TransactionBuilder { + fn default() -> Self { + Self { + version: constants::TX_VERSION.pack(), + cell_deps: Default::default(), + header_deps: Default::default(), + inputs: Default::default(), + outputs: Default::default(), + witnesses: Default::default(), + outputs_data: Default::default(), + } + } +} + impl ::std::default::Default for HeaderBuilder { fn default() -> Self { Self { - version: Default::default(), + version: constants::BLOCK_VERSION.pack(), parent_hash: Default::default(), timestamp: Default::default(), number: Default::default(), diff --git a/util/types/src/core/error.rs b/util/types/src/core/error.rs index bb86de3854..24d19cbcdd 100644 --- a/util/types/src/core/error.rs +++ b/util/types/src/core/error.rs @@ -159,15 +159,6 @@ pub enum TransactionError { actual: Version, }, - /// The transaction version is too low, it's deprecated. - #[error("DeprecatedVersion: minimum {}, got {}", minimum, actual)] - DeprecatedVersion { - /// The minimum supported transaction version. - minimum: Version, - /// The actual transaction version. - actual: Version, - }, - /// The transaction size exceeds limit. #[error("ExceededMaximumBlockBytes: expected transaction serialized size ({actual}) < block size limit ({limit})")] ExceededMaximumBlockBytes { @@ -195,8 +186,7 @@ impl TransactionError { TransactionError::Immature { .. } | TransactionError::CellbaseImmaturity { .. } - | TransactionError::MismatchedVersion { .. } - | TransactionError::DeprecatedVersion { .. } => false, + | TransactionError::MismatchedVersion { .. } => false, } } } diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs index bb50c01c67..5611f30df5 100644 --- a/util/types/src/core/hardfork.rs +++ b/util/types/src/core/hardfork.rs @@ -99,7 +99,6 @@ pub struct HardForkSwitch { rfc_pr_0223: EpochNumber, rfc_pr_0224: EpochNumber, rfc_pr_0228: EpochNumber, - rfc_pr_0230: EpochNumber, } /// Builder for [`HardForkSwitch`]. @@ -131,10 +130,6 @@ pub struct HardForkSwitchBuilder { /// /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0228: Option, - /// Allow unknown block versions and transactions versions. - /// - /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) - pub rfc_pr_0230: Option, } impl HardForkSwitch { @@ -151,7 +146,6 @@ impl HardForkSwitch { .rfc_pr_0223(self.rfc_pr_0223()) .rfc_pr_0224(self.rfc_pr_0224()) .rfc_pr_0228(self.rfc_pr_0228()) - .rfc_pr_0230(self.rfc_pr_0230()) } /// Creates a new instance that all hard fork features are disabled forever. @@ -163,7 +157,6 @@ impl HardForkSwitch { .disable_rfc_pr_0223() .disable_rfc_pr_0224() .disable_rfc_pr_0228() - .disable_rfc_pr_0230() .build() .unwrap() } @@ -204,13 +197,6 @@ define_methods!( disable_rfc_pr_0228, "RFC PR 0228" ); -define_methods!( - rfc_pr_0230, - allow_unknown_versions, - is_allow_unknown_versions_enabled, - disable_rfc_pr_0230, - "RFC PR 0230" -); impl HardForkSwitchBuilder { /// Build a new [`HardForkSwitch`]. 
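A quick sketch of the builder surface once rfc_pr_0230 is gone: block and transaction versions are pinned by the new BLOCK_VERSION / TX_VERSION constants, and a switch is assembled from the remaining rfc_pr_* setters only. The epoch values below are placeholders for illustration, not real activation heights:

use ckb_types::core::hardfork::HardForkSwitch;

// Start from the all-disabled switch and re-enable selected RFCs; there is no
// rfc_pr_0230 (unknown block/tx versions) setter any more.
fn example_switch() -> HardForkSwitch {
    HardForkSwitch::new_without_any_enabled()
        .as_builder()
        .rfc_pr_0221(200) // placeholder activation epoch
        .rfc_pr_0228(200) // placeholder activation epoch
        .build()
        .unwrap()
}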
@@ -232,14 +218,12 @@ impl HardForkSwitchBuilder { let rfc_pr_0223 = try_find!(rfc_pr_0223); let rfc_pr_0224 = try_find!(rfc_pr_0224); let rfc_pr_0228 = try_find!(rfc_pr_0228); - let rfc_pr_0230 = try_find!(rfc_pr_0230); Ok(HardForkSwitch { rfc_pr_0221, rfc_pr_0222, rfc_pr_0223, rfc_pr_0224, rfc_pr_0228, - rfc_pr_0230, }) } } diff --git a/util/types/src/lib.rs b/util/types/src/lib.rs index 7849f7cf19..1e2fc209ad 100644 --- a/util/types/src/lib.rs +++ b/util/types/src/lib.rs @@ -14,6 +14,7 @@ mod generated; pub use generated::packed; pub mod core; +pub mod constants; mod conversion; mod extension; pub mod utilities; diff --git a/verification/src/header_verifier.rs b/verification/src/header_verifier.rs index ba93bd43ba..ee34525dc1 100644 --- a/verification/src/header_verifier.rs +++ b/verification/src/header_verifier.rs @@ -6,7 +6,7 @@ use ckb_chain_spec::consensus::Consensus; use ckb_error::Error; use ckb_pow::PowEngine; use ckb_traits::HeaderProvider; -use ckb_types::core::HeaderView; +use ckb_types::core::{HeaderView, Version}; use ckb_verification_traits::Verifier; use faketime::unix_time_as_millis; @@ -31,7 +31,7 @@ impl<'a, DL: HeaderProvider> HeaderVerifier<'a, DL> { impl<'a, DL: HeaderProvider> Verifier for HeaderVerifier<'a, DL> { type Target = HeaderView; fn verify(&self, header: &Self::Target) -> Result<(), Error> { - VersionVerifier::new(header, self.consensus).verify()?; + VersionVerifier::new(header, self.consensus.block_version()).verify()?; // POW check first PowVerifier::new(header, self.consensus.pow_engine().as_ref()).verify()?; let parent = self @@ -53,36 +53,26 @@ impl<'a, DL: HeaderProvider> Verifier for HeaderVerifier<'a, DL> { pub struct VersionVerifier<'a> { header: &'a HeaderView, - consensus: &'a Consensus, + block_version: Version, } impl<'a> VersionVerifier<'a> { - pub fn new(header: &'a HeaderView, consensus: &'a Consensus) -> Self { - VersionVerifier { header, consensus } + pub fn new(header: &'a HeaderView, block_version: Version) -> Self { + VersionVerifier { + header, + block_version, + } } pub fn verify(&self) -> Result<(), Error> { - let epoch_number = self.header.epoch().number(); - let target = self.consensus.block_version(epoch_number); - let actual = self.header.version(); - let failed = if self - .consensus - .hardfork_switch() - .is_allow_unknown_versions_enabled(epoch_number) - { - actual < target - } else { - actual != target - }; - if failed { - Err(BlockVersionError { - expected: target, - actual, + if self.header.version() != self.block_version { + return Err(BlockVersionError { + expected: self.block_version, + actual: self.header.version(), } - .into()) - } else { - Ok(()) + .into()); } + Ok(()) } } diff --git a/verification/src/tests/header_verifier.rs b/verification/src/tests/header_verifier.rs index 5eaed40491..3a1cbf8f32 100644 --- a/verification/src/tests/header_verifier.rs +++ b/verification/src/tests/header_verifier.rs @@ -1,14 +1,9 @@ use crate::header_verifier::{NumberVerifier, PowVerifier, TimestampVerifier, VersionVerifier}; use crate::{BlockVersionError, NumberError, PowError, TimestampError, ALLOWED_FUTURE_BLOCKTIME}; -use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_error::assert_error_eq; use ckb_pow::PowEngine; use ckb_test_chain_utils::{MockMedianTime, MOCK_MEDIAN_TIME_COUNT}; -use ckb_types::{ - core::{hardfork::HardForkSwitch, EpochNumberWithFraction, HeaderBuilder}, - packed::Header, - prelude::*, -}; +use ckb_types::{constants::BLOCK_VERSION, core::HeaderBuilder, packed::Header, prelude::*}; use 
faketime::unix_time_as_millis; fn mock_median_time_context() -> MockMedianTime { @@ -19,53 +14,18 @@ fn mock_median_time_context() -> MockMedianTime { #[test] pub fn test_version() { - let fork_at = 10; - let default_block_version = ConsensusBuilder::default().build().block_version(fork_at); - let epoch = EpochNumberWithFraction::new(fork_at, 0, 10); - let header1 = HeaderBuilder::default() - .version(default_block_version.pack()) - .epoch(epoch.pack()) - .build(); - let header2 = HeaderBuilder::default() - .version((default_block_version + 1).pack()) - .epoch(epoch.pack()) + let header = HeaderBuilder::default() + .version((BLOCK_VERSION + 1).pack()) .build(); - { - let hardfork_switch = HardForkSwitch::new_without_any_enabled() - .as_builder() - .rfc_pr_0230(fork_at + 1) - .build() - .unwrap(); - let consensus = ConsensusBuilder::default() - .hardfork_switch(hardfork_switch) - .build(); - let result = VersionVerifier::new(&header1, &consensus).verify(); - assert!(result.is_ok(), "result = {:?}", result); - - let result = VersionVerifier::new(&header2, &consensus).verify(); - assert_error_eq!( - result.unwrap_err(), - BlockVersionError { - expected: default_block_version, - actual: default_block_version + 1 - } - ); - } - { - let hardfork_switch = HardForkSwitch::new_without_any_enabled() - .as_builder() - .rfc_pr_0230(fork_at) - .build() - .unwrap(); - let consensus = ConsensusBuilder::default() - .hardfork_switch(hardfork_switch) - .build(); - let result = VersionVerifier::new(&header1, &consensus).verify(); - assert!(result.is_ok(), "result = {:?}", result); - - let result = VersionVerifier::new(&header2, &consensus).verify(); - assert!(result.is_ok(), "result = {:?}", result); - } + let verifier = VersionVerifier::new(&header, BLOCK_VERSION); + + assert_error_eq!( + verifier.verify().unwrap_err(), + BlockVersionError { + expected: BLOCK_VERSION, + actual: BLOCK_VERSION + 1 + } + ); } #[cfg(not(disable_faketime))] diff --git a/verification/src/tests/transaction_verifier.rs b/verification/src/tests/transaction_verifier.rs index f3ea06ed4b..bda3972923 100644 --- a/verification/src/tests/transaction_verifier.rs +++ b/verification/src/tests/transaction_verifier.rs @@ -10,6 +10,7 @@ use ckb_test_chain_utils::{MockMedianTime, MOCK_MEDIAN_TIME_COUNT}; use ckb_traits::HeaderProvider; use ckb_types::{ bytes::Bytes, + constants::TX_VERSION, core::{ capacity_bytes, cell::{CellMetaBuilder, ResolvedTransaction}, @@ -39,66 +40,18 @@ pub fn test_empty() { #[test] pub fn test_version() { - let fork_at = 10; - let default_tx_version = ConsensusBuilder::default().build().tx_version(fork_at); - let tx1 = TransactionBuilder::default() - .version(default_tx_version.pack()) - .build(); - let rtx1 = create_resolve_tx_with_transaction_info( - &tx1, - MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1), - ); - let tx2 = TransactionBuilder::default() - .version((default_tx_version + 1).pack()) + let transaction = TransactionBuilder::default() + .version((TX_VERSION + 1).pack()) .build(); - let rtx2 = create_resolve_tx_with_transaction_info( - &tx2, - MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1), - ); - let tx_env = { - let epoch = EpochNumberWithFraction::new(fork_at, 0, 10); - let header = HeaderView::new_advanced_builder() - .epoch(epoch.pack()) - .build(); - TxVerifyEnv::new_commit(&header) - }; - - { - let hardfork_switch = HardForkSwitch::new_without_any_enabled() - .as_builder() - .rfc_pr_0230(fork_at + 1) - .build() - .unwrap(); - let 
consensus = ConsensusBuilder::default() - .hardfork_switch(hardfork_switch) - .build(); - let result = VersionVerifier::new(&rtx1, &consensus, &tx_env).verify(); - assert!(result.is_ok(), "result = {:?}", result); + let verifier = VersionVerifier::new(&transaction, TX_VERSION); - let result = VersionVerifier::new(&rtx2, &consensus, &tx_env).verify(); - assert_error_eq!( - result.unwrap_err(), - TransactionError::MismatchedVersion { - expected: default_tx_version, - actual: default_tx_version + 1 - }, - ); - } - { - let hardfork_switch = HardForkSwitch::new_without_any_enabled() - .as_builder() - .rfc_pr_0230(fork_at) - .build() - .unwrap(); - let consensus = ConsensusBuilder::default() - .hardfork_switch(hardfork_switch) - .build(); - let result = VersionVerifier::new(&rtx1, &consensus, &tx_env).verify(); - assert!(result.is_ok(), "result = {:?}", result); - - let result = VersionVerifier::new(&rtx2, &consensus, &tx_env).verify(); - assert!(result.is_ok(), "result = {:?}", result); - } + assert_error_eq!( + verifier.verify().unwrap_err(), + TransactionError::MismatchedVersion { + expected: 0, + actual: 1 + }, + ); } #[test] diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 2b00831e78..affd5d274d 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -10,7 +10,7 @@ use ckb_traits::{CellDataProvider, EpochProvider, HeaderProvider}; use ckb_types::{ core::{ cell::{CellMeta, ResolvedTransaction}, - Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, + Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, Version, }, packed::Byte32, prelude::*, @@ -51,11 +51,13 @@ impl<'a, DL: HeaderProvider + ConsensusProvider> TimeRelativeTransactionVerifier /// /// Basic checks that don't depend on any context /// Contains: +/// - Check for version /// - Check for size /// - Check inputs and output empty /// - Check for duplicate deps /// - Check for whether outputs match data pub struct NonContextualTransactionVerifier<'a> { + pub(crate) version: VersionVerifier<'a>, pub(crate) size: SizeVerifier<'a>, pub(crate) empty: EmptyVerifier<'a>, pub(crate) duplicate_deps: DuplicateDepsVerifier<'a>, @@ -66,6 +68,7 @@ impl<'a> NonContextualTransactionVerifier<'a> { /// Creates a new NonContextualTransactionVerifier pub fn new(tx: &'a TransactionView, consensus: &'a Consensus) -> Self { NonContextualTransactionVerifier { + version: VersionVerifier::new(tx, consensus.tx_version()), size: SizeVerifier::new(tx, consensus.max_block_bytes()), empty: EmptyVerifier::new(tx), duplicate_deps: DuplicateDepsVerifier::new(tx), @@ -75,6 +78,7 @@ impl<'a> NonContextualTransactionVerifier<'a> { /// Perform context-independent verification pub fn verify(&self) -> Result<(), Error> { + self.version.verify()?; self.size.verify()?; self.empty.verify()?; self.duplicate_deps.verify()?; @@ -86,14 +90,12 @@ impl<'a> NonContextualTransactionVerifier<'a> { /// Context-dependent verification checks for transaction /// /// Contains: -/// [`VersionVerifier`](./struct.VersionVerifier.html) /// [`MaturityVerifier`](./struct.MaturityVerifier.html) /// [`SinceVerifier`](./struct.SinceVerifier.html) /// [`CapacityVerifier`](./struct.CapacityVerifier.html) /// [`ScriptVerifier`](./struct.ScriptVerifier.html) /// [`FeeCalculator`](./struct.FeeCalculator.html) pub struct ContextualTransactionVerifier<'a, DL> { - pub(crate) version: VersionVerifier<'a>, pub(crate) maturity: MaturityVerifier<'a>, pub(crate) 
since: SinceVerifier<'a, DL>, pub(crate) capacity: CapacityVerifier<'a>, @@ -113,7 +115,6 @@ where tx_env: &'a TxVerifyEnv, ) -> Self { ContextualTransactionVerifier { - version: VersionVerifier::new(&rtx, consensus, tx_env), maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), script: ScriptVerifier::new(rtx, consensus, data_loader, tx_env), capacity: CapacityVerifier::new(rtx, consensus.dao_type_hash()), @@ -127,7 +128,6 @@ where /// skip script verify will result in the return value cycle always is zero pub fn verify(&self, max_cycles: Cycle, skip_script_verify: bool) -> Result { let timer = Timer::start(); - self.version.verify()?; self.maturity.verify()?; self.capacity.verify()?; self.since.verify()?; @@ -203,45 +203,23 @@ impl<'a, DL: CellDataProvider + HeaderProvider + EpochProvider> FeeCalculator<'a } pub struct VersionVerifier<'a> { - rtx: &'a ResolvedTransaction, - consensus: &'a Consensus, - tx_env: &'a TxVerifyEnv, + transaction: &'a TransactionView, + tx_version: Version, } impl<'a> VersionVerifier<'a> { - pub fn new( - rtx: &'a ResolvedTransaction, - consensus: &'a Consensus, - tx_env: &'a TxVerifyEnv, - ) -> Self { + pub fn new(transaction: &'a TransactionView, tx_version: Version) -> Self { VersionVerifier { - rtx, - consensus, - tx_env, + transaction, + tx_version, } } pub fn verify(&self) -> Result<(), Error> { - let proposal_window = self.consensus.tx_proposal_window(); - let epoch_number = self.tx_env.epoch_number(proposal_window); - let target = self.consensus.tx_version(epoch_number); - let actual = self.rtx.transaction.version(); - if self - .consensus - .hardfork_switch() - .is_allow_unknown_versions_enabled(epoch_number) - { - if actual < target { - return Err((TransactionError::DeprecatedVersion { - minimum: target, - actual, - }) - .into()); - } - } else if actual != target { + if self.transaction.version() != self.tx_version { return Err((TransactionError::MismatchedVersion { - expected: target, - actual, + expected: self.tx_version, + actual: self.transaction.version(), }) .into()); } From bc5c61f7f255d870b29de546a154503d94b5e1c0 Mon Sep 17 00:00:00 2001 From: Boyu Yang Date: Thu, 10 Jun 2021 17:40:22 +0800 Subject: [PATCH 15/18] Revert "feat(hardfork): allow loading uncommitted cell data hashes from tx pool" This reverts commit 10ffedf61e1edefe6a4e3da910ed5903b4066226. 
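With this revert, CellProvider::cell goes back to two arguments (out_point, with_data) and resolve_transaction back to four, dropping the allow_in_txpool flag everywhere. A minimal sketch of a provider against the restored trait, modelled on the CellMemoryDb test helper further down; the struct and its storage layout are illustrative only, not part of the patch:

use std::collections::HashMap;
use ckb_types::{
    core::cell::{CellMeta, CellProvider, CellStatus},
    packed::OutPoint,
};

// Toy in-memory provider: Some(Some(meta)) = live, Some(None) = dead, absent = unknown.
struct MemoryCellProvider {
    cells: HashMap<OutPoint, Option<CellMeta>>,
}

impl CellProvider for MemoryCellProvider {
    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
        match self.cells.get(out_point) {
            Some(Some(meta)) => CellStatus::live_cell(meta.clone()),
            Some(None) => CellStatus::Dead,
            None => CellStatus::Unknown,
        }
    }
}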
--- benches/benches/benchmarks/resolve.rs | 6 +- benches/benches/benchmarks/util.rs | 8 +- chain/src/chain.rs | 4 - chain/src/tests/basic.rs | 14 ++-- chain/src/tests/cell.rs | 16 ++-- chain/src/tests/load_input_data_hash_cell.rs | 69 ++++------------- .../tests/non_contextual_block_txs_verify.rs | 1 + chain/src/tests/util.rs | 8 +- rpc/src/module/chain.rs | 5 +- rpc/src/module/experiment.rs | 30 ++------ rpc/src/module/test.rs | 8 +- rpc/src/test.rs | 10 +-- script/src/syscalls/load_cell.rs | 14 +--- script/src/syscalls/mod.rs | 7 -- script/src/verify.rs | 16 +--- spec/src/hardfork.rs | 6 +- store/src/store.rs | 2 +- sync/src/synchronizer/mod.rs | 11 +-- sync/src/tests/synchronizer.rs | 1 - sync/src/tests/util.rs | 1 - test/template/specs/integration.toml | 1 - tx-pool/src/component/pending.rs | 7 +- tx-pool/src/component/proposed.rs | 7 +- tx-pool/src/pool.rs | 30 +------- util/snapshot/src/lib.rs | 6 +- util/test-chain-utils/src/mock_store.rs | 7 +- util/types/src/core/cell.rs | 77 ++++++------------- util/types/src/core/hardfork.rs | 17 ---- 28 files changed, 82 insertions(+), 307 deletions(-) diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 1373252a11..514ad01e99 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -132,8 +132,7 @@ fn bench(c: &mut Criterion) { let mut seen_inputs = HashSet::new(); for tx in txs.clone() { - resolve_transaction(tx, &mut seen_inputs, &provider, snapshot, false) - .unwrap(); + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot).unwrap(); } i -= 1; @@ -159,8 +158,7 @@ fn bench(c: &mut Criterion) { let rtxs: Vec<_> = txs .into_iter() .map(|tx| { - resolve_transaction(tx, &mut seen_inputs, &provider, snapshot, false) - .unwrap() + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot).unwrap() }) .collect(); diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 97498bd8bf..d99f76f20a 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -487,13 +487,7 @@ pub fn dao_data(shared: &Shared, parent: &HeaderView, txs: &[TransactionView]) - let snapshot: &Snapshot = &shared.snapshot(); let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, snapshot); let rtxs = txs.iter().cloned().try_fold(vec![], |mut rtxs, tx| { - let rtx = resolve_transaction( - tx, - &mut seen_inputs, - &overlay_cell_provider, - snapshot, - false, - ); + let rtx = resolve_transaction(tx, &mut seen_inputs, &overlay_cell_provider, snapshot); match rtx { Ok(rtx) => { rtxs.push(rtx); diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 993c287fd6..b88efe00e0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -684,7 +684,6 @@ impl ChainService { let verify_context = VerifyContext::new(txn, self.shared.consensus()); let async_handle = self.shared.tx_pool_controller().handle(); - let hardfork_switch = self.shared.consensus().hardfork_switch(); let mut found_error = None; for (ext, b) in fork @@ -705,8 +704,6 @@ impl ChainService { }; let transactions = b.transactions(); - let allow_in_txpool = hardfork_switch - .is_allow_cell_data_hash_in_txpool_enabled(b.epoch().number()); let resolved = { let txn_cell_provider = txn.cell_provider(); let cell_provider = OverlayCellProvider::new(&block_cp, &txn_cell_provider); @@ -719,7 +716,6 @@ impl ChainService { &mut seen_inputs, &cell_provider, &verify_context, - allow_in_txpool, ) }) .collect::, _>>() diff --git a/chain/src/tests/basic.rs 
b/chain/src/tests/basic.rs index 3fff143171..292c1207d5 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -115,7 +115,7 @@ fn test_genesis_transaction_spend() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(genesis_tx_hash, 0), false, false), + .cell(&OutPoint::new(genesis_tx_hash, 0), false), CellStatus::Unknown ); } @@ -142,7 +142,7 @@ fn test_transaction_spend_in_same_block() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(hash.to_owned().to_owned(), 0), false, false), + .cell(&OutPoint::new(hash.to_owned().to_owned(), 0), false), CellStatus::Unknown ); } @@ -171,14 +171,12 @@ fn test_transaction_spend_in_same_block() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(last_cellbase_hash, 0), false, false), + .cell(&OutPoint::new(last_cellbase_hash, 0), false), CellStatus::Unknown ); assert_eq!( - shared - .snapshot() - .cell(&OutPoint::new(tx1_hash, 0), false, false), + shared.snapshot().cell(&OutPoint::new(tx1_hash, 0), false), CellStatus::Unknown ); @@ -191,7 +189,7 @@ fn test_transaction_spend_in_same_block() { assert_eq!( shared .snapshot() - .cell(&OutPoint::new(tx2_hash.clone(), 0), false, false), + .cell(&OutPoint::new(tx2_hash.clone(), 0), false), CellStatus::live_cell(CellMeta { cell_output: tx2_output, data_bytes: tx2_output_data.len() as u64, @@ -377,7 +375,7 @@ fn test_genesis_transaction_fetch() { let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); let out_point = OutPoint::new(root_hash, 0); - let state = shared.snapshot().cell(&out_point, false, false); + let state = shared.snapshot().cell(&out_point, false); assert!(state.is_live()); } diff --git a/chain/src/tests/cell.rs b/chain/src/tests/cell.rs index 68ed98a8f1..41ca204d4f 100644 --- a/chain/src/tests/cell.rs +++ b/chain/src/tests/cell.rs @@ -144,21 +144,18 @@ fn test_block_cells_update() { for tx in block.transactions()[1..4].iter() { for pt in tx.output_pts() { // full spent - assert_eq!( - txn_cell_provider.cell(&pt, false, false), - CellStatus::Unknown - ); + assert_eq!(txn_cell_provider.cell(&pt, false), CellStatus::Unknown); } } // ensure tx3 outputs is unspent after attach_block_cell for pt in block.transactions()[4].output_pts() { - assert!(txn_cell_provider.cell(&pt, false, false).is_live()); + assert!(txn_cell_provider.cell(&pt, false).is_live()); } // ensure issue_tx outputs is spent after attach_block_cell assert_eq!( - txn_cell_provider.cell(&issue_tx.output_pts()[0], false, false), + txn_cell_provider.cell(&issue_tx.output_pts()[0], false), CellStatus::Unknown ); @@ -167,15 +164,12 @@ fn test_block_cells_update() { // ensure tx0-3 outputs is unknown after detach_block_cell for tx in block.transactions()[1..=4].iter() { for pt in tx.output_pts() { - assert_eq!( - txn_cell_provider.cell(&pt, false, false), - CellStatus::Unknown - ); + assert_eq!(txn_cell_provider.cell(&pt, false), CellStatus::Unknown); } } // ensure issue_tx outputs is back to live after detach_block_cell assert!(txn_cell_provider - .cell(&issue_tx.output_pts()[0], false, false) + .cell(&issue_tx.output_pts()[0], false) .is_live()); } diff --git a/chain/src/tests/load_input_data_hash_cell.rs b/chain/src/tests/load_input_data_hash_cell.rs index 65fb76663e..f43e7770a5 100644 --- a/chain/src/tests/load_input_data_hash_cell.rs +++ b/chain/src/tests/load_input_data_hash_cell.rs @@ -4,13 +4,12 @@ use crate::tests::util::{ use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_dao_utils::genesis_dao_data; use ckb_test_chain_utils::load_input_data_hash_cell; -use 
ckb_tx_pool::{PlugTarget, TxEntry}; use ckb_types::prelude::*; use ckb_types::{ bytes::Bytes, core::{ - capacity_bytes, hardfork::HardForkSwitch, BlockBuilder, Capacity, EpochNumberWithFraction, - TransactionBuilder, TransactionView, + capacity_bytes, BlockBuilder, Capacity, EpochNumberWithFraction, TransactionBuilder, + TransactionView, }, packed::{CellDep, CellInput, CellOutputBuilder, OutPoint}, utilities::DIFF_TWO, @@ -49,8 +48,7 @@ pub(crate) fn create_load_input_data_hash_transaction( .build() } -// Ensure tx-pool reject or accept tx which calls syscall load_cell_data_hash from input base on -// hardfork features. +// Ensure tx-pool accept tx which calls syscall load_cell_data_hash from input #[test] fn test_load_input_data_hash_cell() { let (_, _, load_input_data_hash_script) = load_input_data_hash_cell(); @@ -76,57 +74,20 @@ fn test_load_input_data_hash_cell() { .dao(dao) .build(); - { - // Test CKB v2019 reject - let hardfork_switch = HardForkSwitch::new_without_any_enabled(); - let consensus = ConsensusBuilder::default() - .cellbase_maturity(EpochNumberWithFraction::new(0, 0, 1)) - .genesis_block(genesis_block.clone()) - .hardfork_switch(hardfork_switch) - .build(); - - let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); - - let tx0 = create_load_input_data_hash_transaction(&issue_tx, 0); - let tx1 = create_load_input_data_hash_transaction(&tx0, 0); - - let tx_pool = shared.tx_pool_controller(); - let ret = tx_pool.submit_local_tx(tx0.clone()).unwrap(); - assert!(ret.is_err()); - //ValidationFailure(2) missing item - assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)")); - - let entry0 = vec![TxEntry::dummy_resolve(tx0, 0, Capacity::shannons(0), 100)]; - tx_pool.plug_entry(entry0, PlugTarget::Proposed).unwrap(); - - // Ensure tx which calls syscall load_cell_data_hash will got reject even previous tx is already in tx-pool - let ret = tx_pool.submit_local_tx(tx1).unwrap(); - assert!(ret.is_err()); - assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)")); - } - { - // Test CKB v2021 accept - let hardfork_switch = HardForkSwitch::new_without_any_enabled() - .as_builder() - .rfc_pr_0228(0) - .build() - .unwrap(); - let consensus = ConsensusBuilder::default() - .cellbase_maturity(EpochNumberWithFraction::new(0, 0, 1)) - .genesis_block(genesis_block) - .hardfork_switch(hardfork_switch) - .build(); + let consensus = ConsensusBuilder::default() + .cellbase_maturity(EpochNumberWithFraction::new(0, 0, 1)) + .genesis_block(genesis_block) + .build(); - let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); + let (_chain_controller, shared, _parent) = start_chain(Some(consensus)); - let tx0 = create_load_input_data_hash_transaction(&issue_tx, 0); - let tx1 = create_load_input_data_hash_transaction(&tx0, 0); + let tx0 = create_load_input_data_hash_transaction(&issue_tx, 0); + let tx1 = create_load_input_data_hash_transaction(&tx0, 0); - let tx_pool = shared.tx_pool_controller(); - let ret = tx_pool.submit_local_tx(tx0).unwrap(); - assert!(ret.is_ok()); + let tx_pool = shared.tx_pool_controller(); + let ret = tx_pool.submit_local_tx(tx0).unwrap(); + assert!(ret.is_ok()); - let ret = tx_pool.submit_local_tx(tx1).unwrap(); - assert!(ret.is_ok()); - } + let ret = tx_pool.submit_local_tx(tx1).unwrap(); + assert!(ret.is_ok()); } diff --git a/chain/src/tests/non_contextual_block_txs_verify.rs b/chain/src/tests/non_contextual_block_txs_verify.rs index 8201fb36d9..1266f4c3f6 100644 --- 
a/chain/src/tests/non_contextual_block_txs_verify.rs +++ b/chain/src/tests/non_contextual_block_txs_verify.rs @@ -50,6 +50,7 @@ pub(crate) fn create_cellbase( } } +#[allow(clippy::too_many_arguments)] pub(crate) fn gen_block( parent_header: &HeaderView, transactions: Vec, diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 5506e9bdc7..f3acf25321 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -531,13 +531,7 @@ pub fn dao_data( let transactions_provider = TransactionsProvider::new(txs.iter()); let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, store); let rtxs = txs.iter().try_fold(vec![], |mut rtxs, tx| { - let rtx = resolve_transaction( - tx.clone(), - &mut seen_inputs, - &overlay_cell_provider, - store, - false, - ); + let rtx = resolve_transaction(tx.clone(), &mut seen_inputs, &overlay_cell_provider, store); match rtx { Ok(rtx) => { rtxs.push(rtx); diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 853a3a07a5..aac5f8e5ed 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1393,10 +1393,7 @@ impl ChainRpc for ChainRpcImpl { } fn get_live_cell(&self, out_point: OutPoint, with_data: bool) -> Result { - let cell_status = self - .shared - .snapshot() - .cell(&out_point.into(), with_data, true); + let cell_status = self.shared.snapshot().cell(&out_point.into(), with_data); Ok(cell_status.into()) } diff --git a/rpc/src/module/experiment.rs b/rpc/src/module/experiment.rs index 814d5ef0af..b187462dfa 100644 --- a/rpc/src/module/experiment.rs +++ b/rpc/src/module/experiment.rs @@ -214,12 +214,7 @@ pub(crate) struct DryRunner<'a> { } impl<'a> CellProvider for DryRunner<'a> { - fn cell( - &self, - out_point: &packed::OutPoint, - with_data: bool, - _allow_in_txpool: bool, - ) -> CellStatus { + fn cell(&self, out_point: &packed::OutPoint, with_data: bool) -> CellStatus { let snapshot = self.shared.snapshot(); snapshot .get_cell(out_point) @@ -249,27 +244,12 @@ impl<'a> DryRunner<'a> { pub(crate) fn run(&self, tx: packed::Transaction) -> Result { let snapshot: &Snapshot = &self.shared.snapshot(); - let consensus = snapshot.consensus(); - let tx_env = { - let tip_header = snapshot.tip_header(); - TxVerifyEnv::new_submit(&tip_header) - }; - let allow_in_txpool = { - let proposal_window = consensus.tx_proposal_window(); - let epoch_number = tx_env.epoch_number(proposal_window); - consensus - .hardfork_switch() - .is_allow_cell_data_hash_in_txpool_enabled(epoch_number) - }; - match resolve_transaction( - tx.into_view(), - &mut HashSet::new(), - self, - self, - allow_in_txpool, - ) { + match resolve_transaction(tx.into_view(), &mut HashSet::new(), self, self) { Ok(resolved) => { + let consensus = snapshot.consensus(); let max_cycles = consensus.max_block_cycles; + let tip_header = snapshot.tip_header(); + let tx_env = TxVerifyEnv::new_submit(&tip_header); match ScriptVerifier::new( &resolved, consensus, diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index 93641af36e..ea364525c9 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -2,9 +2,7 @@ use crate::error::RPCError; use ckb_app_config::BlockAssemblerConfig; use ckb_chain::chain::ChainController; use ckb_dao::DaoCalculator; -use ckb_jsonrpc_types::{ - AsEpochNumberWithFraction, Block, BlockTemplate, Cycle, JsonBytes, Script, Transaction, -}; +use ckb_jsonrpc_types::{Block, BlockTemplate, Cycle, JsonBytes, Script, Transaction}; use ckb_logger::error; use ckb_network::{NetworkController, SupportProtocols}; use 
ckb_shared::{shared::Shared, Snapshot}; @@ -181,16 +179,12 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl { let transactions_provider = TransactionsProvider::new(txs.as_slice().iter()); let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, snapshot); - let allow_in_txpool = consensus - .hardfork_switch() - .is_allow_cell_data_hash_in_txpool_enabled(block_template.epoch.epoch_number()); let rtxs = txs.iter().map(|tx| { resolve_transaction( tx.clone(), &mut seen_inputs, &overlay_cell_provider, snapshot, - allow_in_txpool, ).map_err(|err| { error!( "resolve transactions error when generating block with block template, error: {:?}", diff --git a/rpc/src/test.rs b/rpc/src/test.rs index d0639cd5b1..e166d74e0e 100644 --- a/rpc/src/test.rs +++ b/rpc/src/test.rs @@ -110,14 +110,8 @@ fn next_block(shared: &Shared, parent: &HeaderView) -> BlockView { let cellbase = always_success_cellbase(parent.number() + 1, reward.total, shared.consensus()); let dao = { - let resolved_cellbase = resolve_transaction( - cellbase.clone(), - &mut HashSet::new(), - snapshot, - snapshot, - false, - ) - .unwrap(); + let resolved_cellbase = + resolve_transaction(cellbase.clone(), &mut HashSet::new(), snapshot, snapshot).unwrap(); let data_loader = shared.store().as_data_provider(); DaoCalculator::new(shared.consensus(), &data_loader) .dao_field(&[resolved_cellbase], parent) diff --git a/script/src/syscalls/load_cell.rs b/script/src/syscalls/load_cell.rs index 3f19738496..9a2d85710d 100644 --- a/script/src/syscalls/load_cell.rs +++ b/script/src/syscalls/load_cell.rs @@ -24,7 +24,6 @@ pub struct LoadCell<'a, DL> { resolved_cell_deps: &'a [CellMeta], group_inputs: &'a [usize], group_outputs: &'a [usize], - allow_cell_data_hash_in_txpool: bool, } impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { @@ -35,7 +34,6 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { resolved_cell_deps: &'a [CellMeta], group_inputs: &'a [usize], group_outputs: &'a [usize], - allow_cell_data_hash_in_txpool: bool, ) -> LoadCell<'a, DL> { LoadCell { data_loader, @@ -44,7 +42,6 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { resolved_cell_deps, group_inputs, group_outputs, - allow_cell_data_hash_in_txpool, } } @@ -105,15 +102,8 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> { (SUCCESS, store_data(machine, &buffer)?) } CellField::DataHash => { - if self.allow_cell_data_hash_in_txpool { - if let Some(bytes) = self.data_loader.load_cell_data_hash(cell) { - (SUCCESS, store_data(machine, &bytes.as_bytes())?) - } else { - (ITEM_MISSING, 0) - } - } else if let Some(data_hash) = &cell.mem_cell_data_hash { - let bytes = data_hash.raw_data(); - (SUCCESS, store_data(machine, &bytes)?) + if let Some(bytes) = self.data_loader.load_cell_data_hash(cell) { + (SUCCESS, store_data(machine, &bytes.as_bytes())?) 
} else { (ITEM_MISSING, 0) } diff --git a/script/src/syscalls/mod.rs b/script/src/syscalls/mod.rs index dfe6543811..f3864aa036 100644 --- a/script/src/syscalls/mod.rs +++ b/script/src/syscalls/mod.rs @@ -253,7 +253,6 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, - false, ); prop_assert!(load_cell.ecall(&mut machine).is_ok()); @@ -298,7 +297,6 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, - false, ); let input_correct_data = input_cell.cell_output.as_slice(); @@ -391,7 +389,6 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, - false, ); let input_correct_data = input_cell.cell_output.as_slice(); @@ -446,7 +443,6 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, - false, ); let input_correct_data = input_cell.cell_output.as_slice(); @@ -514,7 +510,6 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, - false, ); prop_assert!(machine.memory_mut().store64(&size_addr, &16).is_ok()); @@ -569,7 +564,6 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, - false, ); assert!(machine.memory_mut().store64(&size_addr, &100).is_ok()); @@ -911,7 +905,6 @@ mod tests { &resolved_cell_deps, &group_inputs, &group_outputs, - false, ); prop_assert!(machine.memory_mut().store64(&size_addr, &64).is_ok()); diff --git a/script/src/verify.rs b/script/src/verify.rs index d55ee5e55c..4017bf601a 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -240,7 +240,6 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D &'a self, group_inputs: &'a [usize], group_outputs: &'a [usize], - allow_cell_data_hash_in_txpool: bool, ) -> LoadCell<'a, DL> { LoadCell::new( &self.data_loader, @@ -249,7 +248,6 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D self.resolved_cell_deps(), group_inputs, group_outputs, - allow_cell_data_hash_in_txpool, ) } @@ -427,20 +425,12 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D script_group: &'a ScriptGroup, ) -> Vec + 'a)>> { let current_script_hash = script_group.script.calc_script_hash(); - let proposal_window = self.consensus.tx_proposal_window(); - let epoch_number = self.tx_env.epoch_number(proposal_window); - let allow_cell_data_hash_in_txpool = self - .consensus - .hardfork_switch() - .is_allow_cell_data_hash_in_txpool_enabled(epoch_number); vec![ Box::new(self.build_load_script_hash(current_script_hash.clone())), Box::new(self.build_load_tx()), - Box::new(self.build_load_cell( - &script_group.input_indices, - &script_group.output_indices, - allow_cell_data_hash_in_txpool, - )), + Box::new( + self.build_load_cell(&script_group.input_indices, &script_group.output_indices), + ), Box::new(self.build_load_input(&script_group.input_indices)), Box::new(self.build_load_header(&script_group.input_indices)), Box::new( diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 3a669f258f..e8021f0784 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -20,8 +20,6 @@ pub struct HardForkConfig { pub rfc_pr_0223: Option, /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) pub rfc_pr_0224: Option, - /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) - pub rfc_pr_0228: Option, } macro_rules! 
check_default { @@ -68,8 +66,7 @@ impl HardForkConfig { .rfc_pr_0221(check_default!(self, rfc_pr_0221, ckb2021)) .rfc_pr_0222(check_default!(self, rfc_pr_0222, ckb2021)) .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)) - .rfc_pr_0224(check_default!(self, rfc_pr_0224, ckb2021)) - .rfc_pr_0228(check_default!(self, rfc_pr_0228, ckb2021)); + .rfc_pr_0224(check_default!(self, rfc_pr_0224, ckb2021)); Ok(builder) } @@ -82,7 +79,6 @@ impl HardForkConfig { .rfc_pr_0222(self.rfc_pr_0222.unwrap_or(default)) .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) .rfc_pr_0224(self.rfc_pr_0224.unwrap_or(default)) - .rfc_pr_0228(self.rfc_pr_0228.unwrap_or(default)) .build() } } diff --git a/store/src/store.rs b/store/src/store.rs index 18729aa7e9..dcc7e08d6a 100644 --- a/store/src/store.rs +++ b/store/src/store.rs @@ -530,7 +530,7 @@ impl<'a, S> CellProvider for CellProviderWrapper<'a, S> where S: ChainStore<'a>, { - fn cell(&self, out_point: &OutPoint, with_data: bool, _allow_in_txpool: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus { match self.0.get_cell(out_point) { Some(mut cell_meta) => { if with_data { diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 62ec74a7c7..bc665402d7 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -935,14 +935,9 @@ mod tests { let cellbase = create_cellbase(shared, parent_header, number); let dao = { let snapshot: &Snapshot = &shared.snapshot(); - let resolved_cellbase = resolve_transaction( - cellbase.clone(), - &mut HashSet::new(), - snapshot, - snapshot, - false, - ) - .unwrap(); + let resolved_cellbase = + resolve_transaction(cellbase.clone(), &mut HashSet::new(), snapshot, snapshot) + .unwrap(); let data_loader = shared.store().as_data_provider(); DaoCalculator::new(shared.consensus(), &data_loader) .dao_field(&[resolved_cellbase], parent_header) diff --git a/sync/src/tests/synchronizer.rs b/sync/src/tests/synchronizer.rs index c0c3856e23..1d47bbb4c4 100644 --- a/sync/src/tests/synchronizer.rs +++ b/sync/src/tests/synchronizer.rs @@ -146,7 +146,6 @@ fn setup_node(height: u64) -> (TestNode, Shared) { &mut HashSet::new(), snapshot.as_ref(), snapshot.as_ref(), - false, ) .unwrap(); let data_loader = snapshot.as_data_provider(); diff --git a/sync/src/tests/util.rs b/sync/src/tests/util.rs index b551e78ca0..7ca67116ce 100644 --- a/sync/src/tests/util.rs +++ b/sync/src/tests/util.rs @@ -63,7 +63,6 @@ pub fn inherit_block(shared: &Shared, parent_hash: &Byte32) -> BlockBuilder { &mut HashSet::new(), snapshot.as_ref(), snapshot.as_ref(), - false, ) .unwrap(); let data_loader = snapshot.as_data_provider(); diff --git a/test/template/specs/integration.toml b/test/template/specs/integration.toml index b764e964f2..cf1b5ff7b7 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -73,7 +73,6 @@ rfc_pr_0221 = 9_223_372_036_854_775_807 rfc_pr_0222 = 9_223_372_036_854_775_807 rfc_pr_0223 = 9_223_372_036_854_775_807 rfc_pr_0224 = 9_223_372_036_854_775_807 -rfc_pr_0228 = 9_223_372_036_854_775_807 [pow] func = "Dummy" diff --git a/tx-pool/src/component/pending.rs b/tx-pool/src/component/pending.rs index 4c5a576176..0e76fc6348 100644 --- a/tx-pool/src/component/pending.rs +++ b/tx-pool/src/component/pending.rs @@ -73,7 +73,7 @@ impl PendingQueue { } impl CellProvider for PendingQueue { - fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus { + fn cell(&self, out_point: &OutPoint, _with_data: bool) -> 
CellStatus {
         let tx_hash = out_point.tx_hash();
         if let Some(entry) = self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) {
             match entry
@@ -81,12 +81,9 @@ impl CellProvider for PendingQueue {
                 .output_with_data(out_point.index().unpack())
             {
                 Some((output, data)) => {
-                    let mut cell_meta = CellMetaBuilder::from_cell_output(output, data)
+                    let cell_meta = CellMetaBuilder::from_cell_output(output, data)
                         .out_point(out_point.to_owned())
                         .build();
-                    if !allow_in_txpool && !with_data {
-                        cell_meta.mem_cell_data_hash = None;
-                    }
                     CellStatus::live_cell(cell_meta)
                 }
                 None => CellStatus::Unknown,
diff --git a/tx-pool/src/component/proposed.rs b/tx-pool/src/component/proposed.rs
index 2929d18004..c365ccadf9 100644
--- a/tx-pool/src/component/proposed.rs
+++ b/tx-pool/src/component/proposed.rs
@@ -97,19 +97,16 @@ pub struct ProposedPool {
 }
 
 impl CellProvider for ProposedPool {
-    fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
         if let Some(x) = self.edges.get_output_ref(out_point) {
             // output consumed
             if x.is_some() {
                 CellStatus::Dead
             } else {
                 let (output, data) = self.get_output_with_data(out_point).expect("output");
-                let mut cell_meta = CellMetaBuilder::from_cell_output(output, data)
+                let cell_meta = CellMetaBuilder::from_cell_output(output, data)
                     .out_point(out_point.to_owned())
                     .build();
-                if !allow_in_txpool && !with_data {
-                    cell_meta.mem_cell_data_hash = None;
-                }
                 CellStatus::live_cell(cell_meta)
             }
         } else if self.edges.get_input_ref(out_point).is_some() {
diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs
index 14cad81f19..2aa4a726af 100644
--- a/tx-pool/src/pool.rs
+++ b/tx-pool/src/pool.rs
@@ -302,22 +302,11 @@ impl TxPool {
         let pending_and_proposed_provider =
             OverlayCellProvider::new(&self.pending, &gap_and_proposed_provider);
         let mut seen_inputs = HashSet::new();
-        let allow_in_txpool = {
-            let tip_header = snapshot.tip_header();
-            let consensus = snapshot.consensus();
-            let proposal_window = consensus.tx_proposal_window();
-            let tx_env = TxVerifyEnv::new_submit(tip_header);
-            let epoch_number = tx_env.epoch_number(proposal_window);
-            consensus
-                .hardfork_switch()
-                .is_allow_cell_data_hash_in_txpool_enabled(epoch_number)
-        };
         resolve_transaction(
             tx,
             &mut seen_inputs,
             &pending_and_proposed_provider,
             snapshot,
-            allow_in_txpool,
         )
         .map_err(Reject::Resolve)
     }
@@ -343,24 +332,7 @@ impl TxPool {
         let snapshot = self.snapshot();
         let cell_provider = OverlayCellProvider::new(&self.proposed, snapshot);
         let mut seen_inputs = HashSet::new();
-        let allow_in_txpool = {
-            let tip_header = snapshot.tip_header();
-            let consensus = snapshot.consensus();
-            let proposal_window = consensus.tx_proposal_window();
-            let tx_env = TxVerifyEnv::new_proposed(tip_header, 1);
-            let epoch_number = tx_env.epoch_number(proposal_window);
-            consensus
-                .hardfork_switch()
-                .is_allow_cell_data_hash_in_txpool_enabled(epoch_number)
-        };
-        resolve_transaction(
-            tx,
-            &mut seen_inputs,
-            &cell_provider,
-            snapshot,
-            allow_in_txpool,
-        )
-        .map_err(Reject::Resolve)
+        resolve_transaction(tx, &mut seen_inputs, &cell_provider, snapshot).map_err(Reject::Resolve)
     }
 
     pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> {
diff --git a/util/snapshot/src/lib.rs b/util/snapshot/src/lib.rs
index 8cea796808..2fc1745061 100644
--- a/util/snapshot/src/lib.rs
+++ b/util/snapshot/src/lib.rs
@@ -176,10 +176,8 @@ impl<'a> ChainStore<'a> for Snapshot {
 }
 
 impl CellProvider for Snapshot {
-    fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus {
-        self.store
-            .cell_provider()
-            .cell(out_point, with_data, allow_in_txpool)
+    fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus {
+        self.store.cell_provider().cell(out_point, with_data)
     }
 }
 
diff --git a/util/test-chain-utils/src/mock_store.rs b/util/test-chain-utils/src/mock_store.rs
index 466aa4f943..896e456b19 100644
--- a/util/test-chain-utils/src/mock_store.rs
+++ b/util/test-chain-utils/src/mock_store.rs
@@ -71,7 +71,7 @@ impl MockStore {
 }
 
 impl CellProvider for MockStore {
-    fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
         match self.0.get_transaction(&out_point.tx_hash()) {
             Some((tx, _)) => tx
                 .outputs()
@@ -82,12 +82,9 @@ impl CellProvider for MockStore {
                     .get(out_point.index().unpack())
                     .expect("output data");
 
-                    let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack())
+                    let cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack())
                         .out_point(out_point.to_owned())
                         .build();
-                    if !allow_in_txpool && !with_data {
-                        cell_meta.mem_cell_data_hash = None;
-                    }
 
                     CellStatus::live_cell(cell_meta)
                 })
diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs
index 0850383c0a..bdf0278df3 100644
--- a/util/types/src/core/cell.rs
+++ b/util/types/src/core/cell.rs
@@ -372,7 +372,7 @@ where
 /// TODO(doc): @quake
 pub trait CellProvider {
     /// TODO(doc): @quake
-    fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus;
+    fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus;
 }
 
 /// TODO(doc): @quake
@@ -400,13 +400,11 @@ where
     A: CellProvider,
     B: CellProvider,
 {
-    fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus {
-        match self.overlay.cell(out_point, with_data, allow_in_txpool) {
+    fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus {
+        match self.overlay.cell(out_point, with_data) {
             CellStatus::Live(cell_meta) => CellStatus::Live(cell_meta),
             CellStatus::Dead => CellStatus::Dead,
-            CellStatus::Unknown => self
-                .cell_provider
-                .cell(out_point, with_data, allow_in_txpool),
+            CellStatus::Unknown => self.cell_provider.cell(out_point, with_data),
         }
     }
 }
@@ -454,7 +452,7 @@ impl<'a> BlockCellProvider<'a> {
 }
 
 impl<'a> CellProvider for BlockCellProvider<'a> {
-    fn cell(&self, out_point: &OutPoint, _with_data: bool, _allow_in_txpool: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
         self.output_indices
             .get(&out_point.tx_hash())
             .and_then(|i| {
@@ -534,7 +532,7 @@ impl<'a> TransactionsProvider<'a> {
 }
 
 impl<'a> CellProvider for TransactionsProvider<'a> {
-    fn cell(&self, out_point: &OutPoint, with_data: bool, allow_in_txpool: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
         match self.transactions.get(&out_point.tx_hash()) {
             Some(tx) => tx
                 .outputs()
@@ -545,10 +543,7 @@ impl<'a> CellProvider for TransactionsProvider<'a> {
                     .get(out_point.index().unpack())
                     .expect("output data")
                     .raw_data();
-                    let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data).build();
-                    if !allow_in_txpool && !with_data {
-                        cell_meta.mem_cell_data_hash = None;
-                    }
+                    let cell_meta = CellMetaBuilder::from_cell_output(cell, data).build();
                     CellStatus::live_cell(cell_meta)
                 })
                 .unwrap_or(CellStatus::Unknown),
@@ -627,7 +622,6 @@ pub fn resolve_transaction<CP: CellProvider, HC: HeaderChecker>(
     seen_inputs: &mut HashSet<OutPoint>,
     cell_provider: &CP,
     header_checker: &HC,
-    allow_in_txpool: bool,
 ) -> Result<ResolvedTransaction, OutPointError> {
     let (mut resolved_inputs, mut resolved_cell_deps, mut resolved_dep_groups) = (
         Vec::with_capacity(transaction.inputs().len()),
@@ -641,7 +635,7 @@ pub fn resolve_transaction<CP: CellProvider, HC: HeaderChecker>(
             return Err(OutPointError::Dead(out_point.clone()));
         }
 
-        let cell_status = cell_provider.cell(out_point, with_data, allow_in_txpool);
+        let cell_status = cell_provider.cell(out_point, with_data);
         match cell_status {
             CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())),
             CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())),
@@ -743,7 +737,7 @@ fn build_cell_meta_from_out_point<CP: CellProvider>(
     cell_provider: &CP,
     out_point: &OutPoint,
 ) -> Result<CellMeta, OutPointError> {
-    let cell_status = cell_provider.cell(out_point, true, false);
+    let cell_status = cell_provider.cell(out_point, true);
     match cell_status {
         CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())),
         CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())),
@@ -861,7 +855,7 @@ mod tests {
         cells: HashMap<OutPoint, Option<CellMeta>>,
     }
     impl CellProvider for CellMemoryDb {
-        fn cell(&self, o: &OutPoint, _with_data: bool, _allow_in_txpool: bool) -> CellStatus {
+        fn cell(&self, o: &OutPoint, _with_data: bool) -> CellStatus {
             match self.cells.get(o) {
                 Some(&Some(ref cell_meta)) => CellStatus::live_cell(cell_meta.clone()),
                 Some(&None) => CellStatus::Dead,
@@ -918,9 +912,9 @@ mod tests {
         db.cells.insert(p1.clone(), Some(o.clone()));
         db.cells.insert(p2.clone(), None);
 
-        assert_eq!(CellStatus::Live(o), db.cell(&p1, false, false));
-        assert_eq!(CellStatus::Dead, db.cell(&p2, false, false));
-        assert_eq!(CellStatus::Unknown, db.cell(&p3, false, false));
+        assert_eq!(CellStatus::Live(o), db.cell(&p1, false));
+        assert_eq!(CellStatus::Dead, db.cell(&p2, false));
+        assert_eq!(CellStatus::Unknown, db.cell(&p3, false));
     }
 
     #[test]
@@ -959,7 +953,6 @@ mod tests {
             &mut seen_inputs,
             &cell_provider,
             &header_checker,
-            false,
         )
         .unwrap();
 
@@ -993,7 +986,6 @@ mod tests {
            &mut seen_inputs,
            &cell_provider,
            &header_checker,
-            false,
        );
        assert_error_eq!(result.unwrap_err(), OutPointError::InvalidDepGroup(op_dep));
    }
@@ -1023,7 +1015,6 @@ mod tests {
            &mut seen_inputs,
            &cell_provider,
            &header_checker,
-            false,
        );
        assert_error_eq!(result.unwrap_err(), OutPointError::Unknown(op_unknown),);
    }
@@ -1050,7 +1041,6 @@ mod tests {
            &mut seen_inputs,
            &cell_provider,
            &header_checker,
-            false,
        );
        assert!(result.is_ok());
 
@@ -1077,7 +1067,6 @@ mod tests {
            &mut seen_inputs,
            &cell_provider,
            &header_checker,
-            false,
        );
 
        assert_error_eq!(
@@ -1170,8 +1159,8 @@ mod tests {
            .build();
 
        let mut seen_inputs = HashSet::new();
-        let rtx = resolve_transaction(tx, &mut seen_inputs, &cell_provider, &header_checker, false)
-            .unwrap();
+        let rtx =
+            resolve_transaction(tx, &mut seen_inputs, &cell_provider, &header_checker).unwrap();
 
        assert_eq!(rtx.resolved_cell_deps[0], dummy_cell_meta,);
    }
@@ -1198,22 +1187,12 @@ mod tests {
            .build();
 
        let mut seen_inputs = HashSet::new();
-        let result1 = resolve_transaction(
-            tx1,
-            &mut seen_inputs,
-            &cell_provider,
-            &header_checker,
-            false,
-        );
+        let result1 =
+            resolve_transaction(tx1, &mut seen_inputs, &cell_provider, &header_checker);
        assert!(result1.is_ok());
 
-        let result2 = resolve_transaction(
-            tx2,
-            &mut seen_inputs,
-            &cell_provider,
-            &header_checker,
-            false,
-        );
+        let result2 =
+            resolve_transaction(tx2, &mut seen_inputs, &cell_provider, &header_checker);
        assert!(result2.is_ok());
    }
 
@@ -1229,23 +1208,13 @@ mod tests {
 
        let tx2 = TransactionBuilder::default().cell_dep(dep).build();
 
        let mut seen_inputs = HashSet::new();
-        let result1 = resolve_transaction(
-            tx1,
-            &mut seen_inputs,
-            &cell_provider,
-            &header_checker,
-            false,
-        );
+        let result1 =
+            resolve_transaction(tx1, &mut seen_inputs, &cell_provider, &header_checker);
        assert!(result1.is_ok());
 
-        let result2 = resolve_transaction(
-            tx2,
-            &mut seen_inputs,
-            &cell_provider,
-            &header_checker,
-            false,
-        );
+        let result2 =
+            resolve_transaction(tx2, &mut seen_inputs, &cell_provider, &header_checker);
        assert_error_eq!(result2.unwrap_err(), OutPointError::Dead(out_point));
    }
diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs
index 5611f30df5..cb4a0e98fd 100644
--- a/util/types/src/core/hardfork.rs
+++ b/util/types/src/core/hardfork.rs
@@ -98,7 +98,6 @@ pub struct HardForkSwitch {
     rfc_pr_0222: EpochNumber,
     rfc_pr_0223: EpochNumber,
     rfc_pr_0224: EpochNumber,
-    rfc_pr_0228: EpochNumber,
 }
 
 /// Builder for [`HardForkSwitch`].
@@ -125,11 +124,6 @@ pub struct HardForkSwitchBuilder {
     ///
     /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title)
     pub rfc_pr_0224: Option<EpochNumber>,
-    /// Let the syscall `load_cell_data_hash` return correct data hash
-    /// for cells which are still in the tx pool and not committed yet.
-    ///
-    /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title)
-    pub rfc_pr_0228: Option<EpochNumber>,
 }
 
 impl HardForkSwitch {
@@ -145,7 +139,6 @@ impl HardForkSwitch {
             .rfc_pr_0222(self.rfc_pr_0222())
             .rfc_pr_0223(self.rfc_pr_0223())
             .rfc_pr_0224(self.rfc_pr_0224())
-            .rfc_pr_0228(self.rfc_pr_0228())
     }
 
     /// Creates a new instance that all hard fork features are disabled forever.
@@ -156,7 +149,6 @@ impl HardForkSwitch {
             .disable_rfc_pr_0222()
             .disable_rfc_pr_0223()
             .disable_rfc_pr_0224()
-            .disable_rfc_pr_0228()
             .build()
             .unwrap()
     }
@@ -190,13 +182,6 @@ define_methods!(
     disable_rfc_pr_0224,
     "RFC PR 0224"
 );
-define_methods!(
-    rfc_pr_0228,
-    allow_cell_data_hash_in_txpool,
-    is_allow_cell_data_hash_in_txpool_enabled,
-    disable_rfc_pr_0228,
-    "RFC PR 0228"
-);
 
 impl HardForkSwitchBuilder {
     /// Build a new [`HardForkSwitch`].
@@ -217,13 +202,11 @@ impl HardForkSwitchBuilder {
         let rfc_pr_0222 = try_find!(rfc_pr_0222);
         let rfc_pr_0223 = try_find!(rfc_pr_0223);
         let rfc_pr_0224 = try_find!(rfc_pr_0224);
-        let rfc_pr_0228 = try_find!(rfc_pr_0228);
         Ok(HardForkSwitch {
             rfc_pr_0221,
             rfc_pr_0222,
             rfc_pr_0223,
             rfc_pr_0224,
-            rfc_pr_0228,
         })
     }
 }

From 4e1109e2434d5c677a60a04ffae2abbc0ee13ac4 Mon Sep 17 00:00:00 2001
From: Boyu Yang
Date: Thu, 10 Jun 2021 17:41:54 +0800
Subject: [PATCH 16/18] Revert "Merge branch zhangsoledad/fix_load_cell_data_hash (commit: 33eb0d9) into branch ckb2021-develop"

This reverts commit 0a1225c11d1053cb4b77672ea4e0b22bd782d717, reversing changes made to 40847cad75f4967b4723286927533dd90431d33c.
---
 chain/src/tests/load_input_data_hash_cell.rs | 16 +++++++++++----
 script/src/syscalls/load_cell.rs             | 17 +++++++---------
 script/src/syscalls/mod.rs                   | 21 --------------------
 script/src/verify.rs                         |  3 +--
 tx-pool/src/component/pending.rs             |  7 +++++--
 tx-pool/src/component/proposed.rs            |  7 +++++--
 util/test-chain-utils/src/mock_store.rs      |  7 +++++--
 util/types/src/core/cell.rs                  |  7 +++++--
 8 files changed, 40 insertions(+), 45 deletions(-)

diff --git a/chain/src/tests/load_input_data_hash_cell.rs b/chain/src/tests/load_input_data_hash_cell.rs
index f43e7770a5..b3b77fb81b 100644
--- a/chain/src/tests/load_input_data_hash_cell.rs
+++ b/chain/src/tests/load_input_data_hash_cell.rs
@@ -4,6 +4,7 @@ use crate::tests::util::{
 use ckb_chain_spec::consensus::ConsensusBuilder;
 use ckb_dao_utils::genesis_dao_data;
 use ckb_test_chain_utils::load_input_data_hash_cell;
+use ckb_tx_pool::{PlugTarget, TxEntry};
 use ckb_types::prelude::*;
 use ckb_types::{
     bytes::Bytes,
@@ -48,7 +49,7 @@ pub(crate) fn create_load_input_data_hash_transaction(
         .build()
 }
 
-// Ensure tx-pool accept tx which calls syscall load_cell_data_hash from input
+// Ensure tx-pool reject tx which calls syscall load_cell_data_hash from input
 #[test]
 fn test_load_input_data_hash_cell() {
     let (_, _, load_input_data_hash_script) = load_input_data_hash_cell();
@@ -85,9 +86,16 @@ fn test_load_input_data_hash_cell() {
     let tx1 = create_load_input_data_hash_transaction(&tx0, 0);
 
     let tx_pool = shared.tx_pool_controller();
-    let ret = tx_pool.submit_local_tx(tx0).unwrap();
-    assert!(ret.is_ok());
+    let ret = tx_pool.submit_local_tx(tx0.clone()).unwrap();
+    assert!(ret.is_err());
+    //ValidationFailure(2) missing item
+    assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)"));
 
+    let entry0 = vec![TxEntry::dummy_resolve(tx0, 0, Capacity::shannons(0), 100)];
+    tx_pool.plug_entry(entry0, PlugTarget::Proposed).unwrap();
+
+    // Ensure tx which calls syscall load_cell_data_hash will got reject even previous tx is already in tx-pool
     let ret = tx_pool.submit_local_tx(tx1).unwrap();
-    assert!(ret.is_ok());
+    assert!(ret.is_err());
+    assert!(format!("{}", ret.err().unwrap()).contains("ValidationFailure(2)"));
 }
diff --git a/script/src/syscalls/load_cell.rs b/script/src/syscalls/load_cell.rs
index 9a2d85710d..a34cd0cb04 100644
--- a/script/src/syscalls/load_cell.rs
+++ b/script/src/syscalls/load_cell.rs
@@ -6,7 +6,6 @@ use crate::{
     },
 };
 use byteorder::{LittleEndian, WriteBytesExt};
-use ckb_traits::CellDataProvider;
 use ckb_types::{
     core::{cell::CellMeta, Capacity},
     packed::CellOutput,
@@ -17,8 +16,7 @@ use ckb_vm::{
     Error as VMError, Register, SupportMachine, Syscalls,
 };
 
-pub struct LoadCell<'a, DL> {
-    data_loader: &'a DL,
+pub struct LoadCell<'a> {
     outputs: &'a [CellMeta],
     resolved_inputs: &'a [CellMeta],
     resolved_cell_deps: &'a [CellMeta],
@@ -26,17 +24,15 @@ pub struct LoadCell<'a, DL> {
     group_outputs: &'a [usize],
 }
 
-impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> {
+impl<'a> LoadCell<'a> {
     pub fn new(
-        data_loader: &'a DL,
         outputs: &'a [CellMeta],
         resolved_inputs: &'a [CellMeta],
         resolved_cell_deps: &'a [CellMeta],
         group_inputs: &'a [usize],
         group_outputs: &'a [usize],
-    ) -> LoadCell<'a, DL> {
+    ) -> LoadCell<'a> {
         LoadCell {
-            data_loader,
             outputs,
             resolved_inputs,
             resolved_cell_deps,
@@ -102,8 +98,9 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> {
                 (SUCCESS, store_data(machine, &buffer)?)
             }
             CellField::DataHash => {
-                if let Some(bytes) = self.data_loader.load_cell_data_hash(cell) {
-                    (SUCCESS, store_data(machine, &bytes.as_bytes())?)
+                if let Some(data_hash) = &cell.mem_cell_data_hash {
+                    let bytes = data_hash.raw_data();
+                    (SUCCESS, store_data(machine, &bytes)?)
                 } else {
                     (ITEM_MISSING, 0)
                 }
@@ -147,7 +144,7 @@ impl<'a, DL: CellDataProvider + 'a> LoadCell<'a, DL> {
     }
 }
 
-impl<'a, Mac: SupportMachine, DL: CellDataProvider> Syscalls<Mac> for LoadCell<'a, DL> {
+impl<'a, Mac: SupportMachine> Syscalls<Mac> for LoadCell<'a> {
     fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> {
         Ok(())
     }
diff --git a/script/src/syscalls/mod.rs b/script/src/syscalls/mod.rs
index f3864aa036..1382cb75fd 100644
--- a/script/src/syscalls/mod.rs
+++ b/script/src/syscalls/mod.rs
@@ -244,10 +244,7 @@ mod tests {
         let resolved_cell_deps = vec![];
         let group_inputs = vec![];
         let group_outputs = vec![];
-        let store = new_store();
-        let data_loader = DataLoaderWrapper::new(&store);
         let mut load_cell = LoadCell::new(
-            &data_loader,
             &outputs,
             &resolved_inputs,
             &resolved_cell_deps,
@@ -288,10 +285,7 @@ mod tests {
         let resolved_cell_deps = vec![];
         let group_inputs = vec![];
         let group_outputs = vec![];
-        let store = new_store();
-        let data_loader = DataLoaderWrapper::new(&store);
         let mut load_cell = LoadCell::new(
-            &data_loader,
             &outputs,
             &resolved_inputs,
             &resolved_cell_deps,
@@ -380,10 +374,7 @@ mod tests {
         let resolved_cell_deps = vec![];
         let group_inputs = vec![];
         let group_outputs = vec![];
-        let store = new_store();
-        let data_loader = DataLoaderWrapper::new(&store);
         let mut load_cell = LoadCell::new(
-            &data_loader,
             &outputs,
             &resolved_inputs,
             &resolved_cell_deps,
@@ -434,10 +425,7 @@ mod tests {
         let resolved_cell_deps = vec![];
         let group_inputs = vec![];
         let group_outputs = vec![];
-        let store = new_store();
-        let data_loader = DataLoaderWrapper::new(&store);
         let mut load_cell = LoadCell::new(
-            &data_loader,
             &outputs,
             &resolved_inputs,
             &resolved_cell_deps,
@@ -501,10 +489,7 @@ mod tests {
         let resolved_cell_deps = vec![];
         let group_inputs = vec![];
         let group_outputs = vec![];
-        let store = new_store();
-        let data_loader = DataLoaderWrapper::new(&store);
         let mut load_cell = LoadCell::new(
-            &data_loader,
             &outputs,
             &resolved_inputs,
             &resolved_cell_deps,
@@ -555,10 +540,7 @@ mod tests {
         let resolved_cell_deps = vec![];
         let group_inputs = vec![];
         let group_outputs = vec![];
-        let store = new_store();
-        let data_loader = DataLoaderWrapper::new(&store);
         let mut load_cell = LoadCell::new(
-            &data_loader,
             &outputs,
             &resolved_inputs,
             &resolved_cell_deps,
@@ -896,10 +878,7 @@ mod tests {
         let resolved_cell_deps = vec![];
         let group_inputs = vec![];
         let group_outputs = vec![];
-        let store = new_store();
-        let data_loader = DataLoaderWrapper::new(&store);
         let mut load_cell = LoadCell::new(
-            &data_loader,
             &outputs,
             &resolved_inputs,
             &resolved_cell_deps,
diff --git a/script/src/verify.rs b/script/src/verify.rs
index 4017bf601a..d7aef19394 100644
--- a/script/src/verify.rs
+++ b/script/src/verify.rs
@@ -240,9 +240,8 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D
         &'a self,
         group_inputs: &'a [usize],
         group_outputs: &'a [usize],
-    ) -> LoadCell<'a, DL> {
+    ) -> LoadCell<'a> {
         LoadCell::new(
-            &self.data_loader,
             &self.outputs,
             self.resolved_inputs(),
             self.resolved_cell_deps(),
diff --git a/tx-pool/src/component/pending.rs b/tx-pool/src/component/pending.rs
index 0e76fc6348..02b26b6f2e 100644
--- a/tx-pool/src/component/pending.rs
+++ b/tx-pool/src/component/pending.rs
@@ -73,7 +73,7 @@ impl PendingQueue {
 }
 
 impl CellProvider for PendingQueue {
-    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus {
         let tx_hash = out_point.tx_hash();
         if let Some(entry) = self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) {
             match entry
@@ -81,9 +81,12 @@ impl CellProvider for PendingQueue {
                 .output_with_data(out_point.index().unpack())
             {
                 Some((output, data)) => {
-                    let cell_meta = CellMetaBuilder::from_cell_output(output, data)
+                    let mut cell_meta = CellMetaBuilder::from_cell_output(output, data)
                         .out_point(out_point.to_owned())
                         .build();
+                    if !with_data {
+                        cell_meta.mem_cell_data_hash = None;
+                    }
                     CellStatus::live_cell(cell_meta)
                 }
                 None => CellStatus::Unknown,
diff --git a/tx-pool/src/component/proposed.rs b/tx-pool/src/component/proposed.rs
index c365ccadf9..342b639099 100644
--- a/tx-pool/src/component/proposed.rs
+++ b/tx-pool/src/component/proposed.rs
@@ -97,16 +97,19 @@ pub struct ProposedPool {
 }
 
 impl CellProvider for ProposedPool {
-    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus {
         if let Some(x) = self.edges.get_output_ref(out_point) {
             // output consumed
             if x.is_some() {
                 CellStatus::Dead
             } else {
                 let (output, data) = self.get_output_with_data(out_point).expect("output");
-                let cell_meta = CellMetaBuilder::from_cell_output(output, data)
+                let mut cell_meta = CellMetaBuilder::from_cell_output(output, data)
                     .out_point(out_point.to_owned())
                     .build();
+                if !with_data {
+                    cell_meta.mem_cell_data_hash = None;
+                }
                 CellStatus::live_cell(cell_meta)
             }
         } else if self.edges.get_input_ref(out_point).is_some() {
diff --git a/util/test-chain-utils/src/mock_store.rs b/util/test-chain-utils/src/mock_store.rs
index 896e456b19..7ac815a646 100644
--- a/util/test-chain-utils/src/mock_store.rs
+++ b/util/test-chain-utils/src/mock_store.rs
@@ -71,7 +71,7 @@ impl MockStore {
 }
 
 impl CellProvider for MockStore {
-    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus {
         match self.0.get_transaction(&out_point.tx_hash()) {
             Some((tx, _)) => tx
                 .outputs()
@@ -82,9 +82,12 @@ impl CellProvider for MockStore {
                     .get(out_point.index().unpack())
                     .expect("output data");
 
-                    let cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack())
+                    let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data.unpack())
                         .out_point(out_point.to_owned())
                         .build();
+                    if !with_data {
+                        cell_meta.mem_cell_data = None;
+                    }
 
                     CellStatus::live_cell(cell_meta)
                 })
diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs
index bdf0278df3..cf826c3f98 100644
--- a/util/types/src/core/cell.rs
+++ b/util/types/src/core/cell.rs
@@ -532,7 +532,7 @@ impl<'a> TransactionsProvider<'a> {
 }
 
 impl<'a> CellProvider for TransactionsProvider<'a> {
-    fn cell(&self, out_point: &OutPoint, _with_data: bool) -> CellStatus {
+    fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus {
         match self.transactions.get(&out_point.tx_hash()) {
             Some(tx) => tx
                 .outputs()
@@ -543,7 +543,10 @@ impl<'a> CellProvider for TransactionsProvider<'a> {
                     .get(out_point.index().unpack())
                     .expect("output data")
                     .raw_data();
-                    let cell_meta = CellMetaBuilder::from_cell_output(cell, data).build();
+                    let mut cell_meta = CellMetaBuilder::from_cell_output(cell, data).build();
+                    if !with_data {
+                        cell_meta.mem_cell_data_hash = None;
+                    }
                     CellStatus::live_cell(cell_meta)
                 })
                 .unwrap_or(CellStatus::Unknown),

From 56c145e96c80e2971e8d85a43d8a8bc5ec8c5123 Mon Sep 17 00:00:00 2001
From: Boyu Yang
Date: Thu, 10 Jun 2021 17:54:01 +0800
Subject: [PATCH 17/18] Revert "refactor: remove useless parameter "with_data" because it always be true (tricky)"

This reverts commit 40847cad75f4967b4723286927533dd90431d33c.

---
 util/types/src/core/cell.rs | 53 ++++++++++++++++++-------------------
 1 file changed, 26 insertions(+), 27 deletions(-)

diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs
index cf826c3f98..4098c7321d 100644
--- a/util/types/src/core/cell.rs
+++ b/util/types/src/core/cell.rs
@@ -600,11 +600,11 @@ fn parse_dep_group_data(slice: &[u8]) -> Result<OutPointVec, String> {
     }
 }
 
-fn resolve_dep_group<F: FnMut(&OutPoint) -> Result<CellMeta, OutPointError>>(
+fn resolve_dep_group<F: FnMut(&OutPoint, bool) -> Result<CellMeta, OutPointError>>(
     out_point: &OutPoint,
     mut cell_resolver: F,
 ) -> Result<(CellMeta, Vec<CellMeta>), OutPointError> {
-    let dep_group_cell = cell_resolver(out_point)?;
+    let dep_group_cell = cell_resolver(out_point, true)?;
     let data = dep_group_cell
         .mem_cell_data
         .clone()
@@ -614,7 +614,7 @@ fn resolve_dep_group<F: FnMut(&OutPoint) -> Result<CellMeta, OutPointError>>(
         .map_err(|_| OutPointError::InvalidDepGroup(out_point.clone()))?;
     let mut resolved_deps = Vec::with_capacity(sub_out_points.len());
     for sub_out_point in sub_out_points.into_iter() {
-        resolved_deps.push(cell_resolver(&sub_out_point)?);
+        resolved_deps.push(cell_resolver(&sub_out_point, true)?);
     }
     Ok((dep_group_cell, resolved_deps))
 }
@@ -633,18 +633,19 @@ pub fn resolve_transaction<CP: CellProvider, HC: HeaderChecker>(
     );
     let mut current_inputs = HashSet::new();
 
-    let resolve_cell = |out_point: &OutPoint, with_data: bool| -> Result<CellMeta, OutPointError> {
-        if seen_inputs.contains(out_point) {
-            return Err(OutPointError::Dead(out_point.clone()));
-        }
+    let mut resolve_cell =
+        |out_point: &OutPoint, with_data: bool| -> Result<CellMeta, OutPointError> {
+            if seen_inputs.contains(out_point) {
+                return Err(OutPointError::Dead(out_point.clone()));
+            }
 
-        let cell_status = cell_provider.cell(out_point, with_data);
-        match cell_status {
-            CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())),
-            CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())),
-            CellStatus::Live(cell_meta) => Ok(cell_meta),
-        }
-    };
+            let cell_status = cell_provider.cell(out_point, with_data);
+            match cell_status {
+                CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())),
+                CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())),
+                CellStatus::Live(cell_meta) => Ok(cell_meta),
+            }
+        };
 
     // skip resolve input of cellbase
     if !transaction.is_cellbase() {
@@ -656,12 +657,9 @@ pub fn resolve_transaction<CP: CellProvider, HC: HeaderChecker>(
         }
     }
 
-    let mut resolve_cell_alway_with_data =
-        |out_point: &OutPoint| -> Result<CellMeta, OutPointError> { resolve_cell(out_point, true) };
-
     resolve_transaction_deps_with_system_cell_cache(
         &transaction,
-        &mut resolve_cell_alway_with_data,
+        &mut resolve_cell,
         &mut resolved_cell_deps,
         &mut resolved_dep_groups,
     )?;
@@ -680,7 +678,7 @@ pub fn resolve_transaction<CP: CellProvider, HC: HeaderChecker>(
 }
 
 fn resolve_transaction_deps_with_system_cell_cache<
-    F: FnMut(&OutPoint) -> Result<CellMeta, OutPointError>,
+    F: FnMut(&OutPoint, bool) -> Result<CellMeta, OutPointError>,
 >(
     transaction: &TransactionView,
     cell_resolver: &mut F,
@@ -720,7 +718,7 @@ fn resolve_transaction_deps_with_system_cell_cache<
     Ok(())
 }
 
-fn resolve_transaction_dep<F: FnMut(&OutPoint) -> Result<CellMeta, OutPointError>>(
+fn resolve_transaction_dep<F: FnMut(&OutPoint, bool) -> Result<CellMeta, OutPointError>>(
     cell_dep: &CellDep,
     cell_resolver: &mut F,
     resolved_cell_deps: &mut Vec<CellMeta>,
@@ -731,7 +729,7 @@ fn resolve_transaction_dep<F: FnMut(&OutPoint) -> Result<CellMeta, OutPointErro
         resolved_dep_groups.push(dep_group);
         resolved_cell_deps.extend(cell_deps);
     } else {
-        resolved_cell_deps.push(cell_resolver(&cell_dep.out_point())?);
+        resolved_cell_deps.push(cell_resolver(&cell_dep.out_point(), true)?);
     }
     Ok(())
 }
@@ -739,8 +737,9 @@ fn resolve_transaction_dep<F: FnMut(&OutPoint) -> Result<CellMeta, OutPointErro
 fn build_cell_meta_from_out_point<CP: CellProvider>(
     cell_provider: &CP,
     out_point: &OutPoint,
+    with_data: bool,
 ) -> Result<CellMeta, OutPointError> {
-    let cell_status = cell_provider.cell(out_point, true);
+    let cell_status = cell_provider.cell(out_point, with_data);
     match cell_status {
         CellStatus::Dead => Err(OutPointError::Dead(out_point.clone())),
         CellStatus::Unknown => Err(OutPointError::Unknown(out_point.clone())),
@@ -782,21 +781,21 @@ pub fn setup_system_cell_cache<CP: CellProvider>(
     let mut cell_deps = HashMap::new();
 
     let secp_code_dep_cell =
-        build_cell_meta_from_out_point(cell_provider, &secp_code_dep.out_point())
+        build_cell_meta_from_out_point(cell_provider, &secp_code_dep.out_point(), true)
             .expect("resolve secp_code_dep_cell");
     cell_deps.insert(secp_code_dep, ResolvedDep::Cell(secp_code_dep_cell));
 
-    let dao_dep_cell = build_cell_meta_from_out_point(cell_provider, &dao_dep.out_point())
+    let dao_dep_cell = build_cell_meta_from_out_point(cell_provider, &dao_dep.out_point(), true)
         .expect("resolve dao_dep_cell");
     cell_deps.insert(dao_dep, ResolvedDep::Cell(dao_dep_cell));
 
     let secp_data_dep_cell =
-        build_cell_meta_from_out_point(cell_provider, &secp_data_dep.out_point())
+        build_cell_meta_from_out_point(cell_provider, &secp_data_dep.out_point(), true)
            .expect("resolve secp_data_dep_cell");
     cell_deps.insert(secp_data_dep, ResolvedDep::Cell(secp_data_dep_cell));
 
-    let resolve_cell = |out_point: &OutPoint| -> Result<CellMeta, OutPointError> {
-        build_cell_meta_from_out_point(cell_provider, out_point)
+    let resolve_cell = |out_point: &OutPoint, with_data: bool| -> Result<CellMeta, OutPointError> {
+        build_cell_meta_from_out_point(cell_provider, out_point, with_data)
     };
 
     let secp_group_dep_cell = resolve_dep_group(&secp_group_dep.out_point(), resolve_cell)

From 30360a2968660599c030c35f3746b3baffb3a584 Mon Sep 17 00:00:00 2001
From: Boyu Yang
Date: Fri, 11 Jun 2021 20:35:14 +0800
Subject: [PATCH 18/18] chore(hardfork): apply review suggestions about block timestamp in since

---
 verification/src/transaction_verifier.rs | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs
index affd5d274d..f8b1d558af 100644
--- a/verification/src/transaction_verifier.rs
+++ b/verification/src/transaction_verifier.rs
@@ -588,11 +588,6 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> {
         self.block_median_time(&parent_hash)
     }
 
-    fn parent_block_time(&self, block_hash: &Byte32) -> u64 {
-        let (timestamp, _, _) = self.data_loader.timestamp_and_parent(block_hash);
-        timestamp
-    }
-
     fn block_median_time(&self, block_hash: &Byte32) -> u64 {
         if let Some(median_time) = self.median_timestamps_cache.borrow().peek(block_hash) {
             return *median_time;
@@ -692,7 +687,10 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> {
             let base_timestamp = if hardfork_switch
                 .is_block_ts_as_relative_since_start_enabled(epoch_number)
             {
-                self.parent_block_time(&info.block_hash)
+                self.data_loader
+                    .get_header(&info.block_hash)
+                    .expect("header exist")
+                    .timestamp()
             } else {
                 self.parent_median_time(&info.block_hash)
             };