From e0a7bc6c048245943796839b166505e2aecdbd7d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:14:10 +0100 Subject: [PATCH 01/46] flaky test fixed --- .../import_notification_sink.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs index 7fbdcade63b8..f9a41673bb8f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs @@ -326,6 +326,7 @@ mod tests { let j0 = tokio::spawn(runnable); let stream = ctrl.event_stream(); + let stream2 = ctrl.event_stream(); let mut v1 = View::new(vec![(10, 1), (10, 2), (10, 3)]); let mut v2 = View::new(vec![(20, 1), (20, 2), (20, 6)]); @@ -342,20 +343,16 @@ mod tests { ctrl.add_view(1000, o1); ctrl.add_view(2000, o2); - let j4 = { - let ctrl = ctrl.clone(); - tokio::spawn(async move { - tokio::time::sleep(Duration::from_millis(70)).await; - ctrl.clean_notified_items(&vec![1, 3]); - ctrl.add_view(3000, o3.boxed()); - }) - }; + let out = stream.take(4).collect::>().await; + assert_eq!(out, vec![1, 2, 3, 6]); - let out = stream.take(6).collect::>().await; + ctrl.clean_notified_items(&vec![1, 3]); + ctrl.add_view(3000, o3.boxed()); + let out = stream2.take(6).collect::>().await; assert_eq!(out, vec![1, 2, 3, 6, 1, 3]); - drop(ctrl); - futures::future::join_all(vec![j0, j1, j2, j3, j4]).await; + drop(ctrl); + futures::future::join_all(vec![j0, j1, j2, j3]).await; } #[tokio::test] From 354d6a3789d888577341bc7608e973f3d3fe574c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 30 Oct 2024 15:47:30 +0100 Subject: [PATCH 02/46] dropped_watcher: explicit view removal --- .../src/fork_aware_txpool/dropped_watcher.rs | 8 ++--- .../src/fork_aware_txpool/view_store.rs | 31 ++++++++++++++----- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index ecae21395c91..65af89e7b528 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -142,12 +142,7 @@ where self.transaction_states.entry(tx_hash) { views_keeping_tx_valid.get_mut().remove(&block_hash); - if views_keeping_tx_valid.get().is_empty() || - views_keeping_tx_valid - .get() - .iter() - .all(|h| !self.stream_map.contains_key(h)) - { + if views_keeping_tx_valid.get().is_empty() { return Some(tx_hash) } } else { @@ -373,6 +368,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); assert!(output_stream.next().now_or_never().is_none()); + watcher.remove_view(block_hash0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index f23dcedd5bfd..8f618fde5613 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -438,27 +438,44 
@@ where let finalized_xts = self.finalize_route(finalized_hash, tree_route).await; let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); + let mut dropped_views = vec![]; //clean up older then finalized { let mut active_views = self.active_views.write(); - active_views.retain(|hash, v| match finalized_number { - Err(_) | Ok(None) => *hash == finalized_hash, - Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, - Ok(Some(n)) => v.at.number > n, + active_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => *hash == finalized_hash, + Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, + Ok(Some(n)) => v.at.number > n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); } { let mut inactive_views = self.inactive_views.write(); - inactive_views.retain(|_, v| match finalized_number { - Err(_) | Ok(None) => false, - Ok(Some(n)) => v.at.number >= n, + inactive_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => false, + Ok(Some(n)) => v.at.number >= n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); } self.listener.remove_view(finalized_hash); + for view in dropped_views { + self.listener.remove_view(view); + self.dropped_stream_controller.remove_view(view); + } self.listener.remove_stale_controllers(); self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); From 115155639e02998934a30ed710965d6cb9b82e71 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 31 Oct 2024 13:41:01 +0100 Subject: [PATCH 03/46] fatp: prios some intial tests added --- .../transaction-pool/tests/fatp_prios.rs | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 substrate/client/transaction-pool/tests/fatp_prios.rs diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs new file mode 100644 index 000000000000..9c70bb4b371c --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of priorities for fork-aware transaction pool. 
+ +pub mod fatp_common; + +use fatp_common::{ + finalized_block_event, invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE, +}; +use futures::{executor::block_on, FutureExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{ + error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, +}; +use std::{thread::sleep, time::Duration}; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +#[test] +fn fatp_prio_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let result0 = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())); + let result1 = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())); + + log::info!("r0 => {:?}", result0); + log::info!("r1 => {:?}", result1); + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); + + // let results = block_on(futures::future::join_all(submissions)); + // assert!(results.iter().all(Result::is_ok)); + // //charlie was not included into view: + // assert_pool_status!(header01.hash(), &pool, 2, 0); + + // //branch with alice transactions: + // let header02b = api.push_block(2, vec![xt1.clone(), xt2.clone()], true); + // let event = new_best_block_event(&pool, Some(header01.hash()), header02b.hash()); + // block_on(pool.maintain(event)); + // assert_eq!(pool.mempool_len().0, 2); + // assert_pool_status!(header02b.hash(), &pool, 0, 0); + // assert_ready_iterator!(header02b.hash(), pool, []); + // + // //branch with alice/charlie transactions shall also work: + // let header02a = api.push_block(2, vec![xt0.clone(), xt1.clone()], true); + // api.set_nonce(header02a.hash(), Alice.into(), 201); + // let event = new_best_block_event(&pool, Some(header02b.hash()), header02a.hash()); + // block_on(pool.maintain(event)); + // assert_eq!(pool.mempool_len().0, 2); + // // assert_pool_status!(header02a.hash(), &pool, 1, 0); + // assert_ready_iterator!(header02a.hash(), pool, [xt2]); +} + +#[test] +fn fatp_prio_watcher_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + 
assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); + + // let results = block_on(futures::future::join_all(submissions)); + // assert!(results.iter().all(Result::is_ok)); + // //charlie was not included into view: + // assert_pool_status!(header01.hash(), &pool, 2, 0); + + // //branch with alice transactions: + // let header02b = api.push_block(2, vec![xt1.clone(), xt2.clone()], true); + // let event = new_best_block_event(&pool, Some(header01.hash()), header02b.hash()); + // block_on(pool.maintain(event)); + // assert_eq!(pool.mempool_len().0, 2); + // assert_pool_status!(header02b.hash(), &pool, 0, 0); + // assert_ready_iterator!(header02b.hash(), pool, []); + // + // //branch with alice/charlie transactions shall also work: + // let header02a = api.push_block(2, vec![xt0.clone(), xt1.clone()], true); + // api.set_nonce(header02a.hash(), Alice.into(), 201); + // let event = new_best_block_event(&pool, Some(header02b.hash()), header02a.hash()); + // block_on(pool.maintain(event)); + // assert_eq!(pool.mempool_len().0, 2); + // // assert_pool_status!(header02a.hash(), &pool, 1, 0); + // assert_ready_iterator!(header02a.hash(), pool, [xt2]); +} From c9f2d39355853d034fdbc6ea31e4e0e5bf34cb6a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:06:47 +0100 Subject: [PATCH 04/46] base_pool: handling conflicts in future improved --- .../transaction-pool/src/graph/base_pool.rs | 78 +++++++++++++++++-- 1 file changed, 72 insertions(+), 6 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index e4c3a6c425a9..9a87d77c3062 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -322,6 +322,7 @@ impl BasePool { if !first { @@ -331,13 +332,21 @@ impl BasePool + if first { + trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_tx.hash, e); + return Err(e) + } else { + removed.push(current_tx); + }, // transaction failed to be imported. 
Err(e) => if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_tx.hash, e); return Err(e) } else { - failed.push(current_hash); + trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_tx.hash, e); + failed.push(current_tx.hash.clone()); }, } first = false; @@ -760,6 +769,58 @@ mod tests { ); } + #[test] + fn should_remove_conflicting_future() { + let mut pool = pool(); + pool.import(Transaction { + data: vec![3u8].into(), + hash: 3, + requires: vec![vec![1]], + priority: 50u64, + provides: vec![vec![3]], + ..default_tx().clone() + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + let tx2 = Transaction { + data: vec![2u8].into(), + hash: 2, + requires: vec![vec![1]], + provides: vec![vec![3]], + ..default_tx().clone() + }; + pool.import(tx2.clone()).unwrap(); + assert_eq!(pool.future.len(), 2); + + let res = pool + .import(Transaction { + data: vec![1u8].into(), + hash: 1, + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); + + assert_eq!( + res, + Imported::Ready { + hash: 1, + promoted: vec![3], + failed: vec![], + removed: vec![tx2.into()] + } + ); + + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + + assert_eq!(pool.future.len(), 0); + } + #[test] fn should_handle_a_cycle() { // given @@ -783,14 +844,14 @@ mod tests { assert_eq!(pool.ready.len(), 0); // when - pool.import(Transaction { + let tx2 = Transaction { data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], ..default_tx().clone() - }) - .unwrap(); + }; + pool.import(tx2.clone()).unwrap(); // then { @@ -817,7 +878,12 @@ mod tests { assert_eq!(it.next(), None); assert_eq!( res, - Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } + Imported::Ready { + hash: 4, + promoted: vec![1, 3], + failed: vec![], + removed: vec![tx2.into()] + } ); assert_eq!(pool.future.len(), 0); } From 406c37b50703dd376eb12b4a4f29de0e4f40e1c7 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:31:26 +0100 Subject: [PATCH 05/46] dropped_watcher: rename C -> ChainApi --- .../src/fork_aware_txpool/dropped_watcher.rs | 55 ++++++++++--------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 65af89e7b528..29a5f00cb88f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -24,7 +24,7 @@ use crate::{ common::log_xt::log_xt_trace, fork_aware_txpool::stream_map_util::next_event, - graph::{BlockHash, ChainApi, ExtrinsicHash}, + graph::{self, BlockHash, ExtrinsicHash}, LOG_TARGET, }; use futures::stream::StreamExt; @@ -59,24 +59,24 @@ type Controller = mpsc::TracingUnboundedSender; type CommandReceiver = mpsc::TracingUnboundedReceiver; /// Commands to control the instance of dropped transactions stream [`StreamOfDropped`]. 
-enum Command +enum Command where - C: ChainApi, + ChainApi: graph::ChainApi, { /// Adds a new stream of dropped-related events originating in a view with a specific block /// hash - AddView(BlockHash, ViewStream), + AddView(BlockHash, ViewStream), /// Removes an existing view's stream associated with a specific block hash. - RemoveView(BlockHash), + RemoveView(BlockHash), /// Removes internal states for given extrinsic hashes. /// /// Intended to ba called on finalization. - RemoveFinalizedTxs(Vec>), + RemoveFinalizedTxs(Vec>), } -impl Debug for Command +impl Debug for Command where - C: ChainApi, + ChainApi: graph::ChainApi, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { @@ -92,29 +92,29 @@ where /// /// This struct maintains a mapping of active views and their corresponding streams, as well as the /// state of each transaction with respect to these views. -struct MultiViewDropWatcherContext +struct MultiViewDropWatcherContext where - C: ChainApi, + ChainApi: graph::ChainApi, { /// A map that associates the views identified by corresponding block hashes with their streams /// of dropped-related events. This map is used to keep track of active views and their event /// streams. - stream_map: StreamMap, ViewStream>, + stream_map: StreamMap, ViewStream>, /// A receiver for commands to control the state of the stream, allowing the addition and /// removal of views. This is used to dynamically update which views are being tracked. - command_receiver: CommandReceiver>, + command_receiver: CommandReceiver>, /// For each transaction hash we keep the set of hashes representing the views that see this /// transaction as ready or future. /// /// Once transaction is dropped, dropping view is removed from the set. - transaction_states: HashMap, HashSet>>, + transaction_states: HashMap, HashSet>>, } impl MultiViewDropWatcherContext where - C: ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + C: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { /// Processes a `ViewStreamEvent` from a specific view and updates the internal state /// accordingly. @@ -220,30 +220,30 @@ where /// /// This struct provides methods to add and remove streams associated with views to and from the /// stream. -pub struct MultiViewDroppedWatcherController { +pub struct MultiViewDroppedWatcherController { /// A controller allowing to update the state of the associated [`StreamOfDropped`]. - controller: Controller>, + controller: Controller>, } -impl Clone for MultiViewDroppedWatcherController { +impl Clone for MultiViewDroppedWatcherController { fn clone(&self) -> Self { Self { controller: self.controller.clone() } } } -impl MultiViewDroppedWatcherController +impl MultiViewDroppedWatcherController where - C: ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + ChainApi: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { /// Creates new [`StreamOfDropped`] and its controller. - pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { - let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); + pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { + let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); (Self { controller: ctrl }, stream_map.boxed()) } /// Notifies the [`StreamOfDropped`] that new view was created. 
- pub fn add_view(&self, key: BlockHash, view: ViewStream) { + pub fn add_view(&self, key: BlockHash, view: ViewStream) { let _ = self.controller.unbounded_send(Command::AddView(key, view)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: add_view {key:?} send message failed: {e}"); }); @@ -251,14 +251,17 @@ where /// Notifies the [`StreamOfDropped`] that the view was destroyed and shall be removed the /// stream map. - pub fn remove_view(&self, key: BlockHash) { + pub fn remove_view(&self, key: BlockHash) { let _ = self.controller.unbounded_send(Command::RemoveView(key)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: remove_view {key:?} send message failed: {e}"); }); } /// Removes status info for finalized transactions. - pub fn remove_finalized_txs(&self, xts: impl IntoIterator> + Clone) { + pub fn remove_finalized_txs( + &self, + xts: impl IntoIterator> + Clone, + ) { let _ = self .controller .unbounded_send(Command::RemoveFinalizedTxs(xts.into_iter().collect())) From fb5e35576b0be3ffe8436c03e846b15f5517b93f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:32:06 +0100 Subject: [PATCH 06/46] base_pool: handling conflicts in future (2) --- .../client/transaction-pool/src/graph/base_pool.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 9a87d77c3062..33526564d172 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -326,26 +326,31 @@ impl BasePool { if !first { - promoted.push(current_hash); + promoted.push(current_hash.clone()); } + // If there were conflicting future transactions promoted, removed them from + // promoted set. + promoted.retain(|hash| replaced.iter().all(|tx| *hash != tx.hash)); // The transactions were removed from the ready pool. We might attempt to // re-import them. removed.append(&mut replaced); }, Err(e @ error::Error::TooLowPriority { .. }) => if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_tx.hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); return Err(e) } else { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); removed.push(current_tx); + promoted.retain(|hash| *hash != current_hash); }, // transaction failed to be imported. 
Err(e) => if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_tx.hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); return Err(e) } else { - trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_tx.hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); failed.push(current_tx.hash.clone()); }, } From e798e3457fc7d91a581191c3496c0f70acd97322 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:39:54 +0100 Subject: [PATCH 07/46] fatp: DroppedTransaction + DroppedReason added --- .../src/fork_aware_txpool/dropped_watcher.rs | 64 ++++++++++++++++--- .../fork_aware_txpool/fork_aware_txpool.rs | 5 +- .../fork_aware_txpool/multi_view_listener.rs | 32 ++++++---- .../src/fork_aware_txpool/tx_mem_pool.rs | 20 +++--- .../transaction-pool/src/graph/listener.rs | 11 +++- 5 files changed, 95 insertions(+), 37 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 29a5f00cb88f..8f1660f373e2 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -39,6 +39,35 @@ use std::{ }; use tokio_stream::StreamMap; +/// Represents a transaction that was removed from the transaction pool, including the reason of its +/// removal. +#[derive(Debug, PartialEq)] +pub struct DroppedTransaction { + /// Hash of the dropped extrinsic. + pub tx_hash: Hash, + /// Reason of the transaction being dropped. + pub reason: DroppedReason, +} + +impl DroppedTransaction { + fn new_usurped(tx_hash: Hash, by: Hash) -> Self { + Self { reason: DroppedReason::Usurped(by), tx_hash } + } + + fn new_enforced_by_limts(tx_hash: Hash) -> Self { + Self { reason: DroppedReason::LimitsEnforced, tx_hash } + } +} + +/// Provides reason of why transactions was dropped. +#[derive(Debug, PartialEq)] +pub enum DroppedReason { + /// Transaction was replaced by other transaction (e.g. because of higher priority). + Usurped(Hash), + /// Transaction was dropped because of internal pool limits being enforced. + LimitsEnforced, +} + /// Dropped-logic related event from the single view. pub type ViewStreamEvent = crate::graph::DroppedByLimitsEvent, BlockHash>; @@ -47,7 +76,8 @@ type ViewStream = Pin> + Se /// Stream of extrinsic hashes that were dropped by the views and have no references by existing /// views. -pub(crate) type StreamOfDropped = Pin> + Send>>; +pub(crate) type StreamOfDropped = + Pin>> + Send>>; /// A type alias for a sender used as the controller of the [`MultiViewDropWatcherContext`]. 
/// Used to send control commands from the [`MultiViewDroppedWatcherController`] to @@ -125,7 +155,7 @@ where &mut self, block_hash: BlockHash, event: ViewStreamEvent, - ) -> Option> { + ) -> Option>> { trace!( target: LOG_TARGET, "dropped_watcher: handle_event: event:{:?} views:{:?}, ", @@ -137,17 +167,30 @@ where TransactionStatus::Ready | TransactionStatus::Future => { self.transaction_states.entry(tx_hash).or_default().insert(block_hash); }, - TransactionStatus::Dropped | TransactionStatus::Usurped(_) => { + TransactionStatus::Dropped => { + if let Entry::Occupied(mut views_keeping_tx_valid) = + self.transaction_states.entry(tx_hash) + { + views_keeping_tx_valid.get_mut().remove(&block_hash); + if views_keeping_tx_valid.get().is_empty() { + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + } else { + debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + }, + TransactionStatus::Usurped(by) => { if let Entry::Occupied(mut views_keeping_tx_valid) = self.transaction_states.entry(tx_hash) { views_keeping_tx_valid.get_mut().remove(&block_hash); if views_keeping_tx_valid.get().is_empty() { - return Some(tx_hash) + return Some(DroppedTransaction::new_usurped(tx_hash, by)) } } else { debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); - return Some(tx_hash) + return Some(DroppedTransaction::new_usurped(tx_hash, by)) } }, _ => {}, @@ -296,7 +339,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash, view_stream); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -346,7 +389,10 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash1]); + assert_eq!( + handle.await.unwrap(), + vec![DroppedTransaction::new_enforced_by_limts(tx_hash1)] + ); } #[tokio::test] @@ -375,7 +421,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -418,6 +464,6 @@ mod dropped_watcher_tests { let block_hash2 = H256::repeat_byte(0x03); watcher.add_view(block_hash2, view_stream2); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index a342d35b2844..ea39eeb139d5 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -252,8 +252,9 @@ where break; }; log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification, removing", dropped); - mempool.remove_dropped_transactions(&[dropped]).await; - import_notification_sink.clean_notified_items(&[dropped]); + let tx_hash = 
dropped.tx_hash; + mempool.remove_dropped_transaction(dropped).await; + import_notification_sink.clean_notified_items(&[tx_hash]); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 8d0e69db2e9a..081afccf2d7d 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -36,6 +36,8 @@ use std::{ }; use tokio_stream::StreamMap; +use super::dropped_watcher::{DroppedReason, DroppedTransaction}; + /// A side channel allowing to control the external stream instance (one per transaction) with /// [`ControllerCommand`]. /// @@ -79,7 +81,7 @@ enum ControllerCommand { /// Notifies that a transaction was dropped from the pool. /// /// If all preconditions are met, an external dropped event will be sent out. - TransactionDropped, + TransactionDropped(DroppedReason>), } impl std::fmt::Debug for ControllerCommand where @@ -99,8 +101,8 @@ where ControllerCommand::TransactionBroadcasted(_) => { write!(f, "ListenerAction::TransactionBroadcasted(...)") }, - ControllerCommand::TransactionDropped => { - write!(f, "ListenerAction::TransactionDropped") + ControllerCommand::TransactionDropped(r) => { + write!(f, "ListenerAction::TransactionDropped {r:?}") }, } } @@ -346,11 +348,16 @@ where log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Broadcasted", ctx.tx_hash); return Some((TransactionStatus::Broadcast(peers), ctx)) }, - ControllerCommand::TransactionDropped => { + ControllerCommand::TransactionDropped(DroppedReason::LimitsEnforced) => { log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Dropped", ctx.tx_hash); ctx.terminate = true; return Some((TransactionStatus::Dropped, ctx)) }, + ControllerCommand::TransactionDropped(DroppedReason::Usurped(by)) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Usurped({:?})", ctx.tx_hash, by); + ctx.terminate = true; + return Some((TransactionStatus::Usurped(by), ctx)) + }, } }, }; @@ -445,16 +452,15 @@ where /// /// This method sends a `TransactionDropped` command to the controller of each requested /// transaction, prompting an external `Dropped` event.
- pub(crate) fn transactions_dropped(&self, dropped: &[ExtrinsicHash]) { + pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { let mut controllers = self.controllers.write(); - debug!(target: LOG_TARGET, "mvl::transactions_dropped: {:?}", dropped); - for tx_hash in dropped { - if let Some(tx) = controllers.remove(&tx_hash) { - debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); - if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped) { - trace!(target: LOG_TARGET, "[{:?}] transactions_dropped: send message failed: {:?}", tx_hash, e); - }; - } + debug!(target: LOG_TARGET, "mvl::transaction_dropped: {:?}", dropped); + if let Some(tx) = controllers.remove(&dropped.tx_hash) { + let DroppedTransaction { tx_hash, reason } = dropped; + debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); + if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { + trace!(target: LOG_TARGET, "[{:?}] transaction_dropped: send message failed: {:?}", tx_hash, e); + }; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 86c07008c3f3..be00471317b8 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -26,7 +26,10 @@ //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. -use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener}; +use super::{ + dropped_watcher::DroppedTransaction, metrics::MetricsLink as PrometheusMetrics, + multi_view_listener::MultiViewListener, +}; use crate::{ common::log_xt::log_xt_trace, graph, @@ -301,18 +304,13 @@ where /// Removes transactions from the memory pool which are specified by the given list of hashes /// and send the `Dropped` event to the listeners of these transactions. - pub(super) async fn remove_dropped_transactions( + pub(super) async fn remove_dropped_transaction( &self, - to_be_removed: &[ExtrinsicHash], + dropped: DroppedTransaction>, ) { - log::debug!(target: LOG_TARGET, "remove_dropped_transactions count:{:?}", to_be_removed.len()); - log_xt_trace!(target: LOG_TARGET, to_be_removed, "[{:?}] mempool::remove_dropped_transactions"); - let mut transactions = self.transactions.write(); - to_be_removed.iter().for_each(|t| { - transactions.remove(t); - }); - - self.listener.transactions_dropped(to_be_removed); + log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); + self.transactions.write().remove(&dropped.tx_hash); + self.listener.transaction_dropped(dropped); } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index a5593920eec4..9773c8f43307 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -36,6 +36,7 @@ pub type DroppedByLimitsStream = TracingUnboundedReceiver { + /// Map containing per-transaction sinks for emitting transaction status events. 
watchers: HashMap>>, finality_watchers: LinkedHashMap, Vec>, @@ -136,8 +137,14 @@ impl Listener Date: Thu, 7 Nov 2024 14:41:09 +0100 Subject: [PATCH 08/46] graph: dedicated usurped/limit_enforced calls added --- .../transaction-pool/src/graph/listener.rs | 48 +++++++++++-------- .../src/graph/validated_pool.rs | 8 ++-- .../transaction-pool/src/graph/watcher.rs | 6 +++ 3 files changed, 37 insertions(+), 25 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index 9773c8f43307..41daf5491f70 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -120,38 +120,44 @@ impl Listener, limits_enforced: bool) { + /// Transaction was dropped from the pool because a pool limit was enforced. + pub fn limit_enforced(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped (limit enforced)", tx); + self.fire(tx, |watcher| watcher.limit_enforced()); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } + } + } + + /// Transaction was replaced with another extrinsic. + pub fn usurped(&mut self, tx: &H, by: &H) { trace!(target: LOG_TARGET, "[{:?}] Dropped (replaced with {:?})", tx, by); - self.fire(tx, |watcher| match by { - Some(t) => watcher.usurped(t.clone()), - None => watcher.dropped(), - }); + self.fire(tx, |watcher| watcher.usurped(by.clone())); - //note: LimitEnforced could be introduced as new status to get rid of this flag. if let Some(ref sink) = self.dropped_by_limits_sink { - if let Some(t) = by { - if let Err(e) = - sink.unbounded_send((tx.clone(), TransactionStatus::Usurped(t.clone()))) - { - trace!(target: LOG_TARGET, "[{:?}] dropped_sink/future: send message failed: {:?}", tx, e); - } - } else if limits_enforced { - if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { - trace!(target: LOG_TARGET, "[{:?}] dropped_sink/future: send message failed: {:?}", tx, e); - } + if let Err(e) = + sink.unbounded_send((tx.clone(), TransactionStatus::Usurped(by.clone()))) + { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); } } } + /// Transaction was dropped from the pool because of a failure during the resubmission of + /// revalidated transactions or a failure during pruning of tags. + pub fn dropped(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped", tx); + self.fire(tx, |watcher| watcher.dropped()); + } + /// Transaction was removed as invalid.
pub fn invalid(&mut self, tx: &H) { trace!(target: LOG_TARGET, "[{:?}] Extrinsic invalid", tx); diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index d7f55198a40a..18ddc59eee8a 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -280,7 +280,7 @@ impl ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.dropped(h, None, true); + listener.limit_enforced(h); } removed @@ -453,7 +453,7 @@ impl ValidatedPool { match final_status { Status::Future => listener.future(&hash), Status::Ready => listener.ready(&hash, None), - Status::Dropped => listener.dropped(&hash, None, false), + Status::Dropped => listener.dropped(&hash), Status::Failed => listener.invalid(&hash), } } @@ -492,7 +492,7 @@ impl ValidatedPool { fire_events(&mut *listener, promoted); } for f in &status.failed { - listener.dropped(f, None, false); + listener.dropped(f); } } @@ -682,7 +682,7 @@ where base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { listener.ready(hash, None); failed.iter().for_each(|f| listener.invalid(f)); - removed.iter().for_each(|r| listener.dropped(&r.hash, Some(hash), false)); + removed.iter().for_each(|r| listener.usurped(&r.hash, hash)); promoted.iter().for_each(|p| listener.ready(p, None)); }, base::Imported::Future { ref hash } => listener.future(hash), diff --git a/substrate/client/transaction-pool/src/graph/watcher.rs b/substrate/client/transaction-pool/src/graph/watcher.rs index fb7cf99d4dc6..2fd31e772fd8 100644 --- a/substrate/client/transaction-pool/src/graph/watcher.rs +++ b/substrate/client/transaction-pool/src/graph/watcher.rs @@ -113,6 +113,12 @@ impl Sender { } /// Transaction has been dropped from the pool because of the limit. + pub fn limit_enforced(&mut self) { + self.send(TransactionStatus::Dropped); + self.is_finalized = true; + } + + /// Transaction has been dropped from the pool. pub fn dropped(&mut self) { self.send(TransactionStatus::Dropped); self.is_finalized = true; From 322774c9c80d5343c5b32f1b23fd36d84058bf68 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 7 Nov 2024 14:42:08 +0100 Subject: [PATCH 09/46] dropped_watcher: todo added --- .../transaction-pool/src/fork_aware_txpool/dropped_watcher.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 8f1660f373e2..390d552cc037 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -180,6 +180,10 @@ where return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } }, + // todo: + // 1. usurpued shall be sent unconditionally + // 2. fatp shall act for every usurped message - it should remove tx from every view and + // replace it with new one (also in mempool). 
TransactionStatus::Usurped(by) => { if let Entry::Occupied(mut views_keeping_tx_valid) = self.transaction_states.entry(tx_hash) From a116c201be8d4498f08226e2d6b01fcf0cae5c2f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 7 Nov 2024 14:42:32 +0100 Subject: [PATCH 10/46] test: prios tests --- .../transaction-pool/tests/fatp_prios.rs | 65 +++++++++++++------ 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 9c70bb4b371c..e254a3d8171d 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -107,7 +107,10 @@ fn fatp_prio_watcher_ready_higher_evicts_lower() { block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); assert_eq!(xt1_status, vec![TransactionStatus::Ready]); @@ -115,26 +118,46 @@ fn fatp_prio_watcher_ready_higher_evicts_lower() { log::info!("len: {:?}", pool.status_all()[&header01.hash()]); assert_ready_iterator!(header01.hash(), pool, [xt1]); assert_pool_status!(header01.hash(), &pool, 1, 0); +} - // let results = block_on(futures::future::join_all(submissions)); - // assert!(results.iter().all(Result::is_ok)); - // //charlie was not included into view: - // assert_pool_status!(header01.hash(), &pool, 2, 0); +#[test] +fn fatp_prio_watcher_future_higher_evicts_lower() { + sp_tracing::try_init_simple(); - // //branch with alice transactions: - // let header02b = api.push_block(2, vec![xt1.clone(), xt2.clone()], true); - // let event = new_best_block_event(&pool, Some(header01.hash()), header02b.hash()); - // block_on(pool.maintain(event)); - // assert_eq!(pool.mempool_len().0, 2); - // assert_pool_status!(header02b.hash(), &pool, 0, 0); - // assert_ready_iterator!(header02b.hash(), pool, []); - // - // //branch with alice/charlie transactions shall also work: - // let header02a = api.push_block(2, vec![xt0.clone(), xt1.clone()], true); - // api.set_nonce(header02a.hash(), Alice.into(), 201); - // let event = new_best_block_event(&pool, Some(header02b.hash()), header02a.hash()); - // block_on(pool.maintain(event)); - // assert_eq!(pool.mempool_len().0, 2); - // // assert_pool_status!(header02a.hash(), &pool, 1, 0); - // assert_ready_iterator!(header02a.hash(), pool, [xt2]); + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(3).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = 
futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future, TransactionStatus::Ready]); + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + + assert_eq!(pool.mempool_len().1, 2); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_pool_status!(header01.hash(), &pool, 2, 0); } From 560db28c987dd1e634119788ebc8318967df206b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:03:41 +0100 Subject: [PATCH 11/46] dropped_watcher: dropping future transcations improved --- .../src/fork_aware_txpool/dropped_watcher.rs | 118 ++++++++++++++---- .../fork_aware_txpool/multi_view_listener.rs | 1 + .../src/fork_aware_txpool/tx_mem_pool.rs | 2 +- .../src/fork_aware_txpool/view_store.rs | 7 +- 4 files changed, 103 insertions(+), 25 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 390d552cc037..8de10555ea33 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -33,7 +33,10 @@ use sc_transaction_pool_api::TransactionStatus; use sc_utils::mpsc; use sp_runtime::traits::Block as BlockT; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{ + hash_map::{Entry, OccupiedEntry}, + HashMap, HashSet, + }, fmt::{self, Debug, Formatter}, pin::Pin, }; @@ -98,7 +101,7 @@ where AddView(BlockHash, ViewStream), /// Removes an existing view's stream associated with a specific block hash. RemoveView(BlockHash), - /// Removes internal states for given extrinsic hashes. + /// Removes referencing views for given extrinsic hashes. /// /// Intended to ba called on finalization. RemoveFinalizedTxs(Vec>), @@ -129,16 +132,29 @@ where /// A map that associates the views identified by corresponding block hashes with their streams /// of dropped-related events. This map is used to keep track of active views and their event /// streams. + /// todo: rename: view_stream map stream_map: StreamMap, ViewStream>, /// A receiver for commands to control the state of the stream, allowing the addition and /// removal of views. This is used to dynamically update which views are being tracked. command_receiver: CommandReceiver>, - /// For each transaction hash we keep the set of hashes representing the views that see this - /// transaction as ready or future. + /// transaction as ready or in_block. + /// + /// Even if all views referencing a ready transactions are removed, we still want to keep + /// transaction, there can be a fork which sees the transaction as ready. + /// + /// Once transaction is dropped, dropping view is removed from the set. + ready_transaction_views: HashMap, HashSet>>, + /// For each transaction hash we keep the set of hashes representing the views that see this + /// transaction as future. + /// + /// Once all views referencing a future transactions are removed, the future can be dropped. /// /// Once transaction is dropped, dropping view is removed from the set. 
- transaction_states: HashMap, HashSet>>, + future_transaction_views: HashMap, HashSet>>, + + /// Transactions that need to be notified as dropped. + pending_dropped_transactions: Vec>, } impl MultiViewDropWatcherContext @@ -146,6 +162,23 @@ where C: graph::ChainApi + 'static, <::Block as BlockT>::Hash: Unpin, { + /// Provides the ready or future `HashSet` containing views referencing given transaction. + fn get_transaction_views( + &mut self, + tx_hash: ExtrinsicHash, + ) -> Option, HashSet>>> { + if let Entry::Occupied(views_keeping_tx_valid) = self.ready_transaction_views.entry(tx_hash) + { + return Some(views_keeping_tx_valid) + } + if let Entry::Occupied(views_keeping_tx_valid) = + self.future_transaction_views.entry(tx_hash) + { + return Some(views_keeping_tx_valid) + } + None + } + /// Processes a `ViewStreamEvent` from a specific view and updates the internal state /// accordingly. /// @@ -164,13 +197,19 @@ where ); let (tx_hash, status) = event; match status { - TransactionStatus::Ready | TransactionStatus::Future => { - self.transaction_states.entry(tx_hash).or_default().insert(block_hash); + TransactionStatus::Future => { + self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash); + }, + TransactionStatus::Ready | TransactionStatus::InBlock(..) => { + if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { + views.insert(block_hash); + self.ready_transaction_views.insert(tx_hash, views); + } else { + self.ready_transaction_views.entry(tx_hash).or_default().insert(block_hash); + } }, TransactionStatus::Dropped => { - if let Entry::Occupied(mut views_keeping_tx_valid) = - self.transaction_states.entry(tx_hash) - { + if let Some(mut views_keeping_tx_valid) = self.get_transaction_views(tx_hash) { views_keeping_tx_valid.get_mut().remove(&block_hash); if views_keeping_tx_valid.get().is_empty() { return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) @@ -186,7 +225,7 @@ where // replace it with new one (also in mempool). TransactionStatus::Usurped(by) => { if let Entry::Occupied(mut views_keeping_tx_valid) = - self.transaction_states.entry(tx_hash) + self.ready_transaction_views.entry(tx_hash) { views_keeping_tx_valid.get_mut().remove(&block_hash); if views_keeping_tx_valid.get().is_empty() { @@ -202,6 +241,25 @@ where None } + /// Gets pending dropped transactions if any. + fn get_pending_dropped_transaction(&mut self) -> Option>> { + while let Some(tx_hash) = self.pending_dropped_transactions.pop() { + // never drop transaction that was seens as ready. It may not have a referencing + // view now, but such fork can appear. + if let Some(_) = self.ready_transaction_views.get(&tx_hash) { + continue + } + + if let Some(views) = self.future_transaction_views.get(&tx_hash) { + if views.is_empty() { + self.future_transaction_views.remove(&tx_hash); + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + } + } + return None + } + /// Creates a new `StreamOfDropped` and its associated event stream controller. 
/// /// This method initializes the internal structures and unfolds the stream of dropped @@ -218,13 +276,25 @@ where let ctx = Self { stream_map: StreamMap::new(), command_receiver, - transaction_states: Default::default(), + ready_transaction_views: Default::default(), + future_transaction_views: Default::default(), + pending_dropped_transactions: Default::default(), }; let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { + if let Some(dropped) = ctx.get_pending_dropped_transaction() { + debug!("dropped_watcher: sending out (pending): {dropped:?}"); + return Some((dropped, ctx)); + } tokio::select! { biased; + Some(event) = next_event(&mut ctx.stream_map) => { + if let Some(dropped) = ctx.handle_event(event.0, event.1) { + debug!("dropped_watcher: sending out: {dropped:?}"); + return Some((dropped, ctx)); + } + }, cmd = ctx.command_receiver.next() => { match cmd? { Command::AddView(key,stream) => { @@ -234,26 +304,30 @@ where Command::RemoveView(key) => { trace!(target: LOG_TARGET,"dropped_watcher: Command::RemoveView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); ctx.stream_map.remove(&key); - ctx.transaction_states.iter_mut().for_each(|(_,state)| { - state.remove(&key); + ctx.ready_transaction_views.iter_mut().for_each(|(tx_hash,views)| { + trace!(target: LOG_TARGET,"[{:?}] dropped_watcher: Command::RemoveView ready views: {:?}",tx_hash, views); + views.remove(&key); + }); + + ctx.future_transaction_views.iter_mut().for_each(|(tx_hash,views)| { + trace!(target: LOG_TARGET,"[{:?}] dropped_watcher: Command::RemoveView future views: {:?}",tx_hash, views); + views.remove(&key); + if views.is_empty() { + ctx.pending_dropped_transactions.push(*tx_hash); + } }); }, Command::RemoveFinalizedTxs(xts) => { log_xt_trace!(target: LOG_TARGET, xts.clone(), "[{:?}] dropped_watcher: finalized xt removed"); xts.iter().for_each(|xt| { - ctx.transaction_states.remove(xt); + ctx.ready_transaction_views.remove(xt); + ctx.future_transaction_views.remove(xt); }); }, } - }, - - Some(event) = next_event(&mut ctx.stream_map) => { - if let Some(dropped) = ctx.handle_event(event.0, event.1) { - debug!("dropped_watcher: sending out: {dropped:?}"); - return Some((dropped, ctx)); - } } + } } }) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 081afccf2d7d..52acb22d8a6b 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -270,6 +270,7 @@ where /// stream map. 
fn remove_view(&mut self, block_hash: BlockHash) { self.status_stream_map.remove(&block_hash); + self.views_keeping_tx_valid.remove(&block_hash); trace!(target: LOG_TARGET, "[{:?}] RemoveView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index be00471317b8..16952afe64bd 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -401,7 +401,7 @@ where log::debug!( target: LOG_TARGET, - "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} purged:{} took {duration:?}", invalid_hashes.len(), + "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} invalid_hashes:{} took {duration:?}", invalid_hashes.len(), ); invalid_hashes diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 8f618fde5613..e2af6d3851bb 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -471,13 +471,16 @@ where log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); } + log::trace!(target:LOG_TARGET,"handle_finalized: dropped_views: {:?}", dropped_views); + + self.listener.remove_stale_controllers(); + self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); + self.listener.remove_view(finalized_hash); for view in dropped_views { self.listener.remove_view(view); self.dropped_stream_controller.remove_view(view); } - self.listener.remove_stale_controllers(); - self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); finalized_xts } From 5a83cf5c3acd2e48b2a06b47665773b4e0cb761a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:14:34 +0100 Subject: [PATCH 12/46] test fixed --- substrate/client/transaction-pool/tests/fatp.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 9f343a9bd029..a9ad7110f00a 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -2267,19 +2267,13 @@ fn fatp_avoid_stuck_transaction() { assert_pool_status!(header06.hash(), &pool, 0, 0); - // Import enough blocks to make xt4i revalidated - let mut prev_header = header03; - // wait 10 blocks for revalidation - for n in 7..=11 { - let header = api.push_block(n, vec![], true); - let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); - block_on(pool.maintain(event)); - prev_header = header; - } + let event = finalized_block_event(&pool, header03.hash(), header06.hash()); + block_on(pool.maintain(event)); let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::>(); log::debug!("xt4i_events: {:#?}", xt4i_events); - assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Invalid]); + + assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); assert_eq!(pool.mempool_len(), (0, 0)); } From c566dada426ed9403a030e22ff2dc8068b5980a1 Mon Sep 17 00:00:00 2001 From: Michal 
Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:15:17 +0100 Subject: [PATCH 13/46] tests: future dropping test added --- .../transaction-pool/tests/fatp_limits.rs | 236 +++++++++++++++++- 1 file changed, 235 insertions(+), 1 deletion(-) diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index 03792fd89dfa..fbc9075af0aa 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -28,7 +28,7 @@ use sc_transaction_pool::ChainApi; use sc_transaction_pool_api::{ error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, }; -use std::thread::sleep; +use std::{collections::HashSet, thread::sleep}; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; @@ -641,3 +641,237 @@ fn fatp_limits_future_size_works() { assert_pool_status!(header01.hash(), &pool, 0, 3); assert_eq!(pool.mempool_len().0, 3); } + +// this test is bad by design: xt0, xt1 cannot be dropped by the pool after finalizing 02 - there +// can be new fork where xt0, xt1 will be valid. +// todo: do we need this test? +#[test] +#[ignore] +fn fatp_limits_watcher_xxx() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 6); + + let header04 = + api.push_block_with_parent(header03.hash(), vec![xt4.clone(), xt5.clone()], true); + api.set_nonce(header04.hash(), Alice.into(), 201); + api.set_nonce(header04.hash(), Bob.into(), 301); + api.set_nonce(header04.hash(), 
Charlie.into(), 401); + api.set_nonce(header04.hash(), Dave.into(), 501); + api.set_nonce(header04.hash(), Eve.into(), 601); + api.set_nonce(header04.hash(), Ferdie.into(), 701); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt3]); + assert_ready_iterator!(header03.hash(), pool, [xt4, xt5]); + assert_ready_iterator!(header04.hash(), pool, []); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + assert!(!pool.status_all().contains_key(&header01.hash())); + + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + assert!(!pool.status_all().contains_key(&header02.hash())); + assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); + //todo: can we do better? We don't have API to check if event was processed internally. + let mut counter = 0; + while pool.mempool_len().1 != 4 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout"); + } + } + assert_eq!(pool.mempool_len().1, 4); + + block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); + log::info!("status: {:?}", pool.status_all()); + assert!(!pool.status_all().contains_key(&header03.hash())); + assert!(pool.ready_at(header02.hash()).now_or_never().is_none()); + + // + // + // + // + + // assert_pool_status!(header02e.hash(), &pool, 0, 0); + // + // let header02f = api.push_block_with_parent(header01.hash(), vec![], true); + // block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), + // header02f.hash()))); assert_pool_status!(header02f.hash(), &pool, 2, 0); + // assert_ready_iterator!(header02f.hash(), pool, [xt1, xt2]); + // + // let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, + // xt3.clone())).unwrap(); let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), + // SOURCE, xt4.clone())).unwrap(); let result5 = block_on(pool.submit_and_watch(invalid_hash(), + // SOURCE, xt5.clone())).map(|_| ()); + // + // //xt5 hits internal mempool limit + // assert!(matches!(result5.unwrap_err().0, TxPoolError::ImmediatelyDropped)); + // + // assert_pool_status!(header02e.hash(), &pool, 2, 0); + // assert_ready_iterator!(header02e.hash(), pool, [xt3, xt4]); + // assert_eq!(pool.mempool_len().1, 4); + // + // let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + // assert_eq!( + // xt1_status, + // vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 1))] + // ); + // + // let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); + // assert_eq!( + // xt2_status, + // vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 2))] + // ); + // + // let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(1).collect::>(); + // assert_eq!(xt3_status, vec![TransactionStatus::Ready]); + // let xt4_status = futures::executor::block_on_stream(xt4_watcher).take(1).collect::>(); + // assert_eq!(xt4_status, vec![TransactionStatus::Ready]); +} + +macro_rules! 
assert_future_iterator { + ($pool:expr, [$( $xt:expr ),*]) => {{ + let futures = $pool.futures(); + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?}", futures); + log::debug!(target:LOG_TARGET, "output: {:#?}", expected); + assert_eq!(expected.len(), futures.len()); + let hsf = futures.iter().map(|a| a.hash).collect::>(); + let hse = expected.into_iter().collect::>(); + assert_eq!(hse,hsf); + }}; +} + +#[test] +fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_future_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Bob, 301); + let xt2 = uxt(Charlie, 401); + + let xt3 = uxt(Dave, 501); + let xt4 = uxt(Eve, 601); + let xt5 = uxt(Ferdie, 701); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 2); + assert_future_iterator!(pool, [xt0, xt1]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 4); + assert_future_iterator!(pool, [xt2, xt3]); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 6); + assert_future_iterator!(pool, [xt4, xt5]); + + let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), 0); + //todo: can we do better? We don't have API to check if event was processed internally. 
+ let mut counter = 0; + while pool.mempool_len().1 != 2 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout {}", pool.mempool_len().1); + } + } + assert_eq!(pool.mempool_len().1, 2); + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + + let mut to_be_checked = vec![ + (xt0, xt0_watcher), + (xt1, xt1_watcher), + (xt2, xt2_watcher), + (xt3, xt3_watcher), + (xt4, xt4_watcher), + (xt5, xt5_watcher), + ]; + let future_hashes = pool.futures().iter().map(|t| t.hash).collect::>(); + to_be_checked.retain(|x| !future_hashes.contains(&api.hash_and_length(&x.0).0)); + + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x.1).take(2).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future, TransactionStatus::Dropped]); + } +} From f10590f3bde69b31250761a5b10802fb139ab2b2 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:58:10 +0100 Subject: [PATCH 14/46] fatp: TimedTransactionSource added --- .../client/transaction-pool/benches/basics.rs | 4 +- .../fork_aware_txpool/fork_aware_txpool.rs | 111 +++++++++--------- .../fork_aware_txpool/revalidation_worker.rs | 9 +- .../src/fork_aware_txpool/tx_mem_pool.rs | 60 +++++++--- .../src/fork_aware_txpool/view.rs | 25 ++-- .../src/fork_aware_txpool/view_store.rs | 17 +-- .../transaction-pool/src/graph/base_pool.rs | 60 ++++++++-- .../client/transaction-pool/src/graph/pool.rs | 30 +++-- .../transaction-pool/src/graph/ready.rs | 5 +- .../transaction-pool/src/graph/rotator.rs | 5 +- .../src/graph/validated_pool.rs | 21 +++- substrate/client/transaction-pool/src/lib.rs | 5 +- .../src/single_state_txpool/revalidation.rs | 25 ++-- .../single_state_txpool.rs | 79 ++++++++----- 14 files changed, 278 insertions(+), 178 deletions(-) diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 0d8c1cbba9b4..5e40b0fb72d6 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -152,7 +152,7 @@ fn uxt(transfer: TransferData) -> Extrinsic { } fn bench_configured(pool: Pool, number: u64, api: Arc) { - let source = TransactionSource::External; + let source = TimedTransactionSource::new_external(false); let mut futures = Vec::new(); let mut tags = Vec::new(); let at = HashAndNumber { @@ -171,7 +171,7 @@ fn bench_configured(pool: Pool, number: u64, api: Arc) { tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&at, source, xt)); + futures.push(pool.submit_one(&at, source.clone(), xt)); } let res = block_on(futures::future::join_all(futures.into_iter())); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index ea39eeb139d5..4ef070615acb 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -23,7 +23,7 @@ use super::{ import_notification_sink::MultiViewImportNotificationSink, metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, - tx_mem_pool::{TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, + tx_mem_pool::{InsertionInfo, TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, view::View, 
view_store::ViewStore, }; @@ -32,7 +32,11 @@ use crate::{ common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, fork_aware_txpool::revalidation_worker, - graph::{self, base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, IsValidator, Options}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, IsValidator, Options, + }, PolledIterator, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -604,13 +608,19 @@ where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return future::ready(Ok(mempool_results)).boxed() + return future::ready(Ok(mempool_results + .into_iter() + .map(|r| r.map(|r| r.hash)) + .collect::>())) + .boxed() } let to_be_submitted = mempool_results .iter() .zip(xts) - .filter_map(|(result, xt)| result.as_ref().ok().map(|_| xt)) + .filter_map(|(result, xt)| { + result.as_ref().ok().map(|insertion| (insertion.source.clone(), xt)) + }) .collect::>(); self.metrics @@ -618,18 +628,18 @@ where let mempool = self.mempool.clone(); async move { - let results_map = view_store.submit(source, to_be_submitted.into_iter()).await; + let results_map = view_store.submit(to_be_submitted.into_iter()).await; let mut submission_results = reduce_multiview_result(results_map).into_iter(); Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|xt_hash| { + result.and_then(|insertion| { submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") .inspect_err(|_| - mempool.remove(xt_hash) + mempool.remove(insertion.hash) ) }) }) @@ -672,10 +682,11 @@ where ) -> PoolFuture>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let xt_hash = match self.mempool.push_watched(source, xt.clone()) { - Ok(xt_hash) => xt_hash, - Err(e) => return future::ready(Err(e)).boxed(), - }; + let InsertionInfo { hash: xt_hash, source: timed_source } = + match self.mempool.push_watched(source, xt.clone()) { + Ok(result) => result, + Err(e) => return future::ready(Err(e)).boxed(), + }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); @@ -683,7 +694,7 @@ where let mempool = self.mempool.clone(); async move { view_store - .submit_and_watch(at, source, xt) + .submit_and_watch(at, timed_source, xt) .await .inspect_err(|_| mempool.remove(xt_hash)) } @@ -816,12 +827,12 @@ where ) -> Result { log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); let xt = Arc::from(xt); - let result = self + let InsertionInfo { hash: xt_hash, .. } = self .mempool .extend_unwatched(TransactionSource::Local, &[xt.clone()]) .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(result)) + self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) } } @@ -929,6 +940,9 @@ where let start = Instant::now(); let watched_xts = self.register_listeners(&mut view).await; let duration = start.elapsed(); + // sync the transactions statuses and referencing views in all the listeners with newly + // cloned view. + view.pool.validated_pool().retrigger_notifications(); log::debug!(target: LOG_TARGET, "register_listeners: at {at:?} took {duration:?}"); // 2. Handle transactions from the tree route. 
Pruning transactions from the view first @@ -1061,46 +1075,33 @@ where let mut all_submitted_count = 0; if !xts.is_empty() { let unwatched_count = xts.len(); - let mut buckets = HashMap::>>::default(); - xts.into_iter() + let xts_with_src = xts + .into_iter() .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(_, tx)| (tx.source(), tx.tx())) - .for_each(|(source, tx)| buckets.entry(source).or_default().push(tx)); + .map(|(_, tx)| (tx.source(), tx.tx())); - for (source, xts) in buckets { - all_submitted_count += xts.len(); - let _ = view.submit_many(source, xts).await; - } + let results = view.submit_many(xts_with_src).await; + all_submitted_count = results.len(); log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} unwatched {}/{}", view.at.hash, all_submitted_count, unwatched_count); } - let watched_submitted_count = watched_xts.len(); - - let mut buckets = HashMap::< - TransactionSource, - Vec<(ExtrinsicHash, ExtrinsicFor)>, - >::default(); - watched_xts + let watched_xts_filtered = watched_xts .into_iter() .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(tx_hash, tx)| (tx.source(), tx_hash, tx.tx())) - .for_each(|(source, tx_hash, tx)| { - buckets.entry(source).or_default().push((tx_hash, tx)) - }); + .map(|(tx_hash, tx)| (tx_hash, tx.source(), tx.tx())) + .collect::>(); - let mut watched_results = Vec::default(); - for (source, watched_xts) in buckets { - let hashes = watched_xts.iter().map(|i| i.0).collect::>(); - let results = view - .submit_many(source, watched_xts.into_iter().map(|i| i.1)) - .await - .into_iter() - .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) - .collect::>(); - watched_results.extend(results); - } + let watched_submitted_count = watched_xts_filtered.len(); + + let hashes = watched_xts_filtered.iter().map(|i| i.0).collect::>(); + let watched_results = view + .submit_many(watched_xts_filtered.into_iter().map(|i| (i.1, i.2))) + .await + .into_iter() + .zip(hashes) + .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .collect::>(); log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} watched {}/{}", view.at.hash, watched_submitted_count, self.mempool_len().1); @@ -1191,7 +1192,14 @@ where }) .map(|(tx_hash, tx)| { //find arc if tx is known - self.mempool.get_by_hash(tx_hash).unwrap_or_else(|| Arc::from(tx)) + self.mempool + .get_by_hash(tx_hash) + .map(|tx| (tx.source(), tx.tx())) + .unwrap_or_else(|| { + // These transactions are coming from retracted blocks, we + // should simply consider them external. + (TimedTransactionSource::new_external(true), Arc::from(tx)) + }) }), ); @@ -1200,16 +1208,7 @@ where }); } - let _ = view - .pool - .resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. 
- TransactionSource::External, - resubmit_transactions, - ) - .await; + let _ = view.pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs index 9464ab3f5766..eb898c35a134 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -186,9 +186,9 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, fork_aware_txpool::view::FinishRevalidationLocalChannels, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::Alice; #[test] @@ -212,9 +212,10 @@ mod tests { nonce: 0, }); - let _ = block_on( - view.submit_many(TransactionSource::External, std::iter::once(uxt.clone().into())), - ); + let _ = block_on(view.submit_many(std::iter::once(( + TimedTransactionSource::new_external(false), + uxt.clone().into(), + )))); assert_eq!(api.validation_requests().len(), 1); let (finish_revalidation_request_tx, finish_revalidation_request_rx) = diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 16952afe64bd..09b81be77663 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -33,7 +33,7 @@ use super::{ use crate::{ common::log_xt::log_xt_trace, graph, - graph::{tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, + graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, LOG_TARGET, }; use futures::FutureExt; @@ -77,7 +77,7 @@ where /// Size of the extrinsics actual body. bytes: usize, /// Transaction source. - source: TransactionSource, + source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, //todo: we need to add future / ready status at finalized block. @@ -104,12 +104,24 @@ where /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: false, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: false, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: true, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: true, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Provides a clone of actual transaction body. @@ -120,8 +132,8 @@ where } /// Returns the source of the transaction. - pub(crate) fn source(&self) -> TransactionSource { - self.source + pub(crate) fn source(&self) -> TimedTransactionSource { + self.source.clone() } } @@ -177,6 +189,19 @@ where max_transactions_total_bytes: usize, } +/// Helper structure to encapsulate a result of [`TxMemPool::try_insert`]. 
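+/// It pairs the transaction `hash` with the `TimedTransactionSource` captured at submission
+/// time: `submit_at` forwards the `source` of every successfully inserted transaction to the
+/// views and uses `hash` to evict it from the mempool again if the view submission fails,
+/// while `submit_and_watch` does the same for a single watched transaction.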
+#[derive(Debug)] +pub(super) struct InsertionInfo { + pub(super) hash: Hash, + pub(super) source: TimedTransactionSource, +} + +impl InsertionInfo { + fn new(hash: Hash, source: TimedTransactionSource) -> Self { + Self { hash, source } + } +} + impl TxMemPool where Block: BlockT, @@ -223,8 +248,8 @@ where pub(super) fn get_by_hash( &self, hash: ExtrinsicHash, - ) -> Option> { - self.transactions.read().get(&hash).map(|t| t.tx()) + ) -> Option>> { + self.transactions.read().get(&hash).map(Clone::clone) } /// Returns a tuple with the count of unwatched and watched transactions in the memory pool. @@ -252,7 +277,7 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let bytes = self.transactions.bytes(); let mut transactions = self.transactions.write(); let result = match ( @@ -260,14 +285,15 @@ where transactions.contains_key(&hash), ) { (true, false) => { + let source = tx.source(); transactions.insert(hash, Arc::from(tx)); - Ok(hash) + Ok(InsertionInfo::new(hash, source)) }, (_, true) => Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), }; - log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result); + log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } @@ -280,7 +306,7 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec, ChainApi::Error>> { + ) -> Vec>, ChainApi::Error>> { let result = xts .iter() .map(|xt| { @@ -297,7 +323,7 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let (hash, length) = self.api.hash_and_length(&xt); self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } @@ -367,13 +393,13 @@ where }; let validations_futures = input.into_iter().map(|(xt_hash, xt)| { - self.api.validate_transaction(finalized_block.hash, xt.source, xt.tx()).map( - move |validation_result| { + self.api + .validate_transaction(finalized_block.hash, xt.source.source, xt.tx()) + .map(move |validation_result| { xt.validated_at .store(finalized_block.number.into().as_u64(), atomic::Ordering::Relaxed); (xt_hash, validation_result) - }, - ) + }) }); let validation_results = futures::future::join_all(validations_futures).await; let input_len = validation_results.len(); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 99095d88cb0a..52855994d9b9 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -27,13 +27,13 @@ use super::metrics::MetricsLink as PrometheusMetrics; use crate::{ common::log_xt::log_xt_trace, graph::{ - self, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, IsValidator, ValidatedTransaction, - ValidatedTransactionFor, + self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, + IsValidator, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; use parking_lot::Mutex; -use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus}; use sp_blockchain::HashAndNumber; use sp_runtime::{ generic::BlockId, traits::Block as BlockT, 
transaction_validity::TransactionValidityError, @@ -157,22 +157,21 @@ where /// Imports many unvalidated extrinsics into the view. pub(super) async fn submit_many( &self, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); - log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); - self.pool.submit_at(&self.at, source, xts).await + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); + self.pool.submit_at(&self.at, xts).await } else { - self.pool.submit_at(&self.at, source, xts).await + self.pool.submit_at(&self.at, xts).await } } /// Import a single extrinsic and starts to watch its progress in the view. pub(super) async fn submit_and_watch( &self, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); @@ -193,7 +192,7 @@ where .api() .validate_transaction_blocking( self.at.hash, - TransactionSource::Local, + sc_transaction_pool_api::TransactionSource::Local, Arc::from(xt.clone()), )? .map_err(|e| { @@ -214,7 +213,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(true), Arc::from(xt), length, validity, @@ -285,7 +284,7 @@ where } _ = async { if let Some(tx) = batch_iter.next() { - let validation_result = (api.validate_transaction(self.at.hash, tx.source, tx.data.clone()).await, tx.hash, tx); + let validation_result = (api.validate_transaction(self.at.hash, tx.source.source, tx.data.clone()).await, tx.hash, tx); validation_results.push(validation_result); } else { self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); @@ -324,7 +323,7 @@ where ValidatedTransaction::valid_at( self.at.number.saturated_into::(), tx_hash, - tx.source, + tx.source.clone(), tx.data.clone(), api.hash_and_length(&tx.data).1, validity, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index e2af6d3851bb..41e3fff29f5f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -24,14 +24,17 @@ use super::{ }; use crate::{ fork_aware_txpool::dropped_watcher::MultiViewDroppedWatcherController, - graph, - graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, TransactionFor}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, TransactionFor, + }, ReadyIteratorFor, LOG_TARGET, }; use futures::prelude::*; use itertools::Itertools; use parking_lot::RwLock; -use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{collections::HashMap, sync::Arc, time::Instant}; @@ -89,8 +92,7 @@ where /// Imports a bunch of unverified extrinsics to every active 
view. pub(super) async fn submit( &self, - source: TransactionSource, - xts: impl IntoIterator> + Clone, + xts: impl IntoIterator)> + Clone, ) -> HashMap, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); @@ -99,7 +101,7 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(source, xts).await) } + async move { (view.at.hash, view.submit_many(xts).await) } }) .collect::>() }; @@ -145,7 +147,7 @@ where pub(super) async fn submit_and_watch( &self, _at: Block::Hash, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; @@ -159,6 +161,7 @@ where .map(|(_, view)| { let view = view.clone(); let xt = xt.clone(); + let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { Ok(watcher) => { diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 33526564d172..c6661d5733c6 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -20,7 +20,7 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc}; +use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc, time::Instant}; use crate::LOG_TARGET; use log::{trace, warn}; @@ -30,8 +30,8 @@ use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ traits::Member, transaction_validity::{ - TransactionLongevity as Longevity, TransactionPriority as Priority, - TransactionSource as Source, TransactionTag as Tag, + TransactionLongevity as Longevity, TransactionPriority as Priority, TransactionSource, + TransactionTag as Tag, }, }; @@ -83,6 +83,38 @@ pub struct PruneStatus { pub pruned: Vec>>, } +/// A transaction source that includes a timestamp indicating when the transaction was submitted. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TimedTransactionSource { + /// The original source of the transaction. + pub source: TransactionSource, + + /// The time at which the transaction was submitted. + pub timestamp: Option, +} + +impl TimedTransactionSource { + /// Creates a new instance with an internal `TransactionSource::InBlock` source and an optional + /// timestamp. + pub fn new_in_block(with_timestamp: bool) -> Self { + Self { source: TransactionSource::InBlock, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::External` source and an optional + /// timestamp. + pub fn new_external(with_timestamp: bool) -> Self { + Self { source: TransactionSource::External, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::Local` source and an optional + /// timestamp. + pub fn new_local(with_timestamp: bool) -> Self { + Self { source: TransactionSource::Local, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an given source and an optional timestamp. + pub fn from_transaction_source(source: TransactionSource, with_timestamp: bool) -> Self { + Self { source, timestamp: with_timestamp.then(Instant::now) } + } +} + /// Immutable transaction #[derive(PartialEq, Eq, Clone)] pub struct Transaction { @@ -102,8 +134,8 @@ pub struct Transaction { pub provides: Vec, /// Should that transaction be propagated. 
pub propagate: bool, - /// Source of that transaction. - pub source: Source, + /// Timed source of that transaction. + pub source: TimedTransactionSource, } impl AsRef for Transaction { @@ -157,7 +189,7 @@ impl Transaction { bytes: self.bytes, hash: self.hash.clone(), priority: self.priority, - source: self.source, + source: self.source.clone(), valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), @@ -448,8 +480,16 @@ impl BasePool Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), - other => other, + Some(worst) => Some( + match (worst.transaction.source.timestamp, current.transaction.source.timestamp) + { + (Some(worst_timestamp), Some(current_timestamp)) + if worst_timestamp > current_timestamp => + current.clone(), + _ if worst.imported_at > current.imported_at => current.clone(), + _ => worst, + }, + ), }); if let Some(worst) = worst { @@ -576,7 +616,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: Source::External, + source: TimedTransactionSource::new_external(false), } } @@ -1095,7 +1135,7 @@ mod tests { ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" +source: TimedTransactionSource { source: TransactionSource::External, timestamp: None }, requires: [03, 02], provides: [04], data: [4]}" .to_owned() ); } diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 2dd8de352c6b..e7da3a4eb6c0 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -181,10 +181,8 @@ impl Pool { pub async fn submit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -195,10 +193,8 @@ impl Pool { pub async fn resubmit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -207,10 +203,10 @@ impl Pool { pub async fn submit_one( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - let res = self.submit_at(at, source, std::iter::once(xt)).await.pop(); + let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -218,7 +214,7 @@ impl Pool { pub async fn submit_and_watch( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { let (_, tx) = self @@ -368,7 +364,7 @@ impl Pool { // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. 
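+		// `TimedTransactionSource` derives `Clone` but not `Copy` (it may carry a submission
+		// timestamp), hence the explicit `clone()` of each pruned transaction's source below.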
let pruned_transactions = - prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone())); + prune_status.pruned.into_iter().map(|tx| (tx.source.clone(), tx.data.clone())); let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; @@ -396,7 +392,7 @@ impl Pool { async fn verify( &self, at: &HashAndNumber, - xts: impl IntoIterator)>, + xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, ) -> IndexMap, ValidatedTransactionFor> { let HashAndNumber { number, hash } = *at; @@ -417,7 +413,7 @@ impl Pool { &self, block_hash: ::Hash, block_number: NumberFor, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, check: CheckBannedBeforeVerify, ) -> (ExtrinsicHash, ValidatedTransactionFor) { @@ -431,7 +427,7 @@ impl Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_hash, source, xt.clone()) + .validate_transaction(block_hash, source.source, xt.clone()) .await; let status = match validation_result { @@ -488,6 +484,7 @@ mod tests { use super::{super::base_pool::Limit, *}; use crate::common::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; + use base::TimedTransactionSource; use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; @@ -497,7 +494,8 @@ mod tests { use substrate_test_runtime::{AccountId, ExtrinsicBuilder, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; - const SOURCE: TransactionSource = TransactionSource::External; + const SOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; #[test] fn should_validate_and_import_transaction() { @@ -545,8 +543,8 @@ mod tests { let initial_hashes = txs.iter().map(|t| api.hash_and_length(t).0).collect::>(); // when - let txs = txs.into_iter().map(|x| Arc::from(x)).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), SOURCE, txs)); + let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); log::debug!("--> {hashes:#?}"); // then diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 860bcff0bace..9061d0e25581 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -589,7 +589,6 @@ fn remove_item(vec: &mut Vec, item: &T) { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource as Source; fn tx(id: u8) -> Transaction> { Transaction { @@ -601,7 +600,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), } } @@ -711,7 +710,7 @@ mod tests { requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), }; // when diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 61a26fb4138c..9a2e269b5eed 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -106,7 +106,6 @@ impl PoolRotator { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource; type Hash = u64; 
type Ex = (); @@ -126,7 +125,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), }; (hash, tx) @@ -192,7 +191,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 18ddc59eee8a..a57e67d8361d 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -30,7 +30,7 @@ use serde::Serialize; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::{self, SaturatedConversion}, - transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, + transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; @@ -62,7 +62,7 @@ impl ValidatedTransaction { pub fn valid_at( at: u64, hash: Hash, - source: TransactionSource, + source: base::TimedTransactionSource, data: Ex, bytes: usize, validity: ValidTransaction, @@ -666,11 +666,28 @@ impl ValidatedPool { self.listener.write().retracted(block_hash) } + //todo: doc + rename! + //MultiTransactionStatusStream pub fn create_dropped_by_limits_stream( &self, ) -> super::listener::DroppedByLimitsStream, BlockHash> { self.listener.write().create_dropped_by_limits_stream() } + + /// Resends ready and future events for all the ready and future transactions that are already + /// in the pool. + /// + /// Intended to be called after cloning the instance of `ValidatedPool`. + pub fn retrigger_notifications(&self) { + let pool = self.pool.read(); + let mut listener = self.listener.write(); + pool.ready().for_each(|r| { + listener.ready(&r.hash, None); + }); + pool.futures().for_each(|f| { + listener.future(&f.hash); + }); + } } fn fire_events(listener: &mut Listener, imported: &base::Imported) diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 888d25d3a0d2..c7796fea5787 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -36,7 +36,10 @@ pub use api::FullChainApi; pub use builder::{Builder, TransactionPoolHandle, TransactionPoolOptions, TransactionPoolType}; pub use common::notification_future; pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; -pub use graph::{base_pool::Limit as PoolLimit, ChainApi, Options, Pool}; +pub use graph::{ + base_pool::{Limit as PoolLimit, TimedTransactionSource}, + ChainApi, Options, Pool, +}; use single_state_txpool::prune_known_txs_for_block; pub use single_state_txpool::{BasicPool, RevalidationType}; pub use transaction_pool_wrapper::TransactionPoolWrapper; diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index 5ef726c9f7d3..ed5824c884ef 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -88,7 +88,7 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source, ext.data.clone()) + 
api.validate_transaction(at, ext.source.source.clone(), ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) @@ -121,7 +121,7 @@ async fn batch_revalidate( ValidatedTransaction::valid_at( block_number.saturated_into::(), ext_hash, - ext.source, + ext.source.clone(), ext.data.clone(), api.hash_and_length(&ext.data).1, validity, @@ -375,9 +375,9 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, graph::Pool, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; @@ -398,7 +398,7 @@ mod tests { let uxt_hash = block_on(pool.submit_one( &han_of_block0, - TransactionSource::External, + TimedTransactionSource::new_external(false), uxt.clone().into(), )) .expect("Should be valid"); @@ -433,14 +433,15 @@ mod tests { let han_of_block0 = api.expect_hash_and_number(0); let unknown_block = H256::repeat_byte(0x13); - let uxt_hashes = block_on(pool.submit_at( - &han_of_block0, - TransactionSource::External, - vec![uxt0.into(), uxt1.into()], - )) - .into_iter() - .map(|r| r.expect("Should be valid")) - .collect::>(); + let source = TimedTransactionSource::new_external(false); + let uxt_hashes = + block_on(pool.submit_at( + &han_of_block0, + vec![(source.clone(), uxt0.into()), (source, uxt1.into())], + )) + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::>(); assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index 0826b95cf070..2668fc9a0853 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -29,8 +29,7 @@ use crate::{ error, log_xt::log_xt_trace, }, - graph, - graph::{ExtrinsicHash, IsValidator}, + graph::{self, base_pool::TimedTransactionSource, ExtrinsicHash, IsValidator}, PolledIterator, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -262,7 +261,12 @@ where xts: Vec>, ) -> PoolFuture, Self::Error>>, Self::Error> { let pool = self.pool.clone(); - let xts = xts.into_iter().map(Arc::from).collect::>(); + let xts = xts + .into_iter() + .map(|xt| { + (TimedTransactionSource::from_transaction_source(source, false), Arc::from(xt)) + }) + .collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); @@ -270,7 +274,7 @@ where let number = self.api.resolve_block_number(at); async move { let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, source, xts).await) + Ok(pool.submit_at(&at, xts).await) } .boxed() } @@ -289,7 +293,8 @@ where let number = self.api.resolve_block_number(at); async move { let at = HashAndNumber { hash: at, number: number? }; - pool.submit_one(&at, source, xt).await + pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) + .await } .boxed() } @@ -309,7 +314,13 @@ where async move { let at = HashAndNumber { hash: at, number: number? 
}; - let watcher = pool.submit_and_watch(&at, source, xt).await?; + let watcher = pool + .submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await?; Ok(watcher.into_stream().boxed()) } @@ -477,7 +488,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(false), Arc::from(xt), bytes, validity, @@ -681,23 +692,34 @@ where resubmit_transactions.extend( //todo: arctx - we need to get ref from somewhere - block_transactions.into_iter().map(Arc::from).filter(|tx| { - let tx_hash = pool.hash_of(tx); - let contains = pruned_log.contains(&tx_hash); - - // need to count all transactions, not just filtered, here - resubmitted_to_report += 1; - - if !contains { - log::trace!( - target: LOG_TARGET, - "[{:?}]: Resubmitting from retracted block {:?}", - tx_hash, - hash, - ); - } - !contains - }), + block_transactions + .into_iter() + .map(Arc::from) + .filter(|tx| { + let tx_hash = pool.hash_of(tx); + let contains = pruned_log.contains(&tx_hash); + + // need to count all transactions, not just filtered, here + resubmitted_to_report += 1; + + if !contains { + log::trace!( + target: LOG_TARGET, + "[{:?}]: Resubmitting from retracted block {:?}", + tx_hash, + hash, + ); + } + !contains + }) + .map(|tx| { + ( + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TimedTransactionSource::new_external(false), + tx, + ) + }), ); self.metrics.report(|metrics| { @@ -705,14 +727,7 @@ where }); } - pool.resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ) - .await; + pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } let extra_pool = pool.clone(); From 7b96ca38743c61ceceeea35fdc897e951719fc1b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:27:44 +0100 Subject: [PATCH 15/46] adjusting tests --- .../client/transaction-pool/tests/fatp.rs | 4 +- .../transaction-pool/tests/fatp_limits.rs | 96 +++++++------------ .../client/transaction-pool/tests/pool.rs | 28 +++--- 3 files changed, 51 insertions(+), 77 deletions(-) diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index a9ad7110f00a..c51ca6e17663 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -2267,12 +2267,12 @@ fn fatp_avoid_stuck_transaction() { assert_pool_status!(header06.hash(), &pool, 0, 0); - let event = finalized_block_event(&pool, header03.hash(), header06.hash()); + let header07 = api.push_block(7, vec![], true); + let event = finalized_block_event(&pool, header03.hash(), header07.hash()); block_on(pool.maintain(event)); let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::>(); log::debug!("xt4i_events: {:#?}", xt4i_events); - assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); assert_eq!(pool.mempool_len(), (0, 0)); } diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index fbc9075af0aa..03a6050c39fe 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -642,12 +642,8 @@ fn 
fatp_limits_future_size_works() { assert_eq!(pool.mempool_len().0, 3); } -// this test is bad by design: xt0, xt1 cannot be dropped by the pool after finalizing 02 - there -// can be new fork where xt0, xt1 will be valid. -// todo: do we need this test? #[test] -#[ignore] -fn fatp_limits_watcher_xxx() { +fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() { sp_tracing::try_init_simple(); let builder = TestPoolBuilder::new(); @@ -670,8 +666,10 @@ fn fatp_limits_watcher_xxx() { let xt4 = uxt(Eve, 600); let xt5 = uxt(Ferdie, 700); - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 2); @@ -679,8 +677,10 @@ fn fatp_limits_watcher_xxx() { let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); assert_pool_status!(header02.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 4); @@ -688,8 +688,10 @@ fn fatp_limits_watcher_xxx() { let header03 = api.push_block_with_parent(header02.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); - let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); - let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); assert_pool_status!(header03.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 6); @@ -714,63 +716,32 @@ fn fatp_limits_watcher_xxx() { block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); assert!(!pool.status_all().contains_key(&header02.hash())); + + //view 01 was dropped assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); - //todo: can we do better? We don't have API to check if event was processed internally. 
- let mut counter = 0; - while pool.mempool_len().1 != 4 { - sleep(std::time::Duration::from_millis(1)); - counter = counter + 1; - if counter > 20 { - assert!(false, "timeout"); - } - } - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(pool.mempool_len().1, 6); block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); - log::info!("status: {:?}", pool.status_all()); + + //no revalidation has happened yet, all txs are kept + assert_eq!(pool.mempool_len().1, 6); + + //view 03 is still there assert!(!pool.status_all().contains_key(&header03.hash())); + + //view 02 was dropped assert!(pool.ready_at(header02.hash()).now_or_never().is_none()); - // - // - // - // - - // assert_pool_status!(header02e.hash(), &pool, 0, 0); - // - // let header02f = api.push_block_with_parent(header01.hash(), vec![], true); - // block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), - // header02f.hash()))); assert_pool_status!(header02f.hash(), &pool, 2, 0); - // assert_ready_iterator!(header02f.hash(), pool, [xt1, xt2]); - // - // let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, - // xt3.clone())).unwrap(); let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), - // SOURCE, xt4.clone())).unwrap(); let result5 = block_on(pool.submit_and_watch(invalid_hash(), - // SOURCE, xt5.clone())).map(|_| ()); - // - // //xt5 hits internal mempool limit - // assert!(matches!(result5.unwrap_err().0, TxPoolError::ImmediatelyDropped)); - // - // assert_pool_status!(header02e.hash(), &pool, 2, 0); - // assert_ready_iterator!(header02e.hash(), pool, [xt3, xt4]); - // assert_eq!(pool.mempool_len().1, 4); - // - // let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); - // assert_eq!( - // xt1_status, - // vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 1))] - // ); - // - // let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); - // assert_eq!( - // xt2_status, - // vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 2))] - // ); - // - // let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(1).collect::>(); - // assert_eq!(xt3_status, vec![TransactionStatus::Ready]); - // let xt4_status = futures::executor::block_on_stream(xt4_watcher).take(1).collect::>(); - // assert_eq!(xt4_status, vec![TransactionStatus::Ready]); + let mut prev_header = header03; + for n in 5..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + //now revalidation has happened, all txs are dropped + assert_eq!(pool.mempool_len().1, 0); } macro_rules! 
assert_future_iterator { @@ -867,6 +838,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { (xt4, xt4_watcher), (xt5, xt5_watcher), ]; + let future_hashes = pool.futures().iter().map(|t| t.hash).collect::>(); to_be_checked.retain(|x| !future_hashes.contains(&api.hash_and_length(&x.0).0)); diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index ed0fd7d4e655..e556ba9875f1 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -80,12 +80,14 @@ fn create_basic_pool(test_api: TestApi) -> BasicPool { create_basic_pool_with_genesis(Arc::from(test_api)).0 } +const TSOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool @@ -99,9 +101,9 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -116,7 +118,7 @@ fn multiple_submission_should_work() { fn early_nonce_should_be_culled() { sp_tracing::try_init_simple(); let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 208).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 208).into())) .unwrap(); log::debug!("-> {:?}", pool.validated_pool().status()); @@ -132,7 +134,7 @@ fn early_nonce_should_be_culled() { fn late_nonce_should_be_queued() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -141,7 +143,7 @@ fn late_nonce_should_be_queued() { .collect(); assert_eq!(pending, Vec::::new()); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -155,9 +157,9 @@ fn late_nonce_should_be_queued() { fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -183,9 +185,9 @@ fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); let hash = - 
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool @@ -196,7 +198,7 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -224,7 +226,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt0 = Arc::from(uxt(Alice, 209)); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt0.clone())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) .expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(api.validation_requests().len(), 1); @@ -242,7 +244,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); let xt1 = uxt(Alice, 211); - block_on(pool.submit_one(&api.expect_hash_and_number(2), SOURCE, xt1.clone().into())) + block_on(pool.submit_one(&api.expect_hash_and_number(2), TSOURCE, xt1.clone().into())) .expect("2. Imported"); assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.validated_pool().status().ready, 1); From 69567b841f461cd788648c1b058cc54be66ce269 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 12 Nov 2024 18:54:50 +0100 Subject: [PATCH 16/46] fixes --- .../fork_aware_txpool/fork_aware_txpool.rs | 1 + .../transaction-pool/src/graph/base_pool.rs | 18 ++++++++++----- .../transaction-pool/tests/fatp_limits.rs | 22 ++++++++----------- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 4ef070615acb..d90f5f1fedd6 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1088,6 +1088,7 @@ where let watched_xts_filtered = watched_xts .into_iter() + .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) .filter(|(hash, _)| !included_xts.contains(&hash)) .map(|(tx_hash, tx)| (tx_hash, tx.source(), tx.tx())) .collect::>(); diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index c6661d5733c6..3d70490de900 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -483,11 +483,19 @@ impl BasePool Some( match (worst.transaction.source.timestamp, current.transaction.source.timestamp) { - (Some(worst_timestamp), Some(current_timestamp)) - if worst_timestamp > current_timestamp => - current.clone(), - _ if worst.imported_at > current.imported_at => current.clone(), - _ => worst, + (Some(worst_timestamp), Some(current_timestamp)) => { + if worst_timestamp > 
current_timestamp { + current.clone() + } else { + worst + } + }, + _ => + if worst.imported_at > current.imported_at { + current.clone() + } else { + worst + }, }, ), }); diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index 03a6050c39fe..535478cd5aa5 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -813,6 +813,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { assert_pool_status!(header04.hash(), &pool, 0, 2); assert_eq!(pool.futures().len(), 2); + assert_future_iterator!(pool, [xt4, xt5]); block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); assert_eq!(pool.active_views_count(), 1); @@ -830,20 +831,15 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { assert_pool_status!(header04.hash(), &pool, 0, 2); assert_eq!(pool.futures().len(), 2); - let mut to_be_checked = vec![ - (xt0, xt0_watcher), - (xt1, xt1_watcher), - (xt2, xt2_watcher), - (xt3, xt3_watcher), - (xt4, xt4_watcher), - (xt5, xt5_watcher), - ]; - - let future_hashes = pool.futures().iter().map(|t| t.hash).collect::>(); - to_be_checked.retain(|x| !future_hashes.contains(&api.hash_and_length(&x.0).0)); - + let to_be_checked = vec![xt0_watcher, xt1_watcher, xt2_watcher, xt3_watcher]; for x in to_be_checked { - let x_status = futures::executor::block_on_stream(x.1).take(2).collect::>(); + let x_status = futures::executor::block_on_stream(x).take(2).collect::>(); assert_eq!(x_status, vec![TransactionStatus::Future, TransactionStatus::Dropped]); } + + let to_be_checked = vec![xt4_watcher, xt5_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(1).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future]); + } } From 414ec3ccad154c9a2aab0586bfa2d2c884fd140f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 14:33:37 +0100 Subject: [PATCH 17/46] fatp: better support for usurped transactions --- .../src/fork_aware_txpool/dropped_watcher.rs | 24 +-- .../fork_aware_txpool/fork_aware_txpool.rs | 57 ++++- .../fork_aware_txpool/multi_view_listener.rs | 5 + .../src/fork_aware_txpool/tx_mem_pool.rs | 12 +- .../src/fork_aware_txpool/view.rs | 6 + .../src/fork_aware_txpool/view_store.rs | 197 +++++++++++++++++- .../transaction-pool/tests/fatp_prios.rs | 82 +++++--- 7 files changed, 306 insertions(+), 77 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 8de10555ea33..c0a2f470d83d 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -191,8 +191,9 @@ where ) -> Option>> { trace!( target: LOG_TARGET, - "dropped_watcher: handle_event: event:{:?} views:{:?}, ", - event, + "dropped_watcher: handle_event: event:{event:?} from:{block_hash:?} future_views:{:?} ready_views:{:?} stream_map views:{:?}, ", + self.future_transaction_views.get(&event.0), + self.ready_transaction_views.get(&event.0), self.stream_map.keys().collect::>(), ); let (tx_hash, status) = event; @@ -219,23 +220,8 @@ where return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } }, - // todo: - // 1. usurpued shall be sent unconditionally - // 2. 
fatp shall act for every usurped message - it should remove tx from every view and - // replace it with new one (also in mempool). - TransactionStatus::Usurped(by) => { - if let Entry::Occupied(mut views_keeping_tx_valid) = - self.ready_transaction_views.entry(tx_hash) - { - views_keeping_tx_valid.get_mut().remove(&block_hash); - if views_keeping_tx_valid.get().is_empty() { - return Some(DroppedTransaction::new_usurped(tx_hash, by)) - } - } else { - debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); - return Some(DroppedTransaction::new_usurped(tx_hash, by)) - } - }, + TransactionStatus::Usurped(by) => + return Some(DroppedTransaction::new_usurped(tx_hash, by)), _ => {}, }; None diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index d90f5f1fedd6..4dad7cb6bdad 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,7 +31,7 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::revalidation_worker, + fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -201,9 +201,14 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); + let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -220,8 +225,8 @@ where ( Self { mempool, - api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -237,14 +242,17 @@ where ) } - /// Monitors the stream of dropped transactions and removes them from the mempool. + /// Monitors the stream of dropped transactions and removes them from the mempool and + /// view_store. /// /// This asynchronous task continuously listens for dropped transaction notifications provided /// within `dropped_stream` and ensures that these transactions are removed from the `mempool` - /// and `import_notification_sink` instances. + /// and `import_notification_sink` instances. For Usurped events, the transaction is also + /// removed from the view_store. 
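Editorial aside (not part of the diffs): the `Usurped` status forwarded unconditionally above is produced when a ready transaction is displaced by another one that provides the same tag (for example the same sender and nonce) with a strictly higher priority. A minimal, self-contained sketch of that rule; `ReadyTx`, `insert_ready` and the plain `u64` hashes are illustrative stand-ins, not the real pool types:

#[derive(Clone)]
struct ReadyTx {
    hash: u64,
    provides: &'static str, // e.g. encodes "sender + nonce"
    priority: u64,
}

/// Inserts `incoming` and returns the hash of a transaction it usurped, if any.
fn insert_ready(queue: &mut Vec<ReadyTx>, incoming: ReadyTx) -> Option<u64> {
    if let Some(pos) = queue.iter().position(|t| t.provides == incoming.provides) {
        if incoming.priority > queue[pos].priority {
            let usurped = queue[pos].hash;
            queue[pos] = incoming;
            // The old transaction's watcher would now receive
            // `TransactionStatus::Usurped(<hash of the new transaction>)`.
            return Some(usurped);
        }
        // A lower (or equal) priority duplicate is rejected instead of replacing anything.
        return None;
    }
    queue.push(incoming);
    None
}

The commit then makes sure such an event is acted upon in every view (and in the mempool), not only in the single view that reported it.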
async fn dropped_monitor_task( mut dropped_stream: StreamOfDropped, mempool: Arc>, + view_store: Arc>, import_notification_sink: MultiViewImportNotificationSink< Block::Hash, ExtrinsicHash, @@ -255,10 +263,33 @@ where log::debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); break; }; - log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification, removing", dropped); - let tx_hash = dropped.tx_hash; - mempool.remove_dropped_transaction(dropped).await; - import_notification_sink.clean_notified_items(&[tx_hash]); + let dropped_tx_hash = dropped.tx_hash; + log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification {:?}, removing", dropped_tx_hash,dropped.reason); + match dropped.reason { + DroppedReason::Usurped(new_tx_hash) => { + if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { + view_store + .replace_transaction( + new_tx.source(), + new_tx.tx(), + dropped_tx_hash, + new_tx.is_watched(), + ) + .await; + } else { + log::trace!( + target:LOG_TARGET, + "error: dropped_monitor_task: no entry in mempool for new transaction {:?}", + new_tx_hash, + ); + } + }, + DroppedReason::LimitsEnforced => {}, + }; + + mempool.remove_dropped_transaction(&dropped_tx_hash).await; + view_store.listener.transaction_dropped(dropped); + import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } } @@ -293,9 +324,13 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -311,8 +346,8 @@ where Self { mempool, - api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 52acb22d8a6b..a00234a99808 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -285,6 +285,11 @@ where Self { controllers: Default::default() } } + /// Returns `true` if the listener contains a stream controller for the specified hash. + pub fn contains_tx(&self, tx_hash: &ExtrinsicHash) -> bool { + self.controllers.read().contains_key(tx_hash) + } + /// Creates an external aggregated stream of events for given transaction. /// /// This method initializes an `ExternalWatcherContext` for the provided transaction hash, sets diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 09b81be77663..463ff938a7fb 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -98,7 +98,7 @@ where /// Shall the progress of transaction be watched. /// /// Was transaction sent with `submit_and_watch`. 
- fn is_watched(&self) -> bool { + pub(crate) fn is_watched(&self) -> bool { self.watched } @@ -328,15 +328,13 @@ where self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } - /// Removes transactions from the memory pool which are specified by the given list of hashes - /// and send the `Dropped` event to the listeners of these transactions. + /// Removes transaction from the memory pool which are specified by the given list of hashes. pub(super) async fn remove_dropped_transaction( &self, - dropped: DroppedTransaction>, - ) { + dropped: &ExtrinsicHash, + ) -> Option>> { log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); - self.transactions.write().remove(&dropped.tx_hash); - self.listener.transaction_dropped(dropped); + self.transactions.write().remove(dropped) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 52855994d9b9..dd3cf6bddcc6 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -454,4 +454,10 @@ where ); } } + + /// Returns true if the transaction hash is already imported into the view + pub(super) fn is_imported(&self, tx_hash: &ExtrinsicHash) -> bool { + const IGNORE_BANNED: bool = false; + self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_ok() + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 41e3fff29f5f..47d2f12b4033 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -37,7 +37,38 @@ use parking_lot::RwLock; use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::{collections::HashMap, sync::Arc, time::Instant}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, + time::Instant, +}; + +/// Helper struct to keep the context for transaction replacements. +#[derive(Clone)] +struct PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Indicates if the new transaction was already submitted to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, + /// New transaction replacing the old one. + xt: ExtrinsicFor, + /// Source of the transaction. + source: TimedTransactionSource, + /// Inidicates if transaction is watched. + watched: bool, +} + +impl PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Creates new unprocessed instance of pending transaction replacement. + fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { + Self { processed: false, xt, source, watched } + } +} /// The helper structure encapsulates all the views. pub(super) struct ViewStore @@ -65,6 +96,13 @@ where pub(super) most_recent_view: RwLock>, /// The controller of multi view dropped stream. pub(super) dropped_stream_controller: MultiViewDroppedWatcherController, + /// The map used to synchronize replacement of transactions between maintain and dropped + /// notifcication threads. 
It is meant to assure that replaced transaction is also removed from + /// newly built views in maintain process. + /// + /// The map's key is hash of replaced extrinsic. + pending_txs_replacements: + RwLock, PendingTxReplacement>>, } impl ViewStore @@ -86,6 +124,7 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, + pending_txs_replacements: Default::default(), } } @@ -332,12 +371,16 @@ where /// - moved to the inactive views set (`inactive_views`), /// - removed from the multi view listeners. /// - /// The `most_recent_view` is update with the reference to the newly inserted view. + /// The `most_recent_view` is updated with the reference to the newly inserted view. + /// + /// If there are any pending tx replacments, they are applied to the new view. pub(super) async fn insert_new_view( &self, view: Arc>, tree_route: &TreeRoute, ) { + self.apply_pending_tx_replacements(view.clone()).await; + //note: most_recent_view must be synced with changes in in/active_views. { let mut most_recent_view_lock = self.most_recent_view.write(); @@ -389,8 +432,10 @@ where let mut removed_views = vec![]; { - self.active_views - .read() + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + + active_views .iter() .filter(|(hash, v)| !match finalized_number { Err(_) | Ok(None) => **hash == finalized_hash, @@ -399,11 +444,8 @@ where }) .map(|(_, v)| removed_views.push(v.at.hash)) .for_each(drop); - } - { - self.inactive_views - .read() + inactive_views .iter() .filter(|(_, v)| !match finalized_number { Err(_) | Ok(None) => false, @@ -445,6 +487,7 @@ where //clean up older then finalized { let mut active_views = self.active_views.write(); + let mut inactive_views = self.inactive_views.write(); active_views.retain(|hash, v| { let retain = match finalized_number { Err(_) | Ok(None) => *hash == finalized_hash, @@ -456,10 +499,7 @@ where } retain }); - } - { - let mut inactive_views = self.inactive_views.write(); inactive_views.retain(|hash, v| { let retain = match finalized_number { Err(_) | Ok(None) => false, @@ -507,4 +547,139 @@ where futures::future::join_all(finish_revalidation_futures).await; log::trace!(target:LOG_TARGET,"finish_background_revalidations took {:?}", start.elapsed()); } + + /// Replaces an existing transaction in the view_store with a new one. + /// + /// Attempts to replace a transaction identified by `replaced` with a new transaction `xt`. + /// + /// Before submitting a transaction to the views, the new *unprocessed* transaction replacement + /// record will be inserted into a pending replacement map. Once the submission to all the views + /// is accomplished, the record is marked as *processed*. + /// + /// This map is later applied in `insert_new_view` method executed from different thread. + /// + /// If the transaction is already being replaced, it will simply return without making + /// changes. 
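Editorial aside (not part of the diffs): the pending-replacement map documented above is a small two-phase handshake between the dropped-notification task and the maintain task that builds new views. A rough, self-contained model of it using plain std types; `TxHash`, `Pending`, `Replacements` and the submit closures are illustrative stand-ins for the real generic machinery:

use std::collections::HashMap;
use std::sync::RwLock;

type TxHash = u64;

struct Pending {
    new_tx: TxHash,
    processed: bool,
}

#[derive(Default)]
struct Replacements(RwLock<HashMap<TxHash, Pending>>);

impl Replacements {
    /// Dropped-notification side: record the intent first, then submit the new
    /// transaction to the views that exist right now, then mark the record as processed.
    fn replace(&self, replaced: TxHash, new_tx: TxHash, submit_to_current_views: impl Fn(TxHash)) {
        {
            let mut map = self.0.write().unwrap();
            if map.contains_key(&replaced) {
                return; // a replacement for this hash is already in flight
            }
            map.insert(replaced, Pending { new_tx, processed: false });
        }
        submit_to_current_views(new_tx);
        if let Some(p) = self.0.write().unwrap().get_mut(&replaced) {
            p.processed = true;
        }
    }

    /// Maintain side: a freshly built view re-applies every recorded replacement,
    /// so a view created in the middle of `replace` cannot miss it.
    fn apply_to_new_view(&self, submit_to_new_view: impl Fn(TxHash)) {
        for p in self.0.read().unwrap().values() {
            submit_to_new_view(p.new_tx);
        }
        // The real code then prunes this map based on the `processed` flag.
        self.0.write().unwrap().retain(|_, p| p.processed);
    }
}

Recording the intent before submitting, and re-applying recorded replacements to every freshly inserted view, is what keeps a concurrently built view from resurrecting the usurped transaction.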
+	pub(super) async fn replace_transaction(
+		&self,
+		source: TimedTransactionSource,
+		xt: ExtrinsicFor<ChainApi>,
+		replaced: ExtrinsicHash<ChainApi>,
+		watched: bool,
+	) {
+		if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) {
+			entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched));
+		} else {
+			return
+		};
+
+		let xt_hash = self.api.hash_and_length(&xt).0;
+		log::trace!(target:LOG_TARGET,"[{replaced:?}] replace_transaction with {xt_hash:?}, w:{watched}");
+
+		self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await;
+
+		if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) {
+			replacement.processed = true;
+		}
+	}
+
+	/// Applies pending transaction replacements to the specified view.
+	///
+	/// After application, all already processed replacements are removed.
+	async fn apply_pending_tx_replacements(&self, view: Arc<View<ChainApi>>) {
+		let mut futures = vec![];
+		for replacement in self.pending_txs_replacements.read().values() {
+			let xt_hash = self.api.hash_and_length(&replacement.xt).0;
+			futures.push(self.replace_transaction_in_view(
+				view.clone(),
+				replacement.source.clone(),
+				replacement.xt.clone(),
+				xt_hash,
+				replacement.watched,
+			));
+		}
+		let _results = futures::future::join_all(futures).await;
+		self.pending_txs_replacements.write().retain(|_, r| r.processed);
+	}
+
+	/// Submits `xt` to the given view.
+	///
+	/// For a watched transaction, the stream is added to the listener.
+	async fn replace_transaction_in_view(
+		&self,
+		view: Arc<View<ChainApi>>,
+		source: TimedTransactionSource,
+		xt: ExtrinsicFor<ChainApi>,
+		xt_hash: ExtrinsicHash<ChainApi>,
+		watched: bool,
+	) {
+		if watched {
+			match view.submit_and_watch(source, xt).await {
+				Ok(watcher) => {
+					self.listener.add_view_watcher_for_tx(
+						xt_hash,
+						view.at.hash,
+						watcher.into_stream().boxed(),
+					);
+				},
+				Err(e) => {
+					log::trace!(
+						target:LOG_TARGET,
+						"[{:?}] replace_transaction: submit_and_watch to {} failed {}",
+						xt_hash, view.at.hash, e
+					);
+				},
+			}
+		} else {
+			if let Some(Err(e)) = view.submit_many(std::iter::once((source, xt))).await.pop() {
+				log::trace!(
+					target:LOG_TARGET,
+					"[{:?}] replace_transaction: submit to {} failed {}",
+					xt_hash, view.at.hash, e
+				);
+			}
+		}
+	}
+
+	/// Sends `xt` to every view (both active and inactive) containing `replaced` extrinsics.
+	///
+	/// It is assumed that the transaction is already known by the pool. Intended to be called when `xt`
+ async fn replace_transaction_in_views( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + xt_hash: ExtrinsicHash, + replaced: ExtrinsicHash, + watched: bool, + ) { + if watched && !self.listener.contains_tx(&xt_hash) { + log::trace!( + target:LOG_TARGET, + "error: replace_transaction_in_views: no listener for watched transaction {:?}", + xt_hash, + ); + return; + } + + let submit_futures = { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + active_views + .iter() + .chain(inactive_views.iter()) + .filter(|(_, view)| !view.is_imported(&replaced)) + .map(|(_, view)| { + self.replace_transaction_in_view( + view.clone(), + source.clone(), + xt.clone(), + xt_hash, + watched, + ) + }) + .collect::>() + }; + let _results = futures::future::join_all(submit_futures).await; + } } diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index e254a3d8171d..544ea108306a 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -20,15 +20,10 @@ pub mod fatp_common; -use fatp_common::{ - finalized_block_event, invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE, -}; +use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; use futures::{executor::block_on, FutureExt}; use sc_transaction_pool::ChainApi; -use sc_transaction_pool_api::{ - error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, -}; -use std::{thread::sleep, time::Duration}; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; @@ -59,28 +54,6 @@ fn fatp_prio_ready_higher_evicts_lower() { log::info!("len: {:?}", pool.status_all()[&header01.hash()]); assert_ready_iterator!(header01.hash(), pool, [xt1]); assert_pool_status!(header01.hash(), &pool, 1, 0); - - // let results = block_on(futures::future::join_all(submissions)); - // assert!(results.iter().all(Result::is_ok)); - // //charlie was not included into view: - // assert_pool_status!(header01.hash(), &pool, 2, 0); - - // //branch with alice transactions: - // let header02b = api.push_block(2, vec![xt1.clone(), xt2.clone()], true); - // let event = new_best_block_event(&pool, Some(header01.hash()), header02b.hash()); - // block_on(pool.maintain(event)); - // assert_eq!(pool.mempool_len().0, 2); - // assert_pool_status!(header02b.hash(), &pool, 0, 0); - // assert_ready_iterator!(header02b.hash(), pool, []); - // - // //branch with alice/charlie transactions shall also work: - // let header02a = api.push_block(2, vec![xt0.clone(), xt1.clone()], true); - // api.set_nonce(header02a.hash(), Alice.into(), 201); - // let event = new_best_block_event(&pool, Some(header02b.hash()), header02a.hash()); - // block_on(pool.maintain(event)); - // assert_eq!(pool.mempool_len().0, 2); - // // assert_pool_status!(header02a.hash(), &pool, 1, 0); - // assert_ready_iterator!(header02a.hash(), pool, [xt2]); } #[test] @@ -161,3 +134,54 @@ fn fatp_prio_watcher_future_higher_evicts_lower() { assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); assert_pool_status!(header01.hash(), &pool, 2, 0); } + +#[test] +fn fatp_prio_watcher_ready_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = 
builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 1, 0); + assert_ready_iterator!(header03a.hash(), pool, [xt0]); + assert_pool_status!(header03b.hash(), &pool, 1, 0); + assert_ready_iterator!(header03b.hash(), pool, [xt0]); + assert_ready_iterator!(header01.hash(), pool, [xt0]); + assert_ready_iterator!(header02.hash(), pool, [xt0]); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + assert_ready_iterator!(header03a.hash(), pool, [xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt1]); +} From 0cf6043ff9f8da4625d493541f8f4c3bff68e2fb Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Thu, 14 Nov 2024 15:36:25 +0000 Subject: [PATCH 18/46] Update from michalkucharczyk running command 'prdoc --bump major --audience node_dev' --- prdoc/pr_6405.prdoc | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 prdoc/pr_6405.prdoc diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc new file mode 100644 index 000000000000..c75825f172c3 --- /dev/null +++ b/prdoc/pr_6405.prdoc @@ -0,0 +1,28 @@ +title: '`fatxpool`: handling limits and priorities improvements' +doc: +- audience: Node Dev + description: |- + This PR provides a number of improvements around handling limits and priorities in the fork-aware transaction pool. + + + #### Notes to reviewers. + Following are the major changes: + 1. #### [Better support](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/414ec3ccad154c9a2aab0586bfa2d2c884fd140f) for `Usurped` transactions + + When any view reports an `Usurped` transaction (replaced by other with higher priority) it is removed from all the views (also inactive). Removal is implemented by simply submitting usurper transaction to all the views. It is also ensured that usurped tx will not sneak into the `view_store` in newly created view. + + 1. #### [`TimedTransactionSource`](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/f10590f3bde69b31250761a5b10802fb139ab2b2) introduced: + + Every view now has an information when the transaction entered the pool. 
Enforce limits (now only for future txs) uses this timestamp to find worst transactions. Having common timestamp ensures coherent assessment of the transaction's transaction across different views. This also could later be used to select which ready transaction shall be dropped. + + 1. #### `DroppedWatcher` [improved logic](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/560db28c987dd1e634119788ebc8318967df206b) for future transactions + For future transaction - if the last referencing view is removed, the transaction will be dropped from the pool. This prevents future unincluded and un-promoted transactions from staying in the pool for long time. + + And some minor changes: + - `graph::BasePool`: [handling priorities](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/c9f2d39355853d034fdbc6ea31e4e0e5bf34cb6a) for future transaction improved (previously transaction with lower prio was reported as failed), + - `graph::listener`: dedicated `limit_enforced`/`usurped`/`dropped` [calls added](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/7b58a68cccfcf372321ea41826fbe9d4222829cf), + - flaky test [fixed](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/e0a7bc6c048245943796839b166505e2aecdbd7d) + - new tests added. +crates: +- name: sc-transaction-pool + bump: major From 44a0096f0abde96a297571aa3483064e2dad7d11 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:46:42 +0100 Subject: [PATCH 19/46] one more test + future_at added --- .../fork_aware_txpool/fork_aware_txpool.rs | 10 +++ .../src/fork_aware_txpool/view_store.rs | 12 +++- .../transaction-pool/tests/fatp_common/mod.rs | 14 +++++ .../transaction-pool/tests/fatp_limits.rs | 23 ++----- .../transaction-pool/tests/fatp_prios.rs | 62 +++++++++++++++++++ 5 files changed, 101 insertions(+), 20 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 4dad7cb6bdad..c306d896ab7d 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -406,6 +406,16 @@ where self.mempool.unwatched_and_watched_count() } + /// Returns a set of future transactions for given block hash. + /// + /// Intended for logging / tests. + pub fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.view_store.futures_at(at) + } + /// Returns a best-effort set of ready transactions for a given block, without executing full /// maintain process. /// diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 47d2f12b4033..605b2380c822 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -303,12 +303,20 @@ where ) -> Vec, ExtrinsicFor>> { self.most_recent_view .read() - .map(|at| self.get_view_at(at, true)) + .map(|at| self.futures_at(at)) .flatten() - .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) .unwrap_or_default() } + /// Returns a list of future transactions in the view at given block hash. 
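Editorial aside (not part of the diffs): the `TimedTransactionSource` mentioned in the prdoc above attaches a submission timestamp to each transaction, and the `base_pool` hunk earlier in this series appears to use it to pick a single, view-independent eviction candidate among future transactions. A simplified sketch of that selection, with `TimedTx` and its plain `u64` fields as stand-ins for the real types:

use std::time::Instant;

struct TimedTx {
    hash: u64,
    /// `None` models a source that carries no timestamp.
    timestamp: Option<Instant>,
    /// Fallback ordering: per-view import sequence number.
    imported_at: u64,
}

/// Pick the future transaction that has been waiting the longest: earliest
/// timestamp first, earliest import index as a fallback.
fn eviction_candidate(txs: &[TimedTx]) -> Option<&TimedTx> {
    txs.iter().reduce(|worst, current| match (worst.timestamp, current.timestamp) {
        (Some(w), Some(c)) if w > c => current,
        (Some(_), Some(_)) => worst,
        _ if worst.imported_at > current.imported_at => current,
        _ => worst,
    })
}

Because the timestamp is assigned once at submission, every view agrees on which transaction has waited the longest, which is the coherence property the prdoc describes.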
+ pub(super) fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.get_view_at(at, true) + .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) + } + /// Collects all the transactions included in the blocks on the provided `tree_route` and /// triggers finalization event for them. /// diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index 15f2b7f79c14..aecd83360f1e 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -201,6 +201,20 @@ macro_rules! assert_ready_iterator { }}; } +#[macro_export] +macro_rules! assert_future_iterator { + ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ + let futures = $pool.futures_at($hash).unwrap(); + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?}", futures); + log::debug!(target:LOG_TARGET, "output: {:#?}", expected); + assert_eq!(expected.len(), futures.len()); + let hsf = futures.iter().map(|a| a.hash).collect::>(); + let hse = expected.into_iter().collect::>(); + assert_eq!(hse,hsf); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index 535478cd5aa5..afd8183957a8 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -28,7 +28,7 @@ use sc_transaction_pool::ChainApi; use sc_transaction_pool_api::{ error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, }; -use std::{collections::HashSet, thread::sleep}; +use std::thread::sleep; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; @@ -744,19 +744,6 @@ fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() assert_eq!(pool.mempool_len().1, 0); } -macro_rules! 
assert_future_iterator { - ($pool:expr, [$( $xt:expr ),*]) => {{ - let futures = $pool.futures(); - let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; - log::debug!(target:LOG_TARGET, "expected: {:#?}", futures); - log::debug!(target:LOG_TARGET, "output: {:#?}", expected); - assert_eq!(expected.len(), futures.len()); - let hsf = futures.iter().map(|a| a.hash).collect::>(); - let hse = expected.into_iter().collect::>(); - assert_eq!(hse,hsf); - }}; -} - #[test] fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { sp_tracing::try_init_simple(); @@ -786,7 +773,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { assert_pool_status!(header01.hash(), &pool, 0, 2); assert_eq!(pool.mempool_len().1, 2); - assert_future_iterator!(pool, [xt0, xt1]); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); @@ -796,7 +783,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { assert_pool_status!(header02.hash(), &pool, 0, 2); assert_eq!(pool.mempool_len().1, 4); - assert_future_iterator!(pool, [xt2, xt3]); + assert_future_iterator!(header02.hash(), pool, [xt2, xt3]); let header03 = api.push_block_with_parent(header02.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); @@ -806,14 +793,14 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { assert_pool_status!(header03.hash(), &pool, 0, 2); assert_eq!(pool.mempool_len().1, 6); - assert_future_iterator!(pool, [xt4, xt5]); + assert_future_iterator!(header03.hash(), pool, [xt4, xt5]); let header04 = api.push_block_with_parent(header03.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); assert_pool_status!(header04.hash(), &pool, 0, 2); assert_eq!(pool.futures().len(), 2); - assert_future_iterator!(pool, [xt4, xt5]); + assert_future_iterator!(header04.hash(), pool, [xt4, xt5]); block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); assert_eq!(pool.active_views_count(), 1); diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 544ea108306a..41bc374b38f4 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -185,3 +185,65 @@ fn fatp_prio_watcher_ready_lower_prio_gets_dropped_from_all_views() { assert_ready_iterator!(header01.hash(), pool, [xt1]); assert_ready_iterator!(header02.hash(), pool, [xt1]); } + +#[test] +fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let header02 = 
api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 0, 2); + assert_future_iterator!(header03a.hash(), pool, [xt0, xt1]); + assert_pool_status!(header03b.hash(), &pool, 0, 2); + assert_future_iterator!(header03b.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header02.hash(), pool, [xt0, xt1]); + + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + assert_future_iterator!(header03a.hash(), pool, []); + assert_future_iterator!(header03b.hash(), pool, []); + assert_future_iterator!(header01.hash(), pool, []); + assert_future_iterator!(header02.hash(), pool, []); + + assert_ready_iterator!(header03a.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); +} From d69c660ffbfdef661e2412d2388b17a7a86e9f33 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 17:29:33 +0100 Subject: [PATCH 20/46] clippy --- .../transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs | 5 +---- .../transaction-pool/src/single_state_txpool/revalidation.rs | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 463ff938a7fb..c1dd50ea6378 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -26,10 +26,7 @@ //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. 
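Editorial aside (not part of the diffs): the module documentation quoted above describes the mempool as a fork-agnostic collection that sits next to the per-fork views. A toy model of that relationship; `PoolModel` and the plain hash types are illustrative only:

use std::collections::{HashMap, HashSet};

type TxHash = u64;
type BlockHash = u64;

struct PoolModel {
    /// Everything submitted and not yet finalized or dropped, kept independently of any fork.
    mempool: HashSet<TxHash>,
    /// Per-fork views: the subset of the mempool that validated at that block.
    views: HashMap<BlockHash, HashSet<TxHash>>,
}

impl PoolModel {
    /// A transaction rejected on one fork is not lost: it stays in the mempool
    /// and may still be (or later become) part of a view on another fork.
    fn valid_somewhere(&self, tx: TxHash) -> bool {
        self.views.values().any(|view| view.contains(&tx))
    }
}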
-use super::{ - dropped_watcher::DroppedTransaction, metrics::MetricsLink as PrometheusMetrics, - multi_view_listener::MultiViewListener, -}; +use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener}; use crate::{ common::log_xt::log_xt_trace, graph, diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index ed5824c884ef..5368ff71d602 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -88,7 +88,7 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source.source.clone(), ext.data.clone()) + api.validate_transaction(at, ext.source.source, ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) From b47b174955f87d85cde7b46574df0635d76595e9 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:16:47 +0100 Subject: [PATCH 21/46] is_imported: fix --- .../client/transaction-pool/src/fork_aware_txpool/view.rs | 4 ++-- .../transaction-pool/src/fork_aware_txpool/view_store.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index dd3cf6bddcc6..0fa90ccfcb31 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -455,9 +455,9 @@ where } } - /// Returns true if the transaction hash is already imported into the view + /// Returns true if the transaction with given hash is already imported into the view. 
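Editorial aside (not part of the diffs): the one-character fix in the next hunk hinges on the fact that a "check is known" style helper reports an error when the hash is already present, so "is imported" corresponds to `is_err()` rather than `is_ok()`. A minimal stand-in illustrating the inverted predicate; this `check_is_known` is a simplified model, not the real validated-pool method:

use std::collections::HashSet;

/// Simplified model: errors when the hash is already known, like the real checker.
fn check_is_known(known: &HashSet<u64>, tx_hash: u64) -> Result<(), String> {
    if known.contains(&tx_hash) {
        Err(format!("{tx_hash} already imported"))
    } else {
        Ok(())
    }
}

fn is_imported(known: &HashSet<u64>, tx_hash: u64) -> bool {
    // `.is_ok()` here would answer "is NOT imported", which was the bug.
    check_is_known(known, tx_hash).is_err()
}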
pub(super) fn is_imported(&self, tx_hash: &ExtrinsicHash) -> bool { const IGNORE_BANNED: bool = false; - self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_ok() + self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 605b2380c822..a06c051f0a7e 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -676,7 +676,7 @@ where active_views .iter() .chain(inactive_views.iter()) - .filter(|(_, view)| !view.is_imported(&replaced)) + .filter(|(_, view)| view.is_imported(&replaced)) .map(|(_, view)| { self.replace_transaction_in_view( view.clone(), From 2d0bbf83e2df2b4c641ef84c1188907c4bfad3c6 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:17:32 +0100 Subject: [PATCH 22/46] fatp: avoid some duplications in update_view_with_mempool --- .../fork_aware_txpool/fork_aware_txpool.rs | 45 +++++++------------ .../src/fork_aware_txpool/tx_mem_pool.rs | 5 +++ 2 files changed, 22 insertions(+), 28 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index c306d896ab7d..a1528e13a580 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1115,46 +1115,35 @@ where self.active_views_count() ); let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await; - let xts = self.mempool.clone_unwatched(); - let mut all_submitted_count = 0; - if !xts.is_empty() { - let unwatched_count = xts.len(); - let xts_with_src = xts - .into_iter() - .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) - .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(_, tx)| (tx.source(), tx.tx())); - - let results = view.submit_many(xts_with_src).await; - all_submitted_count = results.len(); - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} unwatched {}/{}", view.at.hash, all_submitted_count, unwatched_count); - } - - let watched_xts_filtered = watched_xts + let (hashes, xts_filtered): (Vec<_>, Vec<_>) = watched_xts .into_iter() - .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) + .chain(self.mempool.clone_unwatched().into_iter()) + .filter(|(hash, _)| !view.is_imported(hash)) .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(tx_hash, tx)| (tx_hash, tx.source(), tx.tx())) - .collect::>(); - - let watched_submitted_count = watched_xts_filtered.len(); + .map(|(tx_hash, tx)| (tx_hash, (tx.source(), tx.tx()))) + .unzip(); - let hashes = watched_xts_filtered.iter().map(|i| i.0).collect::>(); let watched_results = view - .submit_many(watched_xts_filtered.into_iter().map(|i| (i.1, i.2))) + .submit_many(xts_filtered) .await .into_iter() .zip(hashes) .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) .collect::>(); - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} watched {}/{}", view.at.hash, watched_submitted_count, self.mempool_len().1); + let submitted_count = watched_results.len(); - all_submitted_count += watched_submitted_count; - let _ = all_submitted_count - .try_into() - .map(|v| 
self.metrics.report(|metrics| metrics.submitted_from_mempool_txs.inc_by(v))); + log::debug!( + target: LOG_TARGET, + "update_view_with_mempool: at {:?} submitted {}/{}", + view.at.hash, + submitted_count, + self.mempool.len() + ); + + self.metrics + .report(|metrics| metrics.submitted_from_mempool_txs.inc_by(submitted_count as _)); // if there are no views yet, and a single newly created view is reporting error, just send // out the invalid event, and remove transaction. diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index c1dd50ea6378..16f2e5f3932d 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -256,6 +256,11 @@ where (transactions.len() - watched_count, watched_count) } + /// Returns a total number of transactions kept withing mempool. + pub fn len(&self) -> usize { + self.transactions.read().len() + } + /// Returns the number of bytes used by all extrinsics in the the pool. #[cfg(test)] pub fn bytes(&self) -> usize { From 31b3392ff269e4ef095218ece7734b57a67450b9 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:19:38 +0100 Subject: [PATCH 23/46] prdoc updated --- prdoc/pr_6405.prdoc | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc index c75825f172c3..85e37f521f72 100644 --- a/prdoc/pr_6405.prdoc +++ b/prdoc/pr_6405.prdoc @@ -2,27 +2,8 @@ title: '`fatxpool`: handling limits and priorities improvements' doc: - audience: Node Dev description: |- - This PR provides a number of improvements around handling limits and priorities in the fork-aware transaction pool. + This PR provides a number of improvements and fixes around handling limits and priorities in the fork-aware transaction pool. - - #### Notes to reviewers. - Following are the major changes: - 1. #### [Better support](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/414ec3ccad154c9a2aab0586bfa2d2c884fd140f) for `Usurped` transactions - - When any view reports an `Usurped` transaction (replaced by other with higher priority) it is removed from all the views (also inactive). Removal is implemented by simply submitting usurper transaction to all the views. It is also ensured that usurped tx will not sneak into the `view_store` in newly created view. - - 1. #### [`TimedTransactionSource`](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/f10590f3bde69b31250761a5b10802fb139ab2b2) introduced: - - Every view now has an information when the transaction entered the pool. Enforce limits (now only for future txs) uses this timestamp to find worst transactions. Having common timestamp ensures coherent assessment of the transaction's transaction across different views. This also could later be used to select which ready transaction shall be dropped. - - 1. #### `DroppedWatcher` [improved logic](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/560db28c987dd1e634119788ebc8318967df206b) for future transactions - For future transaction - if the last referencing view is removed, the transaction will be dropped from the pool. This prevents future unincluded and un-promoted transactions from staying in the pool for long time. 
- - And some minor changes: - - `graph::BasePool`: [handling priorities](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/c9f2d39355853d034fdbc6ea31e4e0e5bf34cb6a) for future transaction improved (previously transaction with lower prio was reported as failed), - - `graph::listener`: dedicated `limit_enforced`/`usurped`/`dropped` [calls added](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/7b58a68cccfcf372321ea41826fbe9d4222829cf), - - flaky test [fixed](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/e0a7bc6c048245943796839b166505e2aecdbd7d) - - new tests added. crates: - name: sc-transaction-pool - bump: major + bump: minor From f66b1add9b9901b9a9b3253578d435c97fbfb6cc Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:22:45 +0100 Subject: [PATCH 24/46] misspell --- .../transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 16f2e5f3932d..f29ef08e6b6c 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -256,7 +256,7 @@ where (transactions.len() - watched_count, watched_count) } - /// Returns a total number of transactions kept withing mempool. + /// Returns a total number of transactions kept within mempool. pub fn len(&self) -> usize { self.transactions.read().len() } From 444996914e3f6e0fdebc0dae64baef66a8938fff Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:31:10 +0100 Subject: [PATCH 25/46] prodc --- prdoc/pr_6405.prdoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc index 85e37f521f72..9e4e0b3c6c20 100644 --- a/prdoc/pr_6405.prdoc +++ b/prdoc/pr_6405.prdoc @@ -6,4 +6,4 @@ doc: crates: - name: sc-transaction-pool - bump: minor + bump: major From 26d27ec25db73be89eddea2aed9eaf837e8ba50b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:08:50 +0100 Subject: [PATCH 26/46] api: async_trait --- substrate/bin/node/bench/Cargo.toml | 1 + substrate/bin/node/bench/src/construct.rs | 46 +++----- substrate/client/rpc-spec-v2/Cargo.toml | 1 + .../src/transaction/tests/middleware_pool.rs | 93 +++++++-------- substrate/client/service/src/lib.rs | 16 ++- .../client/transaction-pool/api/src/lib.rs | 53 +++------ .../fork_aware_txpool/fork_aware_txpool.rs | 109 +++++++++--------- .../src/fork_aware_txpool/mod.rs | 12 +- substrate/client/transaction-pool/src/lib.rs | 2 - .../single_state_txpool.rs | 108 ++++++++--------- .../src/transaction_pool_wrapper.rs | 52 ++++----- 11 files changed, 213 insertions(+), 280 deletions(-) diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index 8c6556da682c..447f947107c1 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +async-trait = { workspace = true } array-bytes = { workspace = true, default-features = true } clap = { features = ["derive"], workspace = true } log = { workspace = true, 
default-features = true } diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs index bed6e3d914c2..4c3b5a797734 100644 --- a/substrate/bin/node/bench/src/construct.rs +++ b/substrate/bin/node/bench/src/construct.rs @@ -24,14 +24,14 @@ //! DO NOT depend on user input). Thus transaction generation should be //! based on randomized data. -use futures::Future; use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc}; +use async_trait::async_trait; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, - TransactionSource, TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionSource, + TransactionStatusStreamFor, TxHash, }; use sp_consensus::{Environment, Proposer}; use sp_inherents::InherentDataProvider; @@ -224,54 +224,49 @@ impl ReadyTransactions for TransactionsIterator { fn report_invalid(&mut self, _tx: &Self::Item) {} } +#[async_trait] impl sc_transaction_pool_api::TransactionPool for Transactions { type Block = Block; type Hash = node_primitives::Hash; type InPoolTransaction = PoolTransaction; type Error = sc_transaction_pool_api::error::Error; - /// Returns a future that imports a bunch of unverified transactions to the pool. - fn submit_at( + /// Asynchronously imports a bunch of unverified transactions to the pool. + async fn submit_at( &self, _at: Self::Hash, _source: TransactionSource, _xts: Vec>, - ) -> PoolFuture>, Self::Error> { + ) -> Result>, Self::Error> { unimplemented!() } - /// Returns a future that imports one unverified transaction to the pool. - fn submit_one( + /// Asynchronously imports one unverified transaction to the pool. 
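Editorial aside (not part of the diffs): the conversion applied throughout this patch is mechanical: trait methods that used to return a boxed pool future become `async fn`s behind `#[async_trait]`, and call sites simply `.await` them. A minimal sketch of the pattern with simplified stand-in types (`SimplePool`, `Dummy` and `Error` are not the real API):

use async_trait::async_trait;

type Error = String;

#[async_trait]
trait SimplePool {
    // Before: fn submit_one(&self, xt: Vec<u8>) -> Pin<Box<dyn Future<Output = Result<u64, Error>> + Send>>;
    async fn submit_one(&self, xt: Vec<u8>) -> Result<u64, Error>;
}

struct Dummy;

#[async_trait]
impl SimplePool for Dummy {
    async fn submit_one(&self, xt: Vec<u8>) -> Result<u64, Error> {
        // The body can now `.await` other futures directly instead of
        // hand-assembling and boxing one.
        Ok(xt.len() as u64)
    }
}

Under the hood `#[async_trait]` still boxes the returned future, so the shape of the API stays the same; only the signatures and bodies get simpler.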
+ async fn submit_one( &self, _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, - ) -> PoolFuture, Self::Error> { + ) -> Result, Self::Error> { unimplemented!() } - fn submit_and_watch( + async fn submit_and_watch( &self, _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { + ) -> Result>>, Self::Error> { unimplemented!() } - fn ready_at( + async fn ready_at( &self, _at: Self::Hash, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send, - >, - > { + ) -> Box> + Send> { let iter: Box> + Send> = Box::new(TransactionsIterator(self.0.clone().into_iter())); - Box::pin(futures::future::ready(iter)) + iter } fn ready(&self) -> Box> + Send> { @@ -306,18 +301,11 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { unimplemented!() } - fn ready_at_with_timeout( + async fn ready_at_with_timeout( &self, _at: Self::Hash, _timeout: std::time::Duration, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send - + '_, - >, - > { + ) -> Box> + Send> { unimplemented!() } } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 58dd8b830beb..daa805912fb9 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -44,6 +44,7 @@ rand = { workspace = true, default-features = true } schnellru = { workspace = true } [dev-dependencies] +async-trait = { workspace = true } jsonrpsee = { workspace = true, features = ["server", "ws-client"] } serde_json = { workspace = true, default-features = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs index adcc987f9c39..82a2bc8681a3 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs @@ -16,16 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use async_trait::async_trait; use codec::Encode; -use futures::Future; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use crate::hex_string; -use futures::{FutureExt, StreamExt}; +use futures::StreamExt; use sp_runtime::traits::Block as BlockT; use std::{collections::HashMap, pin::Pin, sync::Arc}; @@ -77,67 +77,65 @@ impl MiddlewarePool { } } +#[async_trait] impl TransactionPool for MiddlewarePool { type Block = as TransactionPool>::Block; type Hash = as TransactionPool>::Hash; type InPoolTransaction = as TransactionPool>::InPoolTransaction; type Error = as TransactionPool>::Error; - fn submit_at( + async fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error> { - self.inner_pool.submit_at(at, source, xts) + ) -> Result, Self::Error>>, Self::Error> { + self.inner_pool.submit_at(at, source, xts).await } - fn submit_one( + async fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture, Self::Error> { - self.inner_pool.submit_one(at, source, xt) + ) -> Result, Self::Error> { + self.inner_pool.submit_one(at, source, xt).await } - fn submit_and_watch( + async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { + ) -> Result>>, Self::Error> { let pool = self.inner_pool.clone(); let sender = self.sender.clone(); let transaction = hex_string(&xt.encode()); - async move { - let watcher = match pool.submit_and_watch(at, source, xt).await { - Ok(watcher) => watcher, - Err(err) => { - let _ = sender.send(MiddlewarePoolEvent::PoolError { - transaction: transaction.clone(), - err: err.to_string(), - }); - return Err(err); - }, - }; - - let watcher = watcher.map(move |status| { - let sender = sender.clone(); - let transaction = transaction.clone(); - - let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { - transaction, - status: status.clone(), + let watcher = match pool.submit_and_watch(at, source, xt).await { + Ok(watcher) => watcher, + Err(err) => { + let _ = sender.send(MiddlewarePoolEvent::PoolError { + transaction: transaction.clone(), + err: err.to_string(), }); + return Err(err); + }, + }; - status + let watcher = watcher.map(move |status| { + let sender = sender.clone(); + let transaction = transaction.clone(); + + let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { + transaction, + status: status.clone(), }); - Ok(watcher.boxed()) - } - .boxed() + status + }); + + Ok(watcher.boxed()) } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -164,17 +162,11 @@ impl TransactionPool for MiddlewarePool { self.inner_pool.ready_transaction(hash) } - fn ready_at( + async fn ready_at( &self, at: ::Hash, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send, - >, - > { - self.inner_pool.ready_at(at) + ) -> Box> + Send> { + self.inner_pool.ready_at(at).await } fn ready(&self) -> Box> + Send> { @@ -185,18 +177,11 @@ impl TransactionPool for MiddlewarePool { self.inner_pool.futures() } - fn ready_at_with_timeout( + async fn ready_at_with_timeout( &self, at: ::Hash, _timeout: std::time::Duration, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send - + '_, - 
>, - > { - self.inner_pool.ready_at(at) + ) -> Box> + Send> { + self.inner_pool.ready_at(at).await } } diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 5cfd80cef910..9c01d7288a81 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -528,13 +528,17 @@ where }; let start = std::time::Instant::now(); - let import_future = self.pool.submit_one( - self.client.info().best_hash, - sc_transaction_pool_api::TransactionSource::External, - uxt, - ); + let pool = self.pool.clone(); + let client = self.client.clone(); Box::pin(async move { - match import_future.await { + match pool + .submit_one( + client.info().best_hash, + sc_transaction_pool_api::TransactionSource::External, + uxt, + ) + .await + { Ok(_) => { let elapsed = start.elapsed(); debug!(target: sc_transaction_pool::LOG_TARGET, "import transaction: {elapsed:?}"); diff --git a/substrate/client/transaction-pool/api/src/lib.rs b/substrate/client/transaction-pool/api/src/lib.rs index 3ac1a79a0c28..343379a83db3 100644 --- a/substrate/client/transaction-pool/api/src/lib.rs +++ b/substrate/client/transaction-pool/api/src/lib.rs @@ -208,9 +208,6 @@ pub type LocalTransactionFor

<P> = <<P as LocalTransactionPool>
::Block as BlockT> /// Transaction's index within the block in which it was included. pub type TxIndex = usize; -/// Typical future type used in transaction pool api. -pub type PoolFuture = std::pin::Pin> + Send>>; - /// In-pool transaction interface. /// /// The pool is container of transactions that are implementing this trait. @@ -238,6 +235,7 @@ pub trait InPoolTransaction { } /// Transaction pool interface. +#[async_trait] pub trait TransactionPool: Send + Sync { /// Block type. type Block: BlockT; @@ -253,46 +251,40 @@ pub trait TransactionPool: Send + Sync { // *** RPC - /// Returns a future that imports a bunch of unverified transactions to the pool. - fn submit_at( + /// Asynchronously imports a bunch of unverified transactions to the pool. + async fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error>; + ) -> Result, Self::Error>>, Self::Error>; - /// Returns a future that imports one unverified transaction to the pool. - fn submit_one( + /// Asynchronously imports one unverified transaction to the pool. + async fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture, Self::Error>; + ) -> Result, Self::Error>; - /// Returns a future that imports a single transaction and starts to watch their progress in the + /// Asynchronously imports a single transaction and starts to watch their progress in the /// pool. - fn submit_and_watch( + async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>>, Self::Error>; + ) -> Result>>, Self::Error>; // *** Block production / Networking /// Get an iterator for ready transactions ordered by priority. /// - /// Guarantees to return only when transaction pool got updated at `at` block. - /// Guarantees to return immediately when `None` is passed. - fn ready_at( + /// Guarantees to resolve only when transaction pool got updated at `at` block. + /// Guarantees to resolve immediately when `None` is passed. + async fn ready_at( &self, at: ::Hash, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send, - >, - >; + ) -> Box> + Send>; /// Get an iterator for ready transactions ordered by priority. fn ready(&self) -> Box> + Send>; @@ -322,22 +314,15 @@ pub trait TransactionPool: Send + Sync { /// Return specific ready transaction by hash, if there is one. fn ready_transaction(&self, hash: &TxHash) -> Option>; - /// Returns set of ready transaction at given block within given timeout. + /// Asynchronously returns a set of ready transaction at given block within given timeout. /// - /// If the timeout is hit during method execution then the best effort set of ready transactions - /// for given block, without executing full maintain process is returned. - fn ready_at_with_timeout( + /// If the timeout is hit during method execution then the best effort (without executing full + /// maintain process) set of ready transactions for given block, is returned. + async fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send - + '_, - >, - >; + ) -> Box> + Send>; } /// An iterator of ready transactions. 
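
The hunk above converts every `PoolFuture`-returning method of `TransactionPool` into an `async fn` behind `#[async_trait]` and drops the `PoolFuture` alias altogether. A minimal, self-contained sketch of the same pattern, assuming only the `async-trait` crate; `ToyPool`, `Noop` and `submit` are illustrative names, not part of this patch:

    use async_trait::async_trait;

    #[async_trait]
    trait ToyPool: Send + Sync {
        type Error;

        // Before this change the method returned a hand-rolled future, roughly
        // `PoolFuture<u64, Self::Error>`, i.e. a pinned, boxed, Send future.
        async fn submit(&self) -> Result<u64, Self::Error>;
    }

    struct Noop;

    #[async_trait]
    impl ToyPool for Noop {
        type Error = ();

        async fn submit(&self) -> Result<u64, Self::Error> {
            // No more `futures::future::ready(Ok(42)).boxed()`; the value is
            // returned directly and `#[async_trait]` does the boxing.
            Ok(42)
        }
    }

Call sites are unchanged, since both the old boxed future and the new `async fn` are simply `.await`ed; the churn is concentrated in implementations, which no longer box and pin their return values by hand.
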
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index a1528e13a580..cf1a3854f6f9 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -37,7 +37,7 @@ use crate::{ base_pool::{TimedTransactionSource, Transaction}, ExtrinsicFor, ExtrinsicHash, IsValidator, Options, }, - PolledIterator, ReadyIteratorFor, LOG_TARGET, + ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; use futures::{ @@ -49,8 +49,8 @@ use futures::{ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ - ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolFuture, PoolStatus, - TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor, + TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; @@ -114,6 +114,8 @@ where } } +type PolledIterator = Pin> + Send>>; + /// The fork-aware transaction pool. /// /// It keeps track of every fork and provides the set of transactions that is valid for every fork. @@ -514,18 +516,18 @@ where /// maintain. /// /// Returns a future resolving to a ready iterator of transactions. - fn ready_at_with_timeout_internal( + async fn ready_at_with_timeout_internal( &self, at: Block::Hash, timeout: std::time::Duration, - ) -> PolledIterator { + ) -> ReadyIteratorFor { log::debug!(target: LOG_TARGET, "fatp::ready_at_with_timeout at {:?} allowed delay: {:?}", at, timeout); let timeout = futures_timer::Delay::new(timeout); let (view_already_exists, ready_at) = self.ready_at_internal(at); if view_already_exists { - return ready_at; + return ready_at.await; } let maybe_ready = async move { @@ -543,11 +545,9 @@ where }; let fall_back_ready = self.ready_at_light(at); - Box::pin(async { - let (maybe_ready, fall_back_ready) = - futures::future::join(maybe_ready.boxed(), fall_back_ready.boxed()).await; - maybe_ready.unwrap_or(fall_back_ready) - }) + let (maybe_ready, fall_back_ready) = + futures::future::join(maybe_ready.boxed(), fall_back_ready.boxed()).await; + maybe_ready.unwrap_or(fall_back_ready) } fn ready_at_internal(&self, at: Block::Hash) -> (bool, PolledIterator) { @@ -623,6 +623,7 @@ fn reduce_multiview_result(input: HashMap>>) -> Vec TransactionPool for ForkAwareTxPool where Block: BlockT, @@ -640,12 +641,12 @@ where /// /// The internal limits of the pool are checked. The results of submissions to individual views /// are reduced to single result. Refer to `reduce_multiview_result` for more details. 
- fn submit_at( + async fn submit_at( &self, _: ::Hash, source: TransactionSource, xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error> { + ) -> Result, Self::Error>>, Self::Error> { let view_store = self.view_store.clone(); log::debug!(target: LOG_TARGET, "fatp::submit_at count:{} views:{}", xts.len(), self.active_views_count()); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.tx_hash(xt)), "[{:?}] fatp::submit_at"); @@ -653,11 +654,10 @@ where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return future::ready(Ok(mempool_results + return Ok(mempool_results .into_iter() - .map(|r| r.map(|r| r.hash)) - .collect::>())) - .boxed() + .map(|r| r.map(|r| r.hash).map_err(Into::into)) + .collect::>()) } let to_be_submitted = mempool_results @@ -672,14 +672,15 @@ where .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); let mempool = self.mempool.clone(); - async move { - let results_map = view_store.submit(to_be_submitted.into_iter()).await; - let mut submission_results = reduce_multiview_result(results_map).into_iter(); + let results_map = view_store.submit(to_be_submitted.into_iter()).await; + let mut submission_results = reduce_multiview_result(results_map).into_iter(); - Ok(mempool_results + Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|insertion| { + result + .map_err(Into::into) + .and_then(|insertion| { submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") @@ -689,61 +690,61 @@ where }) }) .collect::>()) - } - .boxed() } /// Submits a single transaction and returns a future resolving to the submission results. /// /// Actual transaction submission process is delegated to the `submit_at` function. - fn submit_one( + async fn submit_one( &self, _at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture, Self::Error> { + ) -> Result, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_one views:{}", self.tx_hash(&xt), self.active_views_count()); - let result_future = self.submit_at(_at, source, vec![xt]); - async move { - let result = result_future.await; - match result { - Ok(mut v) => - v.pop().expect("There is exactly one element in result of submit_at. qed."), - Err(e) => Err(e), - } + match self.submit_at(_at, source, vec![xt]).await { + Ok(mut v) => + v.pop().expect("There is exactly one element in result of submit_at. qed."), + Err(e) => Err(e), } - .boxed() } /// Submits a transaction and starts to watch its progress in the pool, returning a stream of /// status updates. /// /// Actual transaction submission process is delegated to the `ViewStore` internal instance. 
- fn submit_and_watch( + async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { + ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); + //todo: clones let xt = Arc::from(xt); + let view_store = self.view_store.clone(); + let mempool = self.mempool.clone(); + let metrics = self.metrics.clone(); + let this = (*self).clone(); + let InsertionInfo { hash: xt_hash, source: timed_source } = - match self.mempool.push_watched(source, xt.clone()) { + match mempool.push_watched(source, xt.clone()) { Ok(result) => result, - Err(e) => return future::ready(Err(e)).boxed(), + Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped) => + if let Some(result) = this.try_drop_something(at, source, xt.clone()).await { + result + } else { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()) + }, + Err(e) => return Err(e.into()), // return Err(e) }; - self.metrics.report(|metrics| metrics.submitted_transactions.inc()); + metrics.report(|metrics| metrics.submitted_transactions.inc()); - let view_store = self.view_store.clone(); - let mempool = self.mempool.clone(); - async move { - view_store - .submit_and_watch(at, timed_source, xt) - .await - .inspect_err(|_| mempool.remove(xt_hash)) - } - .boxed() + view_store + .submit_and_watch(at, timed_source, xt) + .await + .inspect_err(|_| mempool.remove(xt_hash)) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -814,9 +815,9 @@ where } /// Returns an iterator for ready transactions at a specific block, ordered by priority. - fn ready_at(&self, at: ::Hash) -> PolledIterator { + async fn ready_at(&self, at: ::Hash) -> ReadyIteratorFor { let (_, result) = self.ready_at_internal(at); - result + result.await } /// Returns an iterator for ready transactions, ordered by priority. @@ -839,12 +840,12 @@ where /// /// If the timeout expires before the maintain process is accomplished, a best-effort /// set of transactions is returned (refer to `ready_at_light`). - fn ready_at_with_timeout( + async fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> PolledIterator { - self.ready_at_with_timeout_internal(at, timeout) + ) -> ReadyIteratorFor { + self.ready_at_with_timeout_internal(at, timeout).await } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs index 9f979e216b6d..5f7294a24fd7 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs @@ -201,12 +201,12 @@ //! required to accomplish it. //! //! ### Providing ready transactions: `ready_at` -//! The [`ready_at`] function returns a [future][`crate::PolledIterator`] that resolves to the -//! [ready transactions iterator][`ReadyTransactions`]. The block builder shall wait either for the -//! future to be resolved or for timeout to be hit. To avoid building empty blocks in case of -//! timeout, the waiting for timeout functionality was moved into the transaction pool, and new API -//! function was added: [`ready_at_with_timeout`]. This function also provides a fall back ready -//! iterator which is result of [light maintain](#light-maintain). +//! The asynchronous [`ready_at`] function resolves to the [ready transactions +//! iterator][`ReadyTransactions`]. 
The block builder shall wait either for the future to be +//! resolved or for timeout to be hit. To avoid building empty blocks in case of timeout, the +//! waiting for timeout functionality was moved into the transaction pool, and new API function was +//! added: [`ready_at_with_timeout`]. This function also provides a fall back ready iterator which +//! is result of [light maintain](#light-maintain). //! //! New function internally waits either for [maintain](#maintain) process triggered for requested //! block to be accomplished or for the timeout. If timeout hits then the result of [light diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index c7796fea5787..8bf36932ca0e 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -53,8 +53,6 @@ type BoxedReadyIterator = Box< type ReadyIteratorFor = BoxedReadyIterator, graph::ExtrinsicFor>; -type PolledIterator = Pin> + Send>>; - /// Log target for transaction pool. /// /// It can be used by other components for logging functionality strictly related to txpool (e.g. diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index 2668fc9a0853..be2cbfb99c5a 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -30,7 +30,7 @@ use crate::{ log_xt::log_xt_trace, }, graph::{self, base_pool::TimedTransactionSource, ExtrinsicHash, IsValidator}, - PolledIterator, ReadyIteratorFor, LOG_TARGET, + ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; use futures::{channel::oneshot, future, prelude::*, Future, FutureExt}; @@ -38,8 +38,8 @@ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ error::Error as TxPoolError, ChainEvent, ImportNotificationStream, MaintainedTransactionPool, - PoolFuture, PoolStatus, TransactionFor, TransactionPool, TransactionSource, - TransactionStatusStreamFor, TxHash, + PoolStatus, TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, + TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; @@ -223,26 +223,19 @@ where &self.api } - fn ready_at_with_timeout_internal( + async fn ready_at_with_timeout_internal( &self, at: Block::Hash, timeout: std::time::Duration, - ) -> PolledIterator { - let timeout = futures_timer::Delay::new(timeout); - let ready_maintained = self.ready_at(at); - let ready_current = self.ready(); - - let ready = async { - select! { - ready = ready_maintained => ready, - _ = timeout => ready_current - } - }; - - Box::pin(ready) + ) -> ReadyIteratorFor { + select! 
{ + ready = self.ready_at(at)=> ready, + _ = futures_timer::Delay::new(timeout)=> self.ready() + } } } +#[async_trait] impl TransactionPool for BasicPool where Block: BlockT, @@ -254,12 +247,12 @@ where graph::base_pool::Transaction, graph::ExtrinsicFor>; type Error = PoolApi::Error; - fn submit_at( + async fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error> { + ) -> Result, Self::Error>>, Self::Error> { let pool = self.pool.clone(); let xts = xts .into_iter() @@ -272,39 +265,33 @@ where .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); let number = self.api.resolve_block_number(at); - async move { - let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, xts).await) - } - .boxed() + let at = HashAndNumber { hash: at, number: number? }; + Ok(pool.submit_at(&at, xts).await) } - fn submit_one( + async fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture, Self::Error> { + ) -> Result, Self::Error> { let pool = self.pool.clone(); let xt = Arc::from(xt); self.metrics.report(|metrics| metrics.submitted_transactions.inc()); let number = self.api.resolve_block_number(at); - async move { - let at = HashAndNumber { hash: at, number: number? }; - pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) - .await - } - .boxed() + let at = HashAndNumber { hash: at, number: number? }; + pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) + .await } - fn submit_and_watch( + async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { + ) -> Result>>, Self::Error> { let pool = self.pool.clone(); let xt = Arc::from(xt); @@ -312,19 +299,16 @@ where let number = self.api.resolve_block_number(at); - async move { - let at = HashAndNumber { hash: at, number: number? }; - let watcher = pool - .submit_and_watch( - &at, - TimedTransactionSource::from_transaction_source(source, false), - xt, - ) - .await?; - - Ok(watcher.into_stream().boxed()) - } - .boxed() + let at = HashAndNumber { hash: at, number: number? }; + let watcher = pool + .submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await?; + + Ok(watcher.into_stream().boxed()) } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -354,9 +338,9 @@ where self.pool.validated_pool().ready_by_hash(hash) } - fn ready_at(&self, at: ::Hash) -> PolledIterator { + async fn ready_at(&self, at: ::Hash) -> ReadyIteratorFor { let Ok(at) = self.api.resolve_block_number(at) else { - return async { Box::new(std::iter::empty()) as Box<_> }.boxed() + return Box::new(std::iter::empty()) as Box<_> }; let status = self.status(); @@ -365,25 +349,23 @@ where // There could be transaction being added because of some re-org happening at the relevant // block, but this is relative unlikely. 
if status.ready == 0 && status.future == 0 { - return async { Box::new(std::iter::empty()) as Box<_> }.boxed() + return Box::new(std::iter::empty()) as Box<_> } if self.ready_poll.lock().updated_at() >= at { log::trace!(target: LOG_TARGET, "Transaction pool already processed block #{}", at); let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return async move { iterator }.boxed() + return iterator } - self.ready_poll - .lock() - .add(at) - .map(|received| { - received.unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Error receiving pending set: {:?}", e); - Box::new(std::iter::empty()) - }) + let result = self.ready_poll.lock().add(at).map(|received| { + received.unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Error receiving pending set: {:?}", e); + Box::new(std::iter::empty()) }) - .boxed() + }); + + result.await } fn ready(&self) -> ReadyIteratorFor { @@ -395,12 +377,12 @@ where pool.futures().cloned().collect::>() } - fn ready_at_with_timeout( + async fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> PolledIterator { - self.ready_at_with_timeout_internal(at, timeout) + ) -> ReadyIteratorFor { + self.ready_at_with_timeout_internal(at, timeout).await } } diff --git a/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs b/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs index 4e1b53833b8f..e373c0278d80 100644 --- a/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs +++ b/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs @@ -22,16 +22,16 @@ use crate::{ builder::FullClientTransactionPool, graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash}, - ChainApi, FullChainApi, + ChainApi, FullChainApi, ReadyIteratorFor, }; use async_trait::async_trait; use sc_transaction_pool_api::{ ChainEvent, ImportNotificationStream, LocalTransactionFor, LocalTransactionPool, - MaintainedTransactionPool, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + MaintainedTransactionPool, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_runtime::traits::Block as BlockT; -use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc}; +use std::{collections::HashMap, pin::Pin, sync::Arc}; /// The wrapper for actual object providing implementation of TransactionPool. 
/// @@ -49,6 +49,7 @@ where + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue; +#[async_trait] impl TransactionPool for TransactionPoolWrapper where Block: BlockT, @@ -68,44 +69,38 @@ where >; type Error = as ChainApi>::Error; - fn submit_at( + async fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error> { - self.0.submit_at(at, source, xts) + ) -> Result, Self::Error>>, Self::Error> { + self.0.submit_at(at, source, xts).await } - fn submit_one( + async fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture, Self::Error> { - self.0.submit_one(at, source, xt) + ) -> Result, Self::Error> { + self.0.submit_one(at, source, xt).await } - fn submit_and_watch( + async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { - self.0.submit_and_watch(at, source, xt) + ) -> Result>>, Self::Error> { + self.0.submit_and_watch(at, source, xt).await } - fn ready_at( + async fn ready_at( &self, at: ::Hash, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send, - >, - > { - self.0.ready_at(at) + ) -> ReadyIteratorFor> { + self.0.ready_at(at).await } fn ready(&self) -> Box> + Send> { @@ -140,19 +135,12 @@ where self.0.ready_transaction(hash) } - fn ready_at_with_timeout( + async fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send - + '_, - >, - > { - self.0.ready_at_with_timeout(at, timeout) + ) -> ReadyIteratorFor> { + self.0.ready_at_with_timeout(at, timeout).await } } From e27e9b5529297d8dcfbdccd85c93844f0b46f68d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:23:53 +0100 Subject: [PATCH 27/46] api: async_trait - rpc --- substrate/client/rpc/src/author/mod.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs index 731f4df2f6f3..6afc871e565a 100644 --- a/substrate/client/rpc/src/author/mod.rs +++ b/substrate/client/rpc/src/author/mod.rs @@ -29,7 +29,6 @@ use crate::{ }; use codec::{Decode, Encode}; -use futures::TryFutureExt; use jsonrpsee::{core::async_trait, types::ErrorObject, Extensions, PendingSubscriptionSink}; use sc_rpc_api::check_if_safe; use sc_transaction_pool_api::{ @@ -191,14 +190,16 @@ where }, }; - let submit = self.pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).map_err(|e| { - e.into_pool_error() - .map(error::Error::from) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) - }); - + let pool = self.pool.clone(); let fut = async move { - let stream = match submit.await { + let submit = + pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).await.map_err(|e| { + e.into_pool_error() + .map(error::Error::from) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) + }); + + let stream = match submit { Ok(stream) => stream, Err(err) => { let _ = pending.reject(ErrorObject::from(err)).await; From b413f2bde613b49e3c1a947accf7e86c13ddacd6 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:41:59 +0100 Subject: [PATCH 28/46] dropped_watcher: new fns are public --- .../src/fork_aware_txpool/dropped_watcher.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index c0a2f470d83d..aa3b93504a71 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -53,11 +53,13 @@ pub struct DroppedTransaction { } impl DroppedTransaction { - fn new_usurped(tx_hash: Hash, by: Hash) -> Self { + /// Creates an new instnance with reason set to `DroppedReason::Usurped(by)`. + pub fn new_usurped(tx_hash: Hash, by: Hash) -> Self { Self { reason: DroppedReason::Usurped(by), tx_hash } } - fn new_enforced_by_limts(tx_hash: Hash) -> Self { + /// Creates an new instnance with reason set to `DroppedReason::LimitsEnforced`. + pub fn new_enforced_by_limts(tx_hash: Hash) -> Self { Self { reason: DroppedReason::LimitsEnforced, tx_hash } } } From 0b34584c7bc02d7c7b88a551ba0f8650356729b0 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:42:25 +0100 Subject: [PATCH 29/46] dropped_watcher: improvement --- .../transaction-pool/src/fork_aware_txpool/dropped_watcher.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index aa3b93504a71..63571347cc52 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -295,6 +295,10 @@ where ctx.ready_transaction_views.iter_mut().for_each(|(tx_hash,views)| { trace!(target: LOG_TARGET,"[{:?}] dropped_watcher: Command::RemoveView ready views: {:?}",tx_hash, views); views.remove(&key); + //todo: merge heads up warning! 
+ if views.is_empty() { + ctx.pending_dropped_transactions.push(*tx_hash); + } }); ctx.future_transaction_views.iter_mut().for_each(|(tx_hash,views)| { From 55dfa7b8bd6fcb9560a53d5acd091c9269bf7958 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:53:12 +0100 Subject: [PATCH 30/46] fatp: handling higher prio with full mempool --- .../fork_aware_txpool/fork_aware_txpool.rs | 114 ++++++++++++++---- .../src/fork_aware_txpool/tx_mem_pool.rs | 83 ++++++++----- .../src/fork_aware_txpool/view.rs | 12 ++ .../src/fork_aware_txpool/view_store.rs | 20 +++ .../src/graph/validated_pool.rs | 50 ++++++++ 5 files changed, 224 insertions(+), 55 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index df19387852ff..021c61dc9fb5 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,7 +31,10 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + fork_aware_txpool::{ + dropped_watcher::{DroppedReason, DroppedTransaction}, + revalidation_worker, + }, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -49,8 +52,9 @@ use futures::{ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ - ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + error::Error as TxPoolApiError, ChainEvent, ImportNotificationStream, + MaintainedTransactionPool, PoolStatus, TransactionFor, TransactionPool, TransactionSource, + TransactionStatusStreamFor, TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; @@ -289,7 +293,7 @@ where DroppedReason::LimitsEnforced => {}, }; - mempool.remove_dropped_transaction(&dropped_tx_hash).await; + mempool.remove_transaction(&dropped_tx_hash); view_store.listener.transaction_dropped(dropped); import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } @@ -682,9 +686,9 @@ where submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. 
qed.") - .inspect_err(|_| - mempool.remove(insertion.hash) - ) + .inspect_err(|_|{ + mempool.remove_transaction(&insertion.hash); + }) }) }) .collect::>()) @@ -718,31 +722,21 @@ where xt: TransactionFor, ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); - //todo: clones let xt = Arc::from(xt); - let view_store = self.view_store.clone(); - let mempool = self.mempool.clone(); - let metrics = self.metrics.clone(); - let this = (*self).clone(); let InsertionInfo { hash: xt_hash, source: timed_source } = - match mempool.push_watched(source, xt.clone()) { + match self.mempool.push_watched(source, xt.clone()) { Ok(result) => result, - Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped) => - if let Some(result) = this.try_drop_something(at, source, xt.clone()).await { - result - } else { - return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()) - }, - Err(e) => return Err(e.into()), // return Err(e) + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(at, source, true, xt.clone()).await?, + Err(e) => return Err(e.into()), }; - metrics.report(|metrics| metrics.submitted_transactions.inc()); + self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - view_store - .submit_and_watch(at, timed_source, xt) - .await - .inspect_err(|_| mempool.remove(xt_hash)) + self.view_store.submit_and_watch(at, timed_source, xt).await.inspect_err(|_| { + self.mempool.remove_transaction(&xt_hash); + }) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -1150,7 +1144,7 @@ where for result in watched_results { if let Err(tx_hash) = result { self.view_store.listener.invalidate_transactions(&[tx_hash]); - self.mempool.remove(tx_hash); + self.mempool.remove_transaction(&tx_hash); } } } @@ -1282,6 +1276,74 @@ where fn tx_hash(&self, xt: &TransactionFor) -> TxHash { self.api.hash_and_length(xt).0 } + + /// Attempts to replace a lower-priority transaction in the transaction pool with a new one. + /// + /// This asynchronous function verifies the new transaction against the most recent view. If a + /// transaction with a lower priority exists in the transaction pool, it is replaced with the + /// new transaction. + /// + /// If no lower-priority transaction is found, the function returns an error indicating the + /// transaction was dropped immediately. + async fn attempt_transaction_replacement( + &self, + _: Block::Hash, + source: TransactionSource, + watched: bool, + xt: ExtrinsicFor, + ) -> Result>, TxPoolApiError> { + // 1. we should validate at most_recent, and reuse result to submit there. + let at = self + .view_store + .most_recent_view + .read() + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + // 2. validate transaction at `at` + let (best_view, _) = self + .view_store + .get_view_at(at, false) + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let (tx_hash, validated_tx) = best_view + .pool + .verify_one( + best_view.at.hash, + best_view.at.number, + TimedTransactionSource::from_transaction_source(source, false), + xt.clone(), + crate::graph::CheckBannedBeforeVerify::Yes, + ) + .await; + + // 3. check if most_recent_view contains a transaction with lower priority (actually worse - + // do we want to check timestamp too - no: see #4609?) + // Would be perfect to choose transaction with lowest the number of dependant txs in its + // subtree. 
+ if let Some(worst_tx_hash) = + best_view.pool.validated_pool().find_transaction_with_lower_prio(validated_tx) + { + // 4. if yes - remove worse transaction from mempool and add new one. + log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?}"); + let insertion_info = + self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash); + + // 5. notify listner + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_usurped(worst_tx_hash, tx_hash)); + + // 7. remove transaction from the view_store + self.view_store.remove_transaction_subtree(worst_tx_hash, tx_hash); + + // 8. add to pending_replacements - make sure it will not sneak back via cloned view + + // 9. subemit new one to the view, this will be done upon in the caller + return insertion_info + } + + Err(TxPoolApiError::ImmediatelyDropped) + } } #[async_trait] diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index f29ef08e6b6c..776e2f723993 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -101,19 +101,23 @@ where /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { - watched: false, - tx, - source: TimedTransactionSource::from_transaction_source(source, true), - validated_at: AtomicU64::new(0), - bytes, - } + Self::new(false, source, tx, bytes) } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { + Self::new(true, source, tx, bytes) + } + + /// Creates a new instance of wrapper for a transaction. + fn new( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + ) -> Self { Self { - watched: true, + watched, tx, source: TimedTransactionSource::from_transaction_source(source, true), validated_at: AtomicU64::new(0), @@ -279,27 +283,52 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result>, ChainApi::Error> { + tx_to_be_removed: Option>, + ) -> Result>, sc_transaction_pool_api::error::Error> { let bytes = self.transactions.bytes(); let mut transactions = self.transactions.write(); + + tx_to_be_removed.inspect(|hash| { + transactions.remove(hash); + }); + let result = match ( - !self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), + self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), transactions.contains_key(&hash), ) { - (true, false) => { + (false, false) => { let source = tx.source(); transactions.insert(hash, Arc::from(tx)); Ok(InsertionInfo::new(hash, source)) }, (_, true) => - Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), - (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), + Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), + (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), }; log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } + /// Attempts to replace an existing transaction in the memory pool with a new one. + /// + /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully + /// inserted; otherwise, returns an appropriate error indicating the failure. 
+ pub(super) fn try_replace_transaction( + &self, + new_xt: ExtrinsicFor, + source: TransactionSource, + watched: bool, + replaced_tx_hash: ExtrinsicHash, + ) -> Result>, sc_transaction_pool_api::error::Error> { + let (hash, length) = self.api.hash_and_length(&new_xt); + self.try_insert( + hash, + TxInMemPool::new(watched, source, new_xt, length), + Some(replaced_tx_hash), + ) + } + /// Adds a new unwatched transactions to the internal buffer not exceeding the limit. /// /// Returns the vector of results for each transaction, the order corresponds to the input @@ -308,12 +337,13 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec>, ChainApi::Error>> { + ) -> Vec>, sc_transaction_pool_api::error::Error>> + { let result = xts .iter() .map(|xt| { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length)) + self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length), None) }) .collect::>(); result @@ -325,18 +355,9 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result>, ChainApi::Error> { + ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) - } - - /// Removes transaction from the memory pool which are specified by the given list of hashes. - pub(super) async fn remove_dropped_transaction( - &self, - dropped: &ExtrinsicHash, - ) -> Option>> { - log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); - self.transactions.write().remove(dropped) + self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length), None) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -362,9 +383,13 @@ where .collect::>() } - /// Removes a transaction from the memory pool based on a given hash. - pub(super) fn remove(&self, hash: ExtrinsicHash) { - let _ = self.transactions.write().remove(&hash); + /// Removes a transaction with given hash from the memory pool. + pub(super) fn remove_transaction( + &self, + hash: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{hash:?}] mempool::remove_transaction"); + self.transactions.write().remove(hash) } /// Revalidates a batch of transactions against the provided finalized block. diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 0fa90ccfcb31..57e768f522ee 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -460,4 +460,16 @@ where const IGNORE_BANNED: bool = false; self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() } + + /// Removes the whole transaction subtree from the inner pool. + /// + /// Intended to be called when removal is a result of replacement. Provided `replaced_with` + /// transaction hash is used in emitted _usurped_ event. 
+ pub(super) fn remove_subtree( + &self, + tx_hash: ExtrinsicHash, + replaced_with: ExtrinsicHash, + ) -> Vec> { + self.pool.validated_pool().remove_subtree(tx_hash, replaced_with) + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index a06c051f0a7e..7687a5699747 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -690,4 +690,24 @@ where }; let _results = futures::future::join_all(submit_futures).await; } + + /// Removes the whole transaction subtree from every view within the view store. + /// + /// Intended to be called when removal is a result of replacement. Provided `replaced_with` + /// transaction hash is used in emitted _usurped_ event. + pub(super) fn remove_transaction_subtree( + &self, + xt_hash: ExtrinsicHash, + replaced_with: ExtrinsicHash, + ) { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + active_views + .iter() + .chain(inactive_views.iter()) + .filter(|(_, view)| view.is_imported(&xt_hash)) + .for_each(|(_, view)| { + view.remove_subtree(xt_hash, replaced_with); + }); + } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index a57e67d8361d..91a0307c81ac 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -126,6 +126,8 @@ impl Clone for ValidatedPool { } } +type BaseTransactionFor = base::Transaction, ExtrinsicFor>; + impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { @@ -688,6 +690,54 @@ impl ValidatedPool { listener.future(&f.hash); }); } + + /// Searches the base pool for a transaction with a lower priority than the given one. + /// + /// Returns the hash of a lower-priority transaction if found, otherwise `None`. + pub fn find_transaction_with_lower_prio( + &self, + tx: ValidatedTransactionFor, + ) -> Option> { + match tx { + ValidatedTransaction::Valid(base::Transaction { priority, .. }) => { + let pool = self.pool.read(); + pool.fold_ready( + Some((priority, Option::>>::None)), + |acc, current| { + let (priority, _) = acc.as_ref().expect("acc is initialized. qed"); + if current.priority < *priority { + return Some((current.priority, Some(current.clone()))) + } else { + return acc + } + }, + ) + .map(|r| r.1.map(|tx| tx.hash))? + }, + _ => None, + } + } + + /// Removes the whole transaction subtree the pool. + /// + /// Intended to be called when removal is a result of replacement. Provided `replaced_with` + /// transaction hash is used in emitted _usurped_ event. 
+ pub fn remove_subtree( + &self, + tx_hash: ExtrinsicHash, + replaced_with: ExtrinsicHash, + ) -> Vec> { + self.pool + .write() + .remove_subtree(&[tx_hash]) + .into_iter() + .map(|tx| tx.hash) + .inspect(|tx_hash| { + let mut listener = self.listener.write(); + listener.usurped(&tx_hash, &replaced_with) + }) + .collect::>() + } } fn fire_events(listener: &mut Listener, imported: &base::Imported) From 11f7226318b87b84d0f45fd75fdb51116b174601 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:53:46 +0100 Subject: [PATCH 31/46] graph: fold_ready improved --- .../transaction-pool/src/graph/base_pool.rs | 54 +++++++++++-------- .../transaction-pool/src/graph/ready.rs | 10 ++-- 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 3d70490de900..b3f11852b138 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -432,6 +432,16 @@ impl BasePool(&self, init: R, mut f: F) -> R + where + F: FnMut(R, &Arc>) -> R, + { + self.ready + .fold::(init, |acc, current| f(acc, ¤t.transaction.transaction)) + } + /// Makes sure that the transactions in the queues stay within provided limits. /// /// Removes and returns worst transactions from the queues and all transactions that depend on @@ -447,27 +457,29 @@ impl BasePool, _>(|worst, current| { - let transaction = ¤t.transaction; - worst - .map(|worst| { - // Here we don't use `TransactionRef`'s ordering implementation because - // while it prefers priority like need here, it also prefers older - // transactions for inclusion purposes and limit enforcement needs to prefer - // newer transactions instead and drop the older ones. - match worst.transaction.priority.cmp(&transaction.transaction.priority) { - Ordering::Less => worst, - Ordering::Equal => - if worst.insertion_id > transaction.insertion_id { - transaction.clone() - } else { - worst - }, - Ordering::Greater => transaction.clone(), - } - }) - .or_else(|| Some(transaction.clone())) - }); + let worst = + self.ready.fold::>, _>(None, |worst, current| { + let transaction = ¤t.transaction; + worst + .map(|worst| { + // Here we don't use `TransactionRef`'s ordering implementation because + // while it prefers priority like need here, it also prefers older + // transactions for inclusion purposes and limit enforcement needs to + // prefer newer transactions instead and drop the older ones. + match worst.transaction.priority.cmp(&transaction.transaction.priority) + { + Ordering::Less => worst, + Ordering::Equal => + if worst.insertion_id > transaction.insertion_id { + transaction.clone() + } else { + worst + }, + Ordering::Greater => transaction.clone(), + } + }) + .or_else(|| Some(transaction.clone())) + }); if let Some(worst) = worst { removed.append(&mut self.remove_subtree(&[worst.transaction.hash.clone()])) diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 9061d0e25581..b8aef99e638d 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -232,12 +232,10 @@ impl ReadyTransactions { Ok(replaced) } - /// Fold a list of ready transactions to compute a single value. 
- pub fn fold, &ReadyTx) -> Option>( - &mut self, - f: F, - ) -> Option { - self.ready.read().values().fold(None, f) + /// Fold a list of ready transactions to compute a single value using initial value of + /// accumulator. + pub fn fold) -> R>(&self, init: R, f: F) -> R { + self.ready.read().values().fold(init, f) } /// Returns true if given transaction is part of the queue. From e133feb4336c056da210261c2546ff2caa65c88c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:59:49 +0100 Subject: [PATCH 32/46] graph: make some staff public --- substrate/client/transaction-pool/src/graph/mod.rs | 1 + substrate/client/transaction-pool/src/graph/pool.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index d93898b1b22a..43f93dc26255 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -43,4 +43,5 @@ pub use self::pool::{ }; pub use validated_pool::{IsValidator, ValidatedTransaction}; +pub(crate) use self::pool::CheckBannedBeforeVerify; pub(crate) use listener::DroppedByLimitsEvent; diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index e7da3a4eb6c0..eb5a005940c7 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -161,7 +161,7 @@ impl Default for Options { /// Should we check that the transaction is banned /// in the pool, before we verify it? #[derive(Copy, Clone)] -enum CheckBannedBeforeVerify { +pub(crate) enum CheckBannedBeforeVerify { Yes, No, } @@ -409,7 +409,7 @@ impl Pool { } /// Returns future that validates single transaction at given block. - async fn verify_one( + pub(crate) async fn verify_one( &self, block_hash: ::Hash, block_number: NumberFor, From b4a5d1b6a22a89c058bee74a9578e32a2e192d5a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 13:49:09 +0100 Subject: [PATCH 33/46] tests added --- .../transaction-pool/tests/fatp_common/mod.rs | 19 +- .../transaction-pool/tests/fatp_prios.rs | 287 +++++++++++++++++- 2 files changed, 298 insertions(+), 8 deletions(-) diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index aecd83360f1e..d9bc7d30b13b 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -192,12 +192,9 @@ macro_rules! assert_ready_iterator { let output: Vec<_> = ready_iterator.collect(); log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); log::debug!(target:LOG_TARGET, "output: {:#?}", output); + let output = output.into_iter().map(|t|t.hash).collect::>(); assert_eq!(expected.len(), output.len()); - assert!( - output.iter().zip(expected.iter()).all(|(o,e)| { - o.hash == *e - }) - ); + assert_eq!(output,expected); }}; } @@ -215,6 +212,18 @@ macro_rules! assert_future_iterator { }}; } +#[macro_export] +macro_rules! 
assert_watcher_stream { + ($stream:ident, [$( $event:expr ),*]) => {{ + let expected = vec![ $($event),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?} {}, block now:", expected, expected.len()); + let output = futures::executor::block_on_stream($stream).take(expected.len()).collect::>(); + log::debug!(target:LOG_TARGET, "output: {:#?}", output); + assert_eq!(expected.len(), output.len()); + assert_eq!(output, expected); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 41bc374b38f4..08e83e92b187 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -20,13 +20,14 @@ pub mod fatp_common; -use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use fatp_common::{invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; use futures::{executor::block_on, FutureExt}; use sc_transaction_pool::ChainApi; -use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use sc_transaction_pool_api::{ + error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, +}; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; - #[test] fn fatp_prio_ready_higher_evicts_lower() { sp_tracing::try_init_simple(); @@ -247,3 +248,283 @@ fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); } + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 4); + + api.set_priority(&xt4, 5); + api.set_priority(&xt5, 6); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = 
api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_ready_iterator!(header01.hash(), pool, [xt1, xt0]); + assert_ready_iterator!(header02.hash(), pool, []); + assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); + assert_eq!( + xt2_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); + assert_eq!( + xt3_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt5).0)] + ); +} + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Bob, 300); + let xt4 = uxt(Charlie, 400); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 3); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 2); + api.set_priority(&xt4, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_eq!(pool.mempool_len().1, 4); + + // let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + // block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt3, xt4]); + + assert_watcher_stream!( + xt0_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt1_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt2_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); +} + +#[test] +fn 
fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Bob, 300); + let xt4 = uxt(Charlie, 400); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 3); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 2); + api.set_priority(&xt4, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + assert_ready_iterator!(header01.hash(), pool, [xt3]); + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_ready_iterator!(header02.hash(), pool, [xt3, xt4]); + + assert_watcher_stream!( + xt0_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt1_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt2_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); +} + +#[test] +fn fatp_prios_watcher_full_mempool_lower_prio_gets_rejected() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(2).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 2); + api.set_priority(&xt3, 1); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + 
block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt0, xt1]); + + let result2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).map(|_| ()); + assert!(matches!(result2.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); + let result3 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).map(|_| ()); + assert!(matches!(result3.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); +} + +#[test] +fn fatp_prios_watcher_full_mempool_does_not_keep_dropped_transaction() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 2); + api.set_priority(&xt3, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt3]); + + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); +} From 43386716d4ca4e2f2ac18ad24a26263e2de8b735 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 13:52:08 +0100 Subject: [PATCH 34/46] type removed --- .../transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 021c61dc9fb5..6d98b3469533 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -118,8 +118,6 @@ where } } -type PolledIterator = Pin> + Send>>; - /// The fork-aware transaction pool. /// /// It keeps track of every fork and provides the set of transactions that is valid for every fork. 
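// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of any patch in this series): a
// hypothetical test written in the same style, and with the same helpers, as
// the `fatp_prios.rs` tests above. It summarizes the eviction contract those
// tests exercise: with a full mempool, a submission with strictly higher
// priority than the worst occupant evicts that occupant, while a submission
// that cannot beat any occupant fails with `ImmediatelyDropped`. The test
// name is invented; the helpers (`TestPoolBuilder`, `uxt`, `set_priority`,
// `invalid_hash`, the assert macros) are the ones used in the surrounding
// tests. Not verified against the code base.
// ---------------------------------------------------------------------------
#[test]
fn fatp_prios_sketch_full_mempool_eviction_contract() {
	sp_tracing::try_init_simple();

	// Mempool and per-view ready set both capped at 2 transactions.
	let builder = TestPoolBuilder::new();
	let (pool, api, _) = builder.with_mempool_count_limit(2).with_ready_count(2).build();
	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
	api.set_nonce(api.genesis_hash(), Dave.into(), 500);

	let header01 = api.push_block(1, vec![], true);
	block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash())));

	let xt0 = uxt(Alice, 200);
	let xt1 = uxt(Bob, 300);
	let xt2 = uxt(Charlie, 400);
	let xt3 = uxt(Dave, 500);

	api.set_priority(&xt0, 1);
	api.set_priority(&xt1, 2);
	api.set_priority(&xt2, 3);
	api.set_priority(&xt3, 1);

	// Fill the mempool.
	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
	let _xt1_watcher =
		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
	assert_pool_status!(header01.hash(), &pool, 2, 0);

	// Strictly higher priority than the worst occupant: xt0 is evicted, xt2 accepted.
	// In the final form of this series the evicted transaction is reported as
	// `Dropped`; earlier patches emitted `Usurped(..)` instead.
	let _xt2_watcher =
		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
	assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]);
	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);

	// Not better than any occupant: rejected outright.
	let result = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).map(|_| ());
	assert!(matches!(result.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped));
}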
From 302d69b0d38eb65d9f1cc50487dfc79b92c4f525 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 28 Nov 2024 10:34:51 +0100 Subject: [PATCH 35/46] improvements --- .../fork_aware_txpool/fork_aware_txpool.rs | 21 +++++----- .../src/fork_aware_txpool/view.rs | 11 +++-- .../src/fork_aware_txpool/view_store.rs | 34 ++++++++++----- .../client/transaction-pool/src/graph/mod.rs | 2 +- .../src/graph/validated_pool.rs | 41 +++++++++++-------- 5 files changed, 67 insertions(+), 42 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 6d98b3469533..a6f3697807a7 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,10 +31,7 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{ - dropped_watcher::{DroppedReason, DroppedTransaction}, - revalidation_worker, - }, + fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -1275,7 +1272,8 @@ where self.api.hash_and_length(xt).0 } - /// Attempts to replace a lower-priority transaction in the transaction pool with a new one. + /// Attempts to find and replace a lower-priority transaction in the transaction pool with a new + /// one. /// /// This asynchronous function verifies the new transaction against the most recent view. If a /// transaction with a lower priority exists in the transaction pool, it is replaced with the @@ -1327,12 +1325,13 @@ where self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash); // 5. notify listner - self.view_store - .listener - .transaction_dropped(DroppedTransaction::new_usurped(worst_tx_hash, tx_hash)); - - // 7. remove transaction from the view_store - self.view_store.remove_transaction_subtree(worst_tx_hash, tx_hash); + // 6. remove transaction from the view_store + self.view_store.remove_transaction_subtree( + worst_tx_hash, + |listener, removed_tx_hash| { + listener.usurped(&removed_tx_hash, &tx_hash); + }, + ); // 8. add to pending_replacements - make sure it will not sneak back via cloned view diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 57e768f522ee..1da80678fa1b 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -465,11 +465,14 @@ where /// /// Intended to be called when removal is a result of replacement. Provided `replaced_with` /// transaction hash is used in emitted _usurped_ event. 
- pub(super) fn remove_subtree( + pub fn remove_subtree( &self, tx_hash: ExtrinsicHash, - replaced_with: ExtrinsicHash, - ) -> Vec> { - self.pool.validated_pool().remove_subtree(tx_hash, replaced_with) + listener_action: F, + ) -> Vec> + where + F: Fn(&mut crate::graph::Listener, ExtrinsicHash), + { + self.pool.validated_pool().remove_subtree(tx_hash, listener_action) } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 7687a5699747..3c095ae2a599 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -38,7 +38,7 @@ use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, time::Instant, }; @@ -691,23 +691,37 @@ where let _results = futures::future::join_all(submit_futures).await; } - /// Removes the whole transaction subtree from every view within the view store. + /// Removes a transaction subtree from the every view in the view_store, starting from the given + /// transaction hash. /// - /// Intended to be called when removal is a result of replacement. Provided `replaced_with` - /// transaction hash is used in emitted _usurped_ event. - pub(super) fn remove_transaction_subtree( + /// This function traverses the dependency graph of transactions and removes the specified + /// transaction along with all its descendant transactions from every view. + /// + /// A `listener_action` callback function is invoked for every transaction that is removed, + /// providing a reference to the pool's listener and the hash of the removed transaction. This + /// allows to trigger the required events. Note listener may be called multiple times for the + /// same hash. + /// + /// Returns a vector containing the hashes of all removed transactions, including the root + /// transaction specified by `tx_hash`. Vector contains only unique hashes. 
+ pub(super) fn remove_transaction_subtree( &self, xt_hash: ExtrinsicHash, - replaced_with: ExtrinsicHash, - ) { + listener_action: F, + ) -> Vec> + where + F: Fn(&mut crate::graph::Listener, ExtrinsicHash), + { + let mut seen = HashSet::new(); let active_views = self.active_views.read(); let inactive_views = self.inactive_views.read(); active_views .iter() .chain(inactive_views.iter()) .filter(|(_, view)| view.is_imported(&xt_hash)) - .for_each(|(_, view)| { - view.remove_subtree(xt_hash, replaced_with); - }); + .map(|(_, view)| view.remove_subtree(xt_hash, &listener_action)) + .flatten() + .filter(|xt_hash| seen.insert(*xt_hash)) + .collect() } } diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 43f93dc26255..0810b532273a 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -41,7 +41,7 @@ pub use self::pool::{ BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, RawExtrinsicFor, TransactionFor, ValidatedTransactionFor, }; -pub use validated_pool::{IsValidator, ValidatedTransaction}; +pub use validated_pool::{IsValidator, Listener, ValidatedTransaction}; pub(crate) use self::pool::CheckBannedBeforeVerify; pub(crate) use listener::DroppedByLimitsEvent; diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 91a0307c81ac..19b55ea79438 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -18,7 +18,6 @@ use std::{ collections::{HashMap, HashSet}, - hash, sync::Arc, }; @@ -26,17 +25,15 @@ use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; -use serde::Serialize; use sp_blockchain::HashAndNumber; use sp_runtime::{ - traits::{self, SaturatedConversion}, + traits::SaturatedConversion, transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; use super::{ base_pool::{self as base, PruneStatus}, - listener::Listener, pool::{ BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, Options, TransactionFor, }, @@ -85,6 +82,8 @@ impl ValidatedTransaction { pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; +pub type Listener = super::listener::Listener, B>; + /// A closure that returns true if the local node is a validator that can author blocks. #[derive(Clone)] pub struct IsValidator(Arc bool + Send + Sync>>); @@ -106,7 +105,7 @@ pub struct ValidatedPool { api: Arc, is_validator: IsValidator, options: Options, - listener: RwLock, B>>, + listener: RwLock>, pub(crate) pool: RwLock, ExtrinsicFor>>, import_notification_sinks: Mutex>>>, rotator: PoolRotator>, @@ -718,31 +717,41 @@ impl ValidatedPool { } } - /// Removes the whole transaction subtree the pool. + /// Removes a transaction subtree from the pool, starting from the given transaction hash. + /// + /// This function traverses the dependency graph of transactions and removes the specified + /// transaction along with all its descendant transactions from the pool. + /// + /// A `listener_action` callback function is invoked for every transaction that is removed, + /// providing a reference to the pool's listener and the hash of the removed transaction. 
This + /// allows to trigger the required events. /// - /// Intended to be called when removal is a result of replacement. Provided `replaced_with` - /// transaction hash is used in emitted _usurped_ event. - pub fn remove_subtree( + /// Returns a vector containing the hashes of all removed transactions, including the root + /// transaction specified by `tx_hash`. + pub fn remove_subtree( &self, tx_hash: ExtrinsicHash, - replaced_with: ExtrinsicHash, - ) -> Vec> { + listener_action: F, + ) -> Vec> + where + F: Fn(&mut Listener, ExtrinsicHash), + { self.pool .write() .remove_subtree(&[tx_hash]) .into_iter() - .map(|tx| tx.hash) - .inspect(|tx_hash| { + .map(|tx| { + let removed_tx_hash = tx.hash; let mut listener = self.listener.write(); - listener.usurped(&tx_hash, &replaced_with) + listener_action(&mut *listener, removed_tx_hash); + removed_tx_hash }) .collect::>() } } -fn fire_events(listener: &mut Listener, imported: &base::Imported) +fn fire_events(listener: &mut Listener, imported: &base::Imported, Ex>) where - H: hash::Hash + Eq + traits::Member + Serialize, B: ChainApi, { match *imported { From 3ca1e576920181cea6e65d3ee54e9109024be769 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 28 Nov 2024 14:48:08 +0100 Subject: [PATCH 36/46] make use of your brain --- .../fork_aware_txpool/fork_aware_txpool.rs | 8 +-- .../transaction-pool/src/graph/listener.rs | 4 +- .../src/graph/validated_pool.rs | 2 +- .../transaction-pool/tests/fatp_prios.rs | 50 +++++-------------- 4 files changed, 20 insertions(+), 44 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index a6f3697807a7..58a73f1c2259 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1320,23 +1320,23 @@ where best_view.pool.validated_pool().find_transaction_with_lower_prio(validated_tx) { // 4. if yes - remove worse transaction from mempool and add new one. - log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?}"); + log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?} replaced by {tx_hash:?}"); let insertion_info = - self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash); + self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash)?; // 5. notify listner // 6. remove transaction from the view_store self.view_store.remove_transaction_subtree( worst_tx_hash, |listener, removed_tx_hash| { - listener.usurped(&removed_tx_hash, &tx_hash); + listener.limits_enforced(&removed_tx_hash); }, ); // 8. add to pending_replacements - make sure it will not sneak back via cloned view // 9. 
subemit new one to the view, this will be done upon in the caller - return insertion_info + return Ok(insertion_info) } Err(TxPoolApiError::ImmediatelyDropped) diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index 41daf5491f70..7b09ee4c6409 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -126,8 +126,8 @@ impl Listener ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.limit_enforced(h); + listener.limits_enforced(h); } removed diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 08e83e92b187..3c7db35323d8 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -310,23 +310,17 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { assert_pool_status!(header03.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 4); - let header04 = api.push_block_with_parent(header03.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + // let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + // block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); + assert_eq!(xt3_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); assert_ready_iterator!(header01.hash(), pool, [xt1, xt0]); assert_ready_iterator!(header02.hash(), pool, []); assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); - - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); - assert_eq!( - xt2_status, - vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); - assert_eq!( - xt3_status, - vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt5).0)] - ); } #[test] @@ -370,18 +364,9 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() { assert_pool_status!(header01.hash(), &pool, 2, 0); assert_ready_iterator!(header01.hash(), pool, [xt3, xt4]); - assert_watcher_stream!( - xt0_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt1_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt2_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); } @@ -428,18 +413,9 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() { 
assert_pool_status!(header02.hash(), &pool, 2, 0); assert_ready_iterator!(header02.hash(), pool, [xt3, xt4]); - assert_watcher_stream!( - xt0_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt1_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt2_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); } From eecc8491ccf58990697f4c700a1191f56937884a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 28 Nov 2024 15:40:08 +0100 Subject: [PATCH 37/46] fatp: pending actions now support removals --- .../fork_aware_txpool/fork_aware_txpool.rs | 9 +- .../src/fork_aware_txpool/view_store.rs | 166 ++++++++++++++---- 2 files changed, 141 insertions(+), 34 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 58a73f1c2259..5ed241796171 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,7 +31,10 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + fork_aware_txpool::{ + dropped_watcher::{DroppedReason, DroppedTransaction}, + revalidation_worker, + }, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -1325,6 +1328,10 @@ where self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash)?; // 5. notify listner + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_enforced_by_limts(worst_tx_hash)); + // 6. remove transaction from the view_store self.view_store.remove_transaction_subtree( worst_tx_hash, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 3c095ae2a599..9de3e7422981 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -43,15 +43,13 @@ use std::{ time::Instant, }; -/// Helper struct to keep the context for transaction replacements. +/// Helper struct to maintain the context for pending transaction submission, executed for +/// newly inserted views. #[derive(Clone)] -struct PendingTxReplacement +struct PendingTxSubmission where ChainApi: graph::ChainApi, { - /// Indicates if the new transaction was already submitted to all the views in the view_store. - /// If true, it can be removed after inserting any new view. - processed: bool, /// New transaction replacing the old one. xt: ExtrinsicFor, /// Source of the transaction. 
@@ -60,13 +58,84 @@ where watched: bool, } -impl PendingTxReplacement +/// Helper type representing the callback allowing to trigger per-transaction events on +/// `ValidatedPool`'s listener. +type RemovalListener = + Arc, ExtrinsicHash) + Send + Sync>; + +/// Helper struct to maintain the context for pending transaction removal, executed for +/// newly inserted views. +struct PendingTxRemoval +where + ChainApi: graph::ChainApi, +{ + /// Hash of the transaction that will be removed, + xt_hash: ExtrinsicHash, + /// Action that shall be executed on underlying `ValidatedPool`'s listener. + listener_action: RemovalListener, +} + +/// This enum represents an action that should be executed on the newly built +/// view before this view is inserted into the view store. +enum PreInsertAction +where + ChainApi: graph::ChainApi, +{ + /// Represents the action of submitting a new transaction. Intended to use to handle usurped + /// transactions. + SubmitTx(PendingTxSubmission), + + /// Represents the action of removing a subtree of transactions. + RemoveSubtree(PendingTxRemoval), +} + +/// Represents a task awaiting execution, to be performed immediately prior to the view insertion +/// into the view store. +struct PendingPreInsertTask where ChainApi: graph::ChainApi, { - /// Creates new unprocessed instance of pending transaction replacement. - fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { - Self { processed: false, xt, source, watched } + /// The action to be applied when inserting a new view. + action: PreInsertAction, + /// Indicates if the action was already applied to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, +} + +impl PendingPreInsertTask +where + ChainApi: graph::ChainApi, +{ + /// Creates new unprocessed instance of pending transaction submission. + fn new_submission_action( + xt: ExtrinsicFor, + source: TimedTransactionSource, + watched: bool, + ) -> Self { + Self { + processed: false, + action: PreInsertAction::SubmitTx(PendingTxSubmission { xt, source, watched }), + } + } + + /// Creates new processed instance of pending transaction removal. + fn new_removal_action( + xt_hash: ExtrinsicHash, + listener: RemovalListener, + ) -> Self { + Self { + processed: false, + action: PreInsertAction::RemoveSubtree(PendingTxRemoval { + xt_hash, + listener_action: listener, + }), + } + } + + /// Marks a task as done for every view present in view store. Basically means that can be + /// removed on new view insertion. + fn mark_processed(&mut self) { + self.processed = true; } } @@ -100,9 +169,8 @@ where /// notifcication threads. It is meant to assure that replaced transaction is also removed from /// newly built views in maintain process. /// - /// The map's key is hash of replaced extrinsic. - pending_txs_replacements: - RwLock, PendingTxReplacement>>, + /// The map's key is hash of actionable extrinsic (to avoid duplicated entries). 
+ pending_txs_tasks: RwLock, PendingPreInsertTask>>, } impl ViewStore @@ -124,7 +192,7 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, - pending_txs_replacements: Default::default(), + pending_txs_tasks: Default::default(), } } @@ -575,8 +643,12 @@ where replaced: ExtrinsicHash, watched: bool, ) { - if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { - entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(replaced) { + entry.insert(PendingPreInsertTask::new_submission_action( + xt.clone(), + source.clone(), + watched, + )); } else { return }; @@ -586,8 +658,8 @@ where self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; - if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { - replacement.processed = true; + if let Some(replacement) = self.pending_txs_tasks.write().get_mut(&replaced) { + replacement.mark_processed(); } } @@ -596,18 +668,25 @@ where /// After application, all already processed replacements are removed. async fn apply_pending_tx_replacements(&self, view: Arc>) { let mut futures = vec![]; - for replacement in self.pending_txs_replacements.read().values() { - let xt_hash = self.api.hash_and_length(&replacement.xt).0; - futures.push(self.replace_transaction_in_view( - view.clone(), - replacement.source.clone(), - replacement.xt.clone(), - xt_hash, - replacement.watched, - )); + for replacement in self.pending_txs_tasks.read().values() { + match replacement.action { + PreInsertAction::SubmitTx(ref submission) => { + let xt_hash = self.api.hash_and_length(&submission.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + submission.source.clone(), + submission.xt.clone(), + xt_hash, + submission.watched, + )); + }, + PreInsertAction::RemoveSubtree(ref removal) => { + view.remove_subtree(removal.xt_hash, &*removal.listener_action); + }, + } } let _results = futures::future::join_all(futures).await; - self.pending_txs_replacements.write().retain(|_, r| r.processed); + self.pending_txs_tasks.write().retain(|_, r| r.processed); } /// Submits `xt` to the given view. @@ -702,6 +781,9 @@ where /// allows to trigger the required events. Note listener may be called multiple times for the /// same hash. /// + /// Function will also schedule view pre-insertion actions to ensure that transactions will be + /// removed from newly created view. + /// /// Returns a vector containing the hashes of all removed transactions, including the root /// transaction specified by `tx_hash`. Vector contains only unique hashes. 
pub(super) fn remove_transaction_subtree( @@ -710,18 +792,36 @@ where listener_action: F, ) -> Vec> where - F: Fn(&mut crate::graph::Listener, ExtrinsicHash), + F: Fn(&mut crate::graph::Listener, ExtrinsicHash) + + Clone + + Send + + Sync + + 'static, { + if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(xt_hash) { + entry.insert(PendingPreInsertTask::new_removal_action( + xt_hash, + Arc::from(listener_action.clone()), + )); + }; + let mut seen = HashSet::new(); - let active_views = self.active_views.read(); - let inactive_views = self.inactive_views.read(); - active_views + + let removed = self + .active_views + .read() .iter() - .chain(inactive_views.iter()) + .chain(self.inactive_views.read().iter()) .filter(|(_, view)| view.is_imported(&xt_hash)) .map(|(_, view)| view.remove_subtree(xt_hash, &listener_action)) .flatten() .filter(|xt_hash| seen.insert(*xt_hash)) - .collect() + .collect(); + + if let Some(removal_action) = self.pending_txs_tasks.write().get_mut(&xt_hash) { + removal_action.mark_processed(); + } + + removed } } From 213a27a2dda7953518decc7a48c38573b9c2330c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:43:41 +0100 Subject: [PATCH 38/46] validated_pool: SubmitOutcome --- .../client/transaction-pool/src/graph/mod.rs | 4 +- .../client/transaction-pool/src/graph/pool.rs | 10 +-- .../src/graph/validated_pool.rs | 69 ++++++++++++++++--- 3 files changed, 69 insertions(+), 14 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 0810b532273a..008503fb4cdf 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -41,7 +41,9 @@ pub use self::pool::{ BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, RawExtrinsicFor, TransactionFor, ValidatedTransactionFor, }; -pub use validated_pool::{IsValidator, Listener, ValidatedTransaction}; +pub use validated_pool::{ + BaseSubmitOutcome, IsValidator, Listener, ValidatedPoolSubmitOutcome, ValidatedTransaction, +}; pub(crate) use self::pool::CheckBannedBeforeVerify; pub(crate) use listener::DroppedByLimitsEvent; diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index eb5a005940c7..6a8edbdf1d3d 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -37,7 +37,7 @@ use std::{ use super::{ base_pool as base, validated_pool::{IsValidator, ValidatedPool, ValidatedTransaction}, - watcher::Watcher, + ValidatedPoolSubmitOutcome, }; /// Modification notification event stream type; @@ -182,7 +182,7 @@ impl Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -194,7 +194,7 @@ impl Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -205,7 +205,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, B::Error> { + ) -> Result, B::Error> { let res = self.submit_at(at, 
std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -216,7 +216,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { let (_, tx) = self .verify_one(at.hash, at.number, source, xt, CheckBannedBeforeVerify::Yes) .await; diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 95515d795580..8886a4b91eb5 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -24,7 +24,7 @@ use std::{ use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; -use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; +use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions, TransactionPriority}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::SaturatedConversion, @@ -78,10 +78,11 @@ impl ValidatedTransaction { } } -/// A type of validated transaction stored in the pool. +/// A type of validated transaction stored in the validated pool. pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; +/// A type alias representing ValidatedPool listener for given ChainApi type. pub type Listener = super::listener::Listener, B>; /// A closure that returns true if the local node is a validator that can author blocks. @@ -100,6 +101,54 @@ impl From bool + Send + Sync>> for IsValidator { } } +/// Represents the result of `submit` or `submit_and_watch` operations. +pub struct BaseSubmitOutcome { + /// The hash of the submitted transaction. + hash: ExtrinsicHash, + /// A transaction watcher. This is `Some` for `submit_and_watch` and `None` for `submit`. + watcher: Option, + /// The priority of the transaction. Defaults to zero if unknown. + priority: TransactionPriority, +} + +/// Type alias to outcome of submission to `ValidatedPool`. +pub type ValidatedPoolSubmitOutcome = + BaseSubmitOutcome, ExtrinsicHash>>; + +impl BaseSubmitOutcome { + /// Creates a new instance with given hash and priority. + pub fn new(hash: ExtrinsicHash, priority: TransactionPriority) -> Self { + Self { hash, priority, watcher: None } + } + + /// Creates a new instance with given hash and zeroed priority. + pub fn new_no_priority(hash: ExtrinsicHash) -> Self { + Self { hash, priority: 0u64, watcher: None } + } + + /// Sets the transaction watcher. + pub fn with_watcher(mut self, watcher: W) -> Self { + self.watcher = Some(watcher); + self + } + + /// Provides priority of submitted transaction. + pub fn priority(&self) -> TransactionPriority { + self.priority + } + + /// Provides hash of submitted transaction. + pub fn hash(&self) -> ExtrinsicHash { + self.hash + } + + /// Provides a watcher. Should only be called on outcomes of `submit_and_watch`. Otherwise will + /// panic (that would mean logical error in program). + pub fn expect_watcher(&mut self) -> W { + self.watcher.take().expect("watcher was set in submit_and_watch. qed") + } +} + /// Pool that deals with validated transactions. 
pub struct ValidatedPool { api: Arc, @@ -176,7 +225,7 @@ impl ValidatedPool { pub fn submit( &self, txs: impl IntoIterator>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let results = txs .into_iter() .map(|validated_tx| self.submit_one(validated_tx)) @@ -192,7 +241,7 @@ impl ValidatedPool { results .into_iter() .map(|res| match res { - Ok(ref hash) if removed.contains(hash) => + Ok(outcome) if removed.contains(&outcome.hash) => Err(error::Error::ImmediatelyDropped.into()), other => other, }) @@ -200,9 +249,13 @@ impl ValidatedPool { } /// Submit single pre-validated transaction to the pool. - fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { + fn submit_one( + &self, + tx: ValidatedTransactionFor, + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { + let priority = tx.priority; log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one", tx.hash); if !tx.propagate && !(self.is_validator.0)() { return Err(error::Error::Unactionable.into()) @@ -230,7 +283,7 @@ impl ValidatedPool { let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(*imported.hash()) + Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), priority)) }, ValidatedTransaction::Invalid(hash, err) => { log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err); @@ -294,7 +347,7 @@ impl ValidatedPool { pub fn submit_and_watch( &self, tx: ValidatedTransactionFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { let hash = self.api.hash_and_length(&tx.data).0; @@ -302,7 +355,7 @@ impl ValidatedPool { self.submit(std::iter::once(ValidatedTransaction::Valid(tx))) .pop() .expect("One extrinsic passed; one result returned; qed") - .map(|_| watcher) + .map(|outcome| outcome.with_watcher(watcher)) }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), std::iter::once(hash)); From bb7801dccff959cf9443597c81820f58cdd4ea40 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:47:01 +0100 Subject: [PATCH 39/46] view/view_store: SubmitOutcome --- .../src/fork_aware_txpool/view.rs | 8 +- .../src/fork_aware_txpool/view_store.rs | 73 +++++++++++++------ 2 files changed, 54 insertions(+), 27 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 1da80678fa1b..4f26b5bad84c 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -28,7 +28,7 @@ use crate::{ common::log_xt::log_xt_trace, graph::{ self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, - IsValidator, ValidatedTransaction, ValidatedTransactionFor, + IsValidator, ValidatedPoolSubmitOutcome, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; @@ -158,7 +158,7 @@ where pub(super) async fn submit_many( &self, xts: impl IntoIterator)>, - ) -> Vec, ChainApi::Error>> { + ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); @@ -173,7 +173,7 @@ where &self, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, 
ExtrinsicHash>, ChainApi::Error> { + ) -> Result, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); self.pool.submit_and_watch(&self.at, source, xt).await } @@ -182,7 +182,7 @@ where pub(super) fn submit_local( &self, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 9de3e7422981..4ae54e99264c 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -27,7 +27,7 @@ use crate::{ graph::{ self, base_pool::{TimedTransactionSource, Transaction}, - ExtrinsicFor, ExtrinsicHash, TransactionFor, + BaseSubmitOutcome, ExtrinsicFor, ExtrinsicHash, TransactionFor, ValidatedPoolSubmitOutcome, }, ReadyIteratorFor, LOG_TARGET, }; @@ -173,6 +173,18 @@ where pending_txs_tasks: RwLock, PendingPreInsertTask>>, } +/// Type alias to outcome of submission to `ViewStore`. +pub(super) type ViewStoreSubmitOutcome = + BaseSubmitOutcome>; + +impl From> + for ViewStoreSubmitOutcome +{ + fn from(value: ValidatedPoolSubmitOutcome) -> Self { + Self::new(value.hash(), value.priority()) + } +} + impl ViewStore where Block: BlockT, @@ -200,7 +212,7 @@ where pub(super) async fn submit( &self, xts: impl IntoIterator)> + Clone, - ) -> HashMap, ChainApi::Error>>> { + ) -> HashMap, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); active_views @@ -208,7 +220,16 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(xts).await) } + async move { + ( + view.at.hash, + view.submit_many(xts) + .await + .into_iter() + .map(|r| r.map(Into::into)) + .collect::>(), + ) + } }) .collect::>() }; @@ -221,7 +242,7 @@ where pub(super) fn submit_local( &self, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let active_views = self .active_views .read() @@ -236,12 +257,14 @@ where .map(|view| view.submit_local(xt.clone())) .find_or_first(Result::is_ok); - if let Some(Err(err)) = result { - log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); - return Err(err) - }; - - Ok(tx_hash) + match result { + Some(Err(err)) => { + log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); + Err(err) + }, + None => Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash)), + Some(Ok(r)) => Ok(r.into()), + } } /// Import a single extrinsic and starts to watch its progress in the pool. 
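// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of this patch): how a caller is
// expected to consume the submission outcome introduced above. The helper
// below is hypothetical and its generics are abbreviated; `hash()`,
// `priority()` and `expect_watcher()` are the accessors defined on
// `BaseSubmitOutcome` in this series. `expect_watcher()` is only valid for
// outcomes produced by `submit_and_watch`; calling it on a plain `submit`
// outcome panics by design.
// ---------------------------------------------------------------------------
fn consume_submit_outcome<ChainApi, W>(
	mut outcome: BaseSubmitOutcome<ChainApi, W>,
	watched: bool,
) -> ExtrinsicHash<ChainApi>
where
	ChainApi: graph::ChainApi,
{
	// Zero when the priority could not be determined (`new_no_priority`).
	let _priority = outcome.priority();
	if watched {
		// Take the watcher; valid only for `submit_and_watch` outcomes.
		let _watcher = outcome.expect_watcher();
	}
	outcome.hash()
}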
@@ -256,7 +279,7 @@ where _at: Block::Hash, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; let Some(external_watcher) = self.listener.create_external_watcher_for_tx(tx_hash) else { return Err(PoolError::AlreadyImported(Box::new(tx_hash)).into()) @@ -271,13 +294,13 @@ where let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { - Ok(watcher) => { + Ok(mut result) => { self.listener.add_view_watcher_for_tx( tx_hash, view.at.hash, - watcher.into_stream().boxed(), + result.expect_watcher().into_stream().boxed(), ); - Ok(()) + Ok(result) }, Err(e) => Err(e), } @@ -285,17 +308,21 @@ where }) .collect::>() }; - let maybe_error = futures::future::join_all(submit_and_watch_futures) + let result = futures::future::join_all(submit_and_watch_futures) .await .into_iter() .find_or_first(Result::is_ok); - if let Some(Err(err)) = maybe_error { - log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); - return Err(err); - }; - - Ok(external_watcher) + match result { + Some(Err(err)) => { + log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); + return Err(err); + }, + Some(Ok(result)) => + Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)), + None => + Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash).with_watcher(external_watcher)), + } } /// Returns the pool status for every active view. @@ -702,11 +729,11 @@ where ) { if watched { match view.submit_and_watch(source, xt).await { - Ok(watcher) => { + Ok(mut result) => { self.listener.add_view_watcher_for_tx( xt_hash, view.at.hash, - watcher.into_stream().boxed(), + result.expect_watcher().into_stream().boxed(), ); }, Err(e) => { From bbb75b19da583d51261b466624ffe127ef621af8 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:48:02 +0100 Subject: [PATCH 40/46] mempool: update_transaction stub --- .../src/fork_aware_txpool/tx_mem_pool.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 776e2f723993..3a1a13971015 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -26,7 +26,10 @@ //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. 
-use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener}; +use super::{ + metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, + view_store::ViewStoreSubmitOutcome, +}; use crate::{ common::log_xt::log_xt_trace, graph, @@ -487,6 +490,10 @@ where }); self.listener.invalidate_transactions(&invalid_hashes); } + + pub(super) fn update_transaction(&self, outcome: &ViewStoreSubmitOutcome) { + // todo!() + } } #[cfg(test)] From fe7c2f944734a0d9072d848006df82bc65f6835f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:50:01 +0100 Subject: [PATCH 41/46] fatp: SubmitOutcome --- .../fork_aware_txpool/fork_aware_txpool.rs | 35 +++++++++++++++---- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 5ed241796171..d85c48fd7bbd 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -602,7 +602,7 @@ where /// out: /// [ Ok(xth0), Ok(xth1), Err ] /// ``` -fn reduce_multiview_result(input: HashMap>>) -> Vec> { +fn reduce_multiview_result(input: HashMap>>) -> Vec> { let mut values = input.values(); let Some(first) = values.next() else { return Default::default(); @@ -689,6 +689,10 @@ where }) }) }) + .map(|r| r.map(|r| { + mempool.update_transaction(&r); + r.hash() + })) .collect::>()) } @@ -732,9 +736,16 @@ where self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - self.view_store.submit_and_watch(at, timed_source, xt).await.inspect_err(|_| { - self.mempool.remove_transaction(&xt_hash); - }) + self.view_store + .submit_and_watch(at, timed_source, xt) + .await + .inspect_err(|_| { + self.mempool.remove_transaction(&xt_hash); + }) + .map(|mut outcome| { + self.mempool.update_transaction(&outcome); + outcome.expect_watcher() + }) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -868,7 +879,13 @@ where .extend_unwatched(TransactionSource::Local, &[xt.clone()]) .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) + self.view_store + .submit_local(xt) + .map(|outcome| { + self.mempool.update_transaction(&outcome); + outcome.hash() + }) + .or_else(|_| Ok(xt_hash)) } } @@ -1120,7 +1137,11 @@ where .await .into_iter() .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .map(|(result, tx_hash)| { + result + .map(|outcome| self.mempool.update_transaction(&outcome.into())) + .or_else(|_| Err(tx_hash)) + }) .collect::>(); let submitted_count = watched_results.len(); @@ -1495,7 +1516,7 @@ mod reduce_multiview_result_tests { fn empty() { sp_tracing::try_init_simple(); let input = HashMap::default(); - let r = reduce_multiview_result::(input); + let r = reduce_multiview_result::(input); assert!(r.is_empty()); } From d5475105aab359fdd35e388bc0dedbdbd172cfe7 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:51:02 +0100 Subject: [PATCH 42/46] fatp: todo added --- .../transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs 
b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index d85c48fd7bbd..dd396e6178ca 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1138,6 +1138,8 @@ where .into_iter() .zip(hashes) .map(|(result, tx_hash)| { + //todo: we may need a bool flag here indicating if we need to update priority + //(premature optimization) result .map(|outcome| self.mempool.update_transaction(&outcome.into())) .or_else(|_| Err(tx_hash)) From 9e4d43fc52dff7802c4d62a6e2da6928302272af Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:51:53 +0100 Subject: [PATCH 43/46] single-state txpool: SubmitOutcome integration --- .../single_state_txpool.rs | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index be2cbfb99c5a..984f7e8e9b3e 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -266,7 +266,12 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, xts).await) + Ok(pool + .submit_at(&at, xts) + .await + .into_iter() + .map(|result| result.map(|outcome| outcome.hash())) + .collect()) } async fn submit_one( @@ -284,6 +289,7 @@ where let at = HashAndNumber { hash: at, number: number? }; pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) .await + .map(|outcome| outcome.hash()) } async fn submit_and_watch( @@ -300,15 +306,13 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? 
}; - let watcher = pool - .submit_and_watch( - &at, - TimedTransactionSource::from_transaction_source(source, false), - xt, - ) - .await?; - - Ok(watcher.into_stream().boxed()) + pool.submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await + .map(|mut outcome| outcome.expect_watcher().into_stream().boxed()) } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -476,7 +480,11 @@ where validity, ); - self.pool.validated_pool().submit(vec![validated]).remove(0) + self.pool + .validated_pool() + .submit(vec![validated]) + .remove(0) + .map(|outcome| outcome.hash()) } } From 773474831679df1800dd0af728d2502e309de5b1 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:52:45 +0100 Subject: [PATCH 44/46] tests: SubmitOutcome fixes --- .../client/transaction-pool/src/graph/pool.rs | 70 +++++++++++++------ .../src/single_state_txpool/revalidation.rs | 5 +- .../client/transaction-pool/tests/pool.rs | 14 ++-- 3 files changed, 61 insertions(+), 28 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 6a8edbdf1d3d..b7aa7276f5e9 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -516,6 +516,7 @@ mod tests { .into(), ), ) + .map(|outcome| outcome.hash()) .unwrap(); // then @@ -544,7 +545,10 @@ mod tests { // when let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)) + .into_iter() + .map(|r| r.map(|o| o.hash())) + .collect::>(); log::debug!("--> {hashes:#?}"); // then @@ -568,7 +572,8 @@ mod tests { // when pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -591,7 +596,8 @@ mod tests { let uxt = ExtrinsicBuilder::new_include_data(vec![42]).build(); // when - let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); // then assert_matches!(res.unwrap_err(), error::Error::Unactionable); @@ -619,7 +625,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash1 = block_on( pool.submit_one( &han_of_block0, @@ -633,7 +640,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // future doesn't count let _hash = block_on( pool.submit_one( @@ -648,7 +656,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().ready, 2); assert_eq!(pool.validated_pool().status().future, 1); @@ -681,7 +690,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash2 = block_on( pool.submit_one( &han_of_block0, @@ -695,7 +705,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash3 = block_on( pool.submit_one( &han_of_block0, @@ -709,7 +720,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when pool.validated_pool.clear_stale(&api.expect_hash_and_number(5)); @@ -741,7 +753,8 
@@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![0]], vec![hash1])); @@ -769,8 +782,9 @@ mod tests { let api = Arc::new(TestApi::default()); let pool = Pool::new(options, true.into(), api.clone()); - let hash1 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap(); + let hash1 = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())) + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().future, 1); // when @@ -787,7 +801,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // then assert_eq!(pool.validated_pool().status().future, 1); @@ -819,6 +834,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -845,6 +861,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -873,7 +890,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -910,7 +928,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -949,7 +968,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 1); @@ -988,7 +1008,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1013,7 +1034,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1046,7 +1068,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, xt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1113,7 +1136,9 @@ mod tests { // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // after validation `Transfer` will have priority set to 4 (validate_transaction @@ -1124,8 +1149,9 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + let watcher = block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 2); // when diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index 5368ff71d602..830dacd642ba 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -401,7 +401,8 @@ mod tests { TimedTransactionSource::new_external(false), uxt.clone().into(), )) - .expect("Should be valid"); + 
.expect("Should be valid") + .hash(); block_on(queue.revalidate_later(han_of_block0.hash, vec![uxt_hash])); @@ -440,7 +441,7 @@ mod tests { vec![(source.clone(), uxt0.into()), (source, uxt1.into())], )) .into_iter() - .map(|r| r.expect("Should be valid")) + .map(|r| r.expect("Should be valid").hash()) .collect::>(); assert_eq!(api.validation_requests().len(), 2); diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index e556ba9875f1..e4fd1b37c1f1 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -158,6 +158,7 @@ fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + .map(|o| o.hash()) .unwrap(); block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); @@ -184,10 +185,13 @@ fn prune_tags_should_work() { fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); - let hash = - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); + let hash = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .unwrap() + .hash(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .map(|_| ()) + .unwrap_err(); // when let pending: Vec<_> = pool @@ -198,7 +202,9 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .map(|_| ()) + .unwrap_err(); } #[test] From e1dce5a16941bbd1f6f474ef8b7d7cdde21bfd53 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:53:11 +0100 Subject: [PATCH 45/46] mempool: sizes fix --- .../transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 3a1a13971015..dc338ff000ae 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -288,13 +288,14 @@ where tx: TxInMemPool, tx_to_be_removed: Option>, ) -> Result>, sc_transaction_pool_api::error::Error> { - let bytes = self.transactions.bytes(); let mut transactions = self.transactions.write(); tx_to_be_removed.inspect(|hash| { transactions.remove(hash); }); + let bytes = self.transactions.bytes(); + let result = match ( self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), transactions.contains_key(&hash), From 04ac71a1af896e2ef0422f33475df4b97bbf35bb Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 3 Dec 2024 19:17:33 +0100 Subject: [PATCH 46/46] dropping transaction - size limit is properly obeyed now --- .../fork_aware_txpool/fork_aware_txpool.rs | 63 ++++---- .../src/fork_aware_txpool/tx_mem_pool.rs | 139 ++++++++++++++++-- .../src/fork_aware_txpool/view_store.rs | 5 +- 
.../transaction-pool/src/graph/tracked_map.rs | 10 ++ .../src/graph/validated_pool.rs | 24 +-- .../transaction-pool/tests/fatp_prios.rs | 16 +- 6 files changed, 193 insertions(+), 64 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index dd396e6178ca..3f8d66110432 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -643,7 +643,7 @@ where /// are reduced to single result. Refer to `reduce_multiview_result` for more details. async fn submit_at( &self, - _: ::Hash, + at: ::Hash, source: TransactionSource, xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { @@ -660,6 +660,21 @@ where .collect::>()) } + //todo: review + test maybe? + let retries = mempool_results + .into_iter() + .zip(xts.clone()) + .map(|(result, xt)| async move { + match result { + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(at, source, false, xt).await, + result @ _ => result, + } + }) + .collect::>(); + + let mempool_results = futures::future::join_all(retries).await; + let to_be_submitted = mempool_results .iter() .zip(xts) @@ -726,7 +741,7 @@ where log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, source: timed_source } = + let InsertionInfo { hash: xt_hash, source: timed_source, .. } = match self.mempool.push_watched(source, xt.clone()) { Ok(result) => result, Err(TxPoolApiError::ImmediatelyDropped) => @@ -1338,33 +1353,31 @@ where ) .await; - // 3. check if most_recent_view contains a transaction with lower priority (actually worse - - // do we want to check timestamp too - no: see #4609?) - // Would be perfect to choose transaction with lowest the number of dependant txs in its - // subtree. - if let Some(worst_tx_hash) = - best_view.pool.validated_pool().find_transaction_with_lower_prio(validated_tx) - { + if let Some(priority) = validated_tx.priority() { + // 3. check if mempool contains a transaction with lower priority (actually worse - do + // we want to check timestamp too - no: see #4609?) Would be perfect to choose + // transaction with lowest the number of dependant txs in its subtree. // 4. if yes - remove worse transaction from mempool and add new one. - log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?} replaced by {tx_hash:?}"); let insertion_info = - self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash)?; - - // 5. notify listner - self.view_store - .listener - .transaction_dropped(DroppedTransaction::new_enforced_by_limts(worst_tx_hash)); - - // 6. remove transaction from the view_store - self.view_store.remove_transaction_subtree( - worst_tx_hash, - |listener, removed_tx_hash| { - listener.limits_enforced(&removed_tx_hash); - }, - ); + self.mempool.try_replace_transaction(xt, priority, source, watched)?; + + for worst_hash in &insertion_info.removed { + log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}"); + // 5. notify listner + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash)); + + // 6. 
remove transaction from the view_store + self.view_store.remove_transaction_subtree( + *worst_hash, + |listener, removed_tx_hash| { + listener.limits_enforced(&removed_tx_hash); + }, + ); + } // 8. add to pending_replacements - make sure it will not sneak back via cloned view - // 9. subemit new one to the view, this will be done upon in the caller return Ok(insertion_info) } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index dc338ff000ae..af274b18c925 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -38,15 +38,19 @@ use crate::{ }; use futures::FutureExt; use itertools::Itertools; -use sc_transaction_pool_api::TransactionSource; +use sc_transaction_pool_api::{TransactionPriority, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::Block as BlockT, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; use std::{ + cmp::Ordering, collections::HashMap, - sync::{atomic, atomic::AtomicU64, Arc}, + sync::{ + atomic::{self, AtomicU64}, + Arc, + }, time::Instant, }; @@ -80,6 +84,9 @@ where source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, + /// Priority of transaction at some block. It is assumed it will not be changed often. + //todo: Option is needed here. This means lock. So maybe +AtomicBool? + priority: AtomicU64, //todo: we need to add future / ready status at finalized block. //If future transactions are stuck in tx_mem_pool (due to limits being hit), we need a means // to replace them somehow with newly coming transactions. @@ -112,12 +119,23 @@ where Self::new(true, source, tx, bytes) } - /// Creates a new instance of wrapper for a transaction. + /// Creates a new instance of wrapper for a transaction with no priority. fn new( watched: bool, source: TransactionSource, tx: ExtrinsicFor, bytes: usize, + ) -> Self { + Self::new_with_priority(watched, source, tx, bytes, 0) + } + + /// Creates a new instance of wrapper for a transaction with given priority. + fn new_with_priority( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + priority: TransactionPriority, ) -> Self { Self { watched, @@ -125,6 +143,7 @@ where source: TimedTransactionSource::from_transaction_source(source, true), validated_at: AtomicU64::new(0), bytes, + priority: AtomicU64::new(priority), } } @@ -139,6 +158,11 @@ where pub(crate) fn source(&self) -> TimedTransactionSource { self.source.clone() } + + /// Returns the priority of the transaction. 
+	pub(crate) fn priority(&self) -> TransactionPriority {
+		self.priority.load(atomic::Ordering::Relaxed)
+	}
 }
 
 impl Size for Arc>
 where
@@ -198,11 +222,15 @@ where
 pub(super) struct InsertionInfo {
 	pub(super) hash: Hash,
 	pub(super) source: TimedTransactionSource,
+	pub(super) removed: Vec,
 }
 
 impl InsertionInfo {
 	fn new(hash: Hash, source: TimedTransactionSource) -> Self {
-		Self { hash, source }
+		Self::new_with_removed(hash, source, Default::default())
+	}
+	fn new_with_removed(hash: Hash, source: TimedTransactionSource, removed: Vec) -> Self {
+		Self { hash, source, removed }
+	}
 }
 
@@ -286,14 +314,9 @@ where
 		&self,
 		hash: ExtrinsicHash,
 		tx: TxInMemPool,
-		tx_to_be_removed: Option>,
 	) -> Result>, sc_transaction_pool_api::error::Error> {
 		let mut transactions = self.transactions.write();
 
-		tx_to_be_removed.inspect(|hash| {
-			transactions.remove(hash);
-		});
-
 		let bytes = self.transactions.bytes();
 
 		let result = match (
@@ -314,6 +337,85 @@ where
 		result
 	}
 
+	/// Attempts to insert a new transaction in the memory pool and drop some worse existing
+	/// transactions.
+	///
+	/// This operation will not overflow the limit of the mempool. It means that the cumulative
+	/// size of removed transactions will be equal to (or greater than) the size of the newly inserted
+	/// transaction.
+	///
+	/// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully
+	/// inserted; otherwise, returns an appropriate error indicating the failure.
+	pub(super) fn try_insert_with_dropping(
+		&self,
+		hash: ExtrinsicHash,
+		new_tx: TxInMemPool,
+	) -> Result>, sc_transaction_pool_api::error::Error> {
+		let mut transactions = self.transactions.write();
+
+		if transactions.contains_key(&hash) {
+			return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)));
+		}
+
+		let mut sorted =
+			transactions.iter().map(|(h, v)| (h.clone(), v.clone())).collect::>();
+
+		// When pushing a higher-prio transaction, we need to find a number of lower-prio txs such
+		// that the sum of their bytes is at least the size of the new tx. Otherwise we could overflow
+		// size limits. Naive way to do it: rev-sort by priority and eat the tail.
+ + // reverse (lowest prio last) + sorted.sort_by(|(_, a), (_, b)| match b.priority().cmp(&a.priority()) { + Ordering::Equal => match (a.source.timestamp, b.source.timestamp) { + (Some(a), Some(b)) => b.cmp(&a), + _ => Ordering::Equal, + }, + ordering @ _ => ordering, + }); + + let required_size = new_tx.bytes; + let mut total_size = 0usize; + let mut to_be_removed = vec![]; + + while total_size < required_size { + let Some((worst_hash, worst_tx)) = sorted.pop() else { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + }; + + if worst_tx.priority() >= new_tx.priority() { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + } + + total_size += worst_tx.bytes; + to_be_removed.push(worst_hash); + } + + let source = new_tx.source(); + transactions.insert(hash, Arc::from(new_tx)); + for worst_hash in &to_be_removed { + transactions.remove(worst_hash); + } + debug_assert!(!self.is_limit_exceeded(transactions.len(), self.transactions.bytes())); + + Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed)) + + // let result = match ( + // self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), + // transactions.contains_key(&hash), + // ) { + // (false, false) => { + // let source = tx.source(); + // transactions.insert(hash, Arc::from(tx)); + // Ok(InsertionInfo::new(hash, source)) + // }, + // (_, true) => + // Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), + // (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), + // }; + // log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, + // result.as_ref().map(|r| r.hash)); + } + /// Attempts to replace an existing transaction in the memory pool with a new one. /// /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully @@ -321,15 +423,14 @@ where pub(super) fn try_replace_transaction( &self, new_xt: ExtrinsicFor, + priority: TransactionPriority, source: TransactionSource, watched: bool, - replaced_tx_hash: ExtrinsicHash, ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&new_xt); - self.try_insert( + self.try_insert_with_dropping( hash, - TxInMemPool::new(watched, source, new_xt, length), - Some(replaced_tx_hash), + TxInMemPool::new_with_priority(watched, source, new_xt, length, priority), ) } @@ -347,7 +448,7 @@ where .iter() .map(|xt| { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length), None) + self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length)) }) .collect::>(); result @@ -361,7 +462,7 @@ where xt: ExtrinsicFor, ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length), None) + self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -493,7 +594,11 @@ where } pub(super) fn update_transaction(&self, outcome: &ViewStoreSubmitOutcome) { - // todo!() + if let Some(priority) = outcome.priority() { + if let Some(tx) = self.transactions.write().get_mut(&outcome.hash()) { + tx.priority.store(priority, atomic::Ordering::Relaxed); + } + } } } @@ -650,4 +755,6 @@ mod tx_mem_pool_tests { sc_transaction_pool_api::error::Error::ImmediatelyDropped )); } + + //add some test for try_insert_with_dropping 
} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 4ae54e99264c..89bf47cc3895 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -262,7 +262,7 @@ where log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); Err(err) }, - None => Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash)), + None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None)), Some(Ok(r)) => Ok(r.into()), } } @@ -320,8 +320,7 @@ where }, Some(Ok(result)) => Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)), - None => - Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash).with_watcher(external_watcher)), + None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None).with_watcher(external_watcher)), } } diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 6c3bbbf34b55..818c1ebe544f 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -173,6 +173,16 @@ where pub fn len(&mut self) -> usize { self.inner_guard.len() } + + /// Returns an iterator over all values. + pub fn values(&self) -> std::collections::hash_map::Values { + self.inner_guard.values() + } + + /// Returns an iterator over all key-value pairs. + pub fn iter(&self) -> Iter<'_, K, V> { + self.inner_guard.iter() + } } #[cfg(test)] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 8886a4b91eb5..42a7b275cbf0 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -76,6 +76,14 @@ impl ValidatedTransaction { valid_till: at.saturated_into::().saturating_add(validity.longevity), }) } + + /// Returns priority for valid transaction, None if transaction is not valid. + pub fn priority(&self) -> Option { + match self { + ValidatedTransaction::Valid(base::Transaction { priority, .. }) => Some(*priority), + _ => None, + } + } } /// A type of validated transaction stored in the validated pool. @@ -107,8 +115,9 @@ pub struct BaseSubmitOutcome { hash: ExtrinsicHash, /// A transaction watcher. This is `Some` for `submit_and_watch` and `None` for `submit`. watcher: Option, - /// The priority of the transaction. Defaults to zero if unknown. - priority: TransactionPriority, + + /// The priority of the transaction. Defaults to None if unknown. + priority: Option, } /// Type alias to outcome of submission to `ValidatedPool`. @@ -117,15 +126,10 @@ pub type ValidatedPoolSubmitOutcome = impl BaseSubmitOutcome { /// Creates a new instance with given hash and priority. - pub fn new(hash: ExtrinsicHash, priority: TransactionPriority) -> Self { + pub fn new(hash: ExtrinsicHash, priority: Option) -> Self { Self { hash, priority, watcher: None } } - /// Creates a new instance with given hash and zeroed priority. - pub fn new_no_priority(hash: ExtrinsicHash) -> Self { - Self { hash, priority: 0u64, watcher: None } - } - /// Sets the transaction watcher. pub fn with_watcher(mut self, watcher: W) -> Self { self.watcher = Some(watcher); @@ -133,7 +137,7 @@ impl BaseSubmitOutcome { } /// Provides priority of submitted transaction. 
- pub fn priority(&self) -> TransactionPriority { + pub fn priority(&self) -> Option { self.priority } @@ -283,7 +287,7 @@ impl ValidatedPool { let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), priority)) + Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), Some(priority))) }, ValidatedTransaction::Invalid(hash, err) => { log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err); diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 3c7db35323d8..abd8ebec2296 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -282,10 +282,8 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { api.set_priority(&xt4, 5); api.set_priority(&xt5, 6); - let _xt0_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let _xt1_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 2); @@ -313,13 +311,11 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { // let header04 = api.push_block_with_parent(header03.hash(), vec![], true); // block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); - assert_eq!(xt2_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); - let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); - assert_eq!(xt3_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); - assert_ready_iterator!(header01.hash(), pool, [xt1, xt0]); - assert_ready_iterator!(header02.hash(), pool, []); + assert_ready_iterator!(header01.hash(), pool, []); + assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]); assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); }
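
For reviewers of the last patch: the eviction rule inside `try_insert_with_dropping` is easiest to follow in isolation. Below is a minimal, self-contained sketch of that selection loop, assuming simplified illustrative types (plain `u64` hashes, a standalone `Candidate` struct, a free function) rather than the pool's real generics and API; it only mirrors the "rev-sort by priority and eat the tail" idea described in the code comments.

// Illustrative sketch only: simplified stand-in for the victim selection in
// try_insert_with_dropping; Candidate and select_victims are not real pool types.
#[derive(Clone)]
struct Candidate {
    hash: u64,
    priority: u64,
    bytes: usize,
    timestamp: u64,
}

/// Picks transactions to drop so that a new transaction of `new_priority` and
/// `new_bytes` fits without exceeding the size limit. Returns `None` when the
/// new transaction must be rejected (the pool runs out of candidates, or only
/// equal-or-higher-priority transactions remain).
fn select_victims(pool: &[Candidate], new_priority: u64, new_bytes: usize) -> Option<Vec<u64>> {
    let mut sorted: Vec<Candidate> = pool.to_vec();
    // Highest priority first; for equal priority, newest first. Popping from the
    // tail therefore yields the lowest-priority, oldest candidates.
    sorted.sort_by(|a, b| {
        b.priority.cmp(&a.priority).then_with(|| b.timestamp.cmp(&a.timestamp))
    });

    let mut freed = 0usize;
    let mut victims = Vec::new();
    while freed < new_bytes {
        let worst = sorted.pop()?; // pool exhausted -> reject
        if worst.priority >= new_priority {
            return None; // only equal-or-better transactions left -> reject
        }
        freed += worst.bytes;
        victims.push(worst.hash);
    }
    Some(victims)
}

fn main() {
    let pool = vec![
        Candidate { hash: 1, priority: 2, bytes: 100, timestamp: 10 },
        Candidate { hash: 2, priority: 3, bytes: 100, timestamp: 11 },
        Candidate { hash: 3, priority: 10, bytes: 100, timestamp: 12 },
    ];
    // A 150-byte transaction with priority 5 evicts the two lowest-priority entries.
    assert_eq!(select_victims(&pool, 5, 150), Some(vec![1, 2]));
    // A transaction that would have to evict the priority-10 entry is rejected.
    assert_eq!(select_victims(&pool, 5, 350), None);
}

Breaking ties by newest-first within equal priorities means the oldest of the lowest-priority transactions are evicted first, which matches the tie-breaking on `source.timestamp` in the patch; the cumulative-bytes condition is what keeps the mempool size limit from being exceeded after the swap.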