From 018f9d47531023aba6ec5e57dae32d084ba6d13b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:41:59 +0100 Subject: [PATCH 01/29] dropped_watcher: new fns are public --- .../src/fork_aware_txpool/dropped_watcher.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 7679e3b169d2..05c2d049a62f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -53,11 +53,13 @@ pub struct DroppedTransaction { } impl DroppedTransaction { - fn new_usurped(tx_hash: Hash, by: Hash) -> Self { + /// Creates a new instance with reason set to `DroppedReason::Usurped(by)`. + pub fn new_usurped(tx_hash: Hash, by: Hash) -> Self { Self { reason: DroppedReason::Usurped(by), tx_hash } } - fn new_enforced_by_limts(tx_hash: Hash) -> Self { + /// Creates a new instance with reason set to `DroppedReason::LimitsEnforced`. 
+ pub fn new_enforced_by_limts(tx_hash: Hash) -> Self { Self { reason: DroppedReason::LimitsEnforced, tx_hash } } } From 4aa24d324f7f1cb79611d02e99eee9d224d2b6c3 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:42:25 +0100 Subject: [PATCH 02/29] dropped_watcher: improvement --- .../transaction-pool/src/fork_aware_txpool/dropped_watcher.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 05c2d049a62f..48767ad61605 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -206,6 +206,10 @@ where views ); views.remove(&key); + //todo: merge heads up warning! + if views.is_empty() { + ctx.pending_dropped_transactions.push(*tx_hash); + } }); self.future_transaction_views.iter_mut().for_each(|(tx_hash, views)| { From 9fe88a661c5419bdfce578e3f0e6b97a194236a6 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:53:12 +0100 Subject: [PATCH 03/29] fatp: handling higher prio with full mempool --- .../fork_aware_txpool/fork_aware_txpool.rs | 100 +++++++++++++++--- .../src/fork_aware_txpool/tx_mem_pool.rs | 83 ++++++++++----- .../src/fork_aware_txpool/view.rs | 12 +++ .../src/fork_aware_txpool/view_store.rs | 20 ++++ .../src/graph/validated_pool.rs | 50 +++++++++ 5 files changed, 223 insertions(+), 42 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 4ec87f1fefa4..fbe960ac0da3 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ 
b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,7 +31,10 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + fork_aware_txpool::{ + dropped_watcher::{DroppedReason, DroppedTransaction}, + revalidation_worker, + }, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -49,8 +52,9 @@ use futures::{ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ - ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + error::Error as TxPoolApiError, ChainEvent, ImportNotificationStream, + MaintainedTransactionPool, PoolStatus, TransactionFor, TransactionPool, TransactionSource, + TransactionStatusStreamFor, TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; @@ -287,7 +291,7 @@ where DroppedReason::LimitsEnforced => {}, }; - mempool.remove_dropped_transaction(&dropped_tx_hash).await; + mempool.remove_transaction(&dropped_tx_hash); view_store.listener.transaction_dropped(dropped); import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } @@ -675,9 +679,9 @@ where submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. 
qed.") - .inspect_err(|_| - mempool.remove(insertion.hash) - ) + .inspect_err(|_|{ + mempool.remove_transaction(&insertion.hash); + }) }) }) .collect::>()) @@ -712,18 +716,20 @@ where ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); + let InsertionInfo { hash: xt_hash, source: timed_source } = match self.mempool.push_watched(source, xt.clone()) { Ok(result) => result, - Err(e) => return Err(e), + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(at, source, true, xt.clone()).await?, + Err(e) => return Err(e.into()), }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - self.view_store - .submit_and_watch(at, timed_source, xt) - .await - .inspect_err(|_| self.mempool.remove(xt_hash)) + self.view_store.submit_and_watch(at, timed_source, xt).await.inspect_err(|_| { + self.mempool.remove_transaction(&xt_hash); + }) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -1131,7 +1137,7 @@ where for result in watched_results { if let Err(tx_hash) = result { self.view_store.listener.invalidate_transactions(&[tx_hash]); - self.mempool.remove(tx_hash); + self.mempool.remove_transaction(&tx_hash); } } } @@ -1263,6 +1269,74 @@ where fn tx_hash(&self, xt: &TransactionFor) -> TxHash { self.api.hash_and_length(xt).0 } + + /// Attempts to replace a lower-priority transaction in the transaction pool with a new one. + /// + /// This asynchronous function verifies the new transaction against the most recent view. If a + /// transaction with a lower priority exists in the transaction pool, it is replaced with the + /// new transaction. + /// + /// If no lower-priority transaction is found, the function returns an error indicating the + /// transaction was dropped immediately. 
+ async fn attempt_transaction_replacement( + &self, + _: Block::Hash, + source: TransactionSource, + watched: bool, + xt: ExtrinsicFor, + ) -> Result>, TxPoolApiError> { + // 1. we should validate at most_recent, and reuse result to submit there. + let at = self + .view_store + .most_recent_view + .read() + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + // 2. validate transaction at `at` + let (best_view, _) = self + .view_store + .get_view_at(at, false) + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let (tx_hash, validated_tx) = best_view + .pool + .verify_one( + best_view.at.hash, + best_view.at.number, + TimedTransactionSource::from_transaction_source(source, false), + xt.clone(), + crate::graph::CheckBannedBeforeVerify::Yes, + ) + .await; + + // 3. check if most_recent_view contains a transaction with lower priority (actually worse - + // do we want to check timestamp too - no: see #4609?) + // Would be perfect to choose transaction with the lowest number of dependent txs in its + // subtree. + if let Some(worst_tx_hash) = + best_view.pool.validated_pool().find_transaction_with_lower_prio(validated_tx) + { + // 4. if yes - remove worse transaction from mempool and add new one. + log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?}"); + let insertion_info = + self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash); + + // 5. notify listener + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_usurped(worst_tx_hash, tx_hash)); + + // 7. remove transaction from the view_store + self.view_store.remove_transaction_subtree(worst_tx_hash, tx_hash); + + // 8. add to pending_replacements - make sure it will not sneak back via cloned view + + // 9. 
submit new one to the view, this will be done in the caller + return insertion_info + } + + Err(TxPoolApiError::ImmediatelyDropped) + } } #[async_trait] diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 7b824d4653c2..4a7c3a22b18f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -101,19 +101,23 @@ where /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { - watched: false, - tx, - source: TimedTransactionSource::from_transaction_source(source, true), - validated_at: AtomicU64::new(0), - bytes, - } + Self::new(false, source, tx, bytes) } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { + Self::new(true, source, tx, bytes) + } + + /// Creates a new instance of wrapper for a transaction. 
+ fn new( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + ) -> Self { Self { - watched: true, + watched, tx, source: TimedTransactionSource::from_transaction_source(source, true), validated_at: AtomicU64::new(0), @@ -279,27 +283,52 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result>, ChainApi::Error> { + tx_to_be_removed: Option>, + ) -> Result>, sc_transaction_pool_api::error::Error> { let bytes = self.transactions.bytes(); let mut transactions = self.transactions.write(); + + tx_to_be_removed.inspect(|hash| { + transactions.remove(hash); + }); + let result = match ( - !self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), + self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), transactions.contains_key(&hash), ) { - (true, false) => { + (false, false) => { let source = tx.source(); transactions.insert(hash, Arc::from(tx)); Ok(InsertionInfo::new(hash, source)) }, (_, true) => - Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), - (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), + Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), + (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), }; log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } + /// Attempts to replace an existing transaction in the memory pool with a new one. + /// + /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully + /// inserted; otherwise, returns an appropriate error indicating the failure. 
+ pub(super) fn try_replace_transaction( + &self, + new_xt: ExtrinsicFor, + source: TransactionSource, + watched: bool, + replaced_tx_hash: ExtrinsicHash, + ) -> Result>, sc_transaction_pool_api::error::Error> { + let (hash, length) = self.api.hash_and_length(&new_xt); + self.try_insert( + hash, + TxInMemPool::new(watched, source, new_xt, length), + Some(replaced_tx_hash), + ) + } + /// Adds a new unwatched transactions to the internal buffer not exceeding the limit. /// /// Returns the vector of results for each transaction, the order corresponds to the input @@ -308,12 +337,13 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec>, ChainApi::Error>> { + ) -> Vec>, sc_transaction_pool_api::error::Error>> + { let result = xts .iter() .map(|xt| { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length)) + self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length), None) }) .collect::>(); result @@ -325,18 +355,9 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result>, ChainApi::Error> { + ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) - } - - /// Removes transaction from the memory pool which are specified by the given list of hashes. - pub(super) async fn remove_dropped_transaction( - &self, - dropped: &ExtrinsicHash, - ) -> Option>> { - log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); - self.transactions.write().remove(dropped) + self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length), None) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -362,9 +383,13 @@ where .collect::>() } - /// Removes a transaction from the memory pool based on a given hash. 
- pub(super) fn remove(&self, hash: ExtrinsicHash) { - let _ = self.transactions.write().remove(&hash); + /// Removes a transaction with given hash from the memory pool. + pub(super) fn remove_transaction( + &self, + hash: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{hash:?}] mempool::remove_transaction"); + self.transactions.write().remove(hash) } /// Revalidates a batch of transactions against the provided finalized block. diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 3cbb8fa4871d..9adb879cb2c7 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -460,4 +460,16 @@ where const IGNORE_BANNED: bool = false; self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() } + + /// Removes the whole transaction subtree from the inner pool. + /// + /// Intended to be called when removal is a result of replacement. Provided `replaced_with` + /// transaction hash is used in emitted _usurped_ event. + pub(super) fn remove_subtree( + &self, + tx_hash: ExtrinsicHash, + replaced_with: ExtrinsicHash, + ) -> Vec> { + self.pool.validated_pool().remove_subtree(tx_hash, replaced_with) + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index a06c051f0a7e..7687a5699747 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -690,4 +690,24 @@ where }; let _results = futures::future::join_all(submit_futures).await; } + + /// Removes the whole transaction subtree from every view within the view store. + /// + /// Intended to be called when removal is a result of replacement. 
Provided `replaced_with` + /// transaction hash is used in emitted _usurped_ event. + pub(super) fn remove_transaction_subtree( + &self, + xt_hash: ExtrinsicHash, + replaced_with: ExtrinsicHash, + ) { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + active_views + .iter() + .chain(inactive_views.iter()) + .filter(|(_, view)| view.is_imported(&xt_hash)) + .for_each(|(_, view)| { + view.remove_subtree(xt_hash, replaced_with); + }); + } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 14df63d9673e..d283ee665c67 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -126,6 +126,8 @@ impl Clone for ValidatedPool { } } +type BaseTransactionFor = base::Transaction, ExtrinsicFor>; + impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { @@ -686,6 +688,54 @@ impl ValidatedPool { listener.future(&f.hash); }); } + + /// Searches the base pool for a transaction with a lower priority than the given one. + /// + /// Returns the hash of a lower-priority transaction if found, otherwise `None`. + pub fn find_transaction_with_lower_prio( + &self, + tx: ValidatedTransactionFor, + ) -> Option> { + match tx { + ValidatedTransaction::Valid(base::Transaction { priority, .. }) => { + let pool = self.pool.read(); + pool.fold_ready( + Some((priority, Option::>>::None)), + |acc, current| { + let (priority, _) = acc.as_ref().expect("acc is initialized. qed"); + if current.priority < *priority { + return Some((current.priority, Some(current.clone()))) + } else { + return acc + } + }, + ) + .map(|r| r.1.map(|tx| tx.hash))? + }, + _ => None, + } + } + + /// Removes the whole transaction subtree the pool. + /// + /// Intended to be called when removal is a result of replacement. 
Provided `replaced_with` + /// transaction hash is used in emitted _usurped_ event. + pub fn remove_subtree( + &self, + tx_hash: ExtrinsicHash, + replaced_with: ExtrinsicHash, + ) -> Vec> { + self.pool + .write() + .remove_subtree(&[tx_hash]) + .into_iter() + .map(|tx| tx.hash) + .inspect(|tx_hash| { + let mut listener = self.listener.write(); + listener.usurped(&tx_hash, &replaced_with) + }) + .collect::>() + } } fn fire_events(listener: &mut Listener, imported: &base::Imported) From 54ae11c3c79a9aec52bafe2b39e2fd7abdf6df59 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:53:46 +0100 Subject: [PATCH 04/29] graph: fold_ready improved --- .../transaction-pool/src/graph/base_pool.rs | 54 +++++++++++-------- .../transaction-pool/src/graph/ready.rs | 10 ++-- 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 04eaa998f42e..8dce507be4e3 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -438,6 +438,16 @@ impl BasePool(&self, init: R, mut f: F) -> R + where + F: FnMut(R, &Arc>) -> R, + { + self.ready + .fold::(init, |acc, current| f(acc, ¤t.transaction.transaction)) + } + /// Makes sure that the transactions in the queues stay within provided limits. /// /// Removes and returns worst transactions from the queues and all transactions that depend on @@ -453,27 +463,29 @@ impl BasePool, _>(|worst, current| { - let transaction = ¤t.transaction; - worst - .map(|worst| { - // Here we don't use `TransactionRef`'s ordering implementation because - // while it prefers priority like need here, it also prefers older - // transactions for inclusion purposes and limit enforcement needs to prefer - // newer transactions instead and drop the older ones. 
- match worst.transaction.priority.cmp(&transaction.transaction.priority) { - Ordering::Less => worst, - Ordering::Equal => - if worst.insertion_id > transaction.insertion_id { - transaction.clone() - } else { - worst - }, - Ordering::Greater => transaction.clone(), - } - }) - .or_else(|| Some(transaction.clone())) - }); + let worst = + self.ready.fold::>, _>(None, |worst, current| { + let transaction = ¤t.transaction; + worst + .map(|worst| { + // Here we don't use `TransactionRef`'s ordering implementation because + // while it prefers priority like need here, it also prefers older + // transactions for inclusion purposes and limit enforcement needs to + // prefer newer transactions instead and drop the older ones. + match worst.transaction.priority.cmp(&transaction.transaction.priority) + { + Ordering::Less => worst, + Ordering::Equal => + if worst.insertion_id > transaction.insertion_id { + transaction.clone() + } else { + worst + }, + Ordering::Greater => transaction.clone(), + } + }) + .or_else(|| Some(transaction.clone())) + }); if let Some(worst) = worst { removed.append(&mut self.remove_subtree(&[worst.transaction.hash.clone()])) diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 9061d0e25581..b8aef99e638d 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -232,12 +232,10 @@ impl ReadyTransactions { Ok(replaced) } - /// Fold a list of ready transactions to compute a single value. - pub fn fold, &ReadyTx) -> Option>( - &mut self, - f: F, - ) -> Option { - self.ready.read().values().fold(None, f) + /// Fold a list of ready transactions to compute a single value using initial value of + /// accumulator. + pub fn fold) -> R>(&self, init: R, f: F) -> R { + self.ready.read().values().fold(init, f) } /// Returns true if given transaction is part of the queue. 
From a6882eb5b46529228741772a644cb4f01287f19f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:59:49 +0100 Subject: [PATCH 05/29] graph: make some stuff public --- substrate/client/transaction-pool/src/graph/mod.rs | 1 + substrate/client/transaction-pool/src/graph/pool.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index d93898b1b22a..43f93dc26255 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -43,4 +43,5 @@ pub use self::pool::{ }; pub use validated_pool::{IsValidator, ValidatedTransaction}; +pub(crate) use self::pool::CheckBannedBeforeVerify; pub(crate) use listener::DroppedByLimitsEvent; diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 23b71ce437b3..b30e9babc877 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -161,7 +161,7 @@ impl Default for Options { /// Should we check that the transaction is banned /// in the pool, before we verify it? #[derive(Copy, Clone)] -enum CheckBannedBeforeVerify { +pub(crate) enum CheckBannedBeforeVerify { Yes, No, } @@ -409,7 +409,7 @@ impl Pool { } /// Returns future that validates single transaction at given block. 
- async fn verify_one( + pub(crate) async fn verify_one( &self, block_hash: ::Hash, block_number: NumberFor, From 8d3dffe08485b8b7d00176d178c6de6e9ecc61fa Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 13:49:09 +0100 Subject: [PATCH 06/29] tests added --- .../transaction-pool/tests/fatp_common/mod.rs | 19 +- .../transaction-pool/tests/fatp_prios.rs | 287 +++++++++++++++++- 2 files changed, 298 insertions(+), 8 deletions(-) diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index aecd83360f1e..d9bc7d30b13b 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -192,12 +192,9 @@ macro_rules! assert_ready_iterator { let output: Vec<_> = ready_iterator.collect(); log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); log::debug!(target:LOG_TARGET, "output: {:#?}", output); + let output = output.into_iter().map(|t|t.hash).collect::>(); assert_eq!(expected.len(), output.len()); - assert!( - output.iter().zip(expected.iter()).all(|(o,e)| { - o.hash == *e - }) - ); + assert_eq!(output,expected); }}; } @@ -215,6 +212,18 @@ macro_rules! assert_future_iterator { }}; } +#[macro_export] +macro_rules! 
assert_watcher_stream { + ($stream:ident, [$( $event:expr ),*]) => {{ + let expected = vec![ $($event),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?} {}, block now:", expected, expected.len()); + let output = futures::executor::block_on_stream($stream).take(expected.len()).collect::>(); + log::debug!(target:LOG_TARGET, "output: {:#?}", output); + assert_eq!(expected.len(), output.len()); + assert_eq!(output, expected); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 41bc374b38f4..08e83e92b187 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -20,13 +20,14 @@ pub mod fatp_common; -use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use fatp_common::{invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; use futures::{executor::block_on, FutureExt}; use sc_transaction_pool::ChainApi; -use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use sc_transaction_pool_api::{ + error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, +}; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; - #[test] fn fatp_prio_ready_higher_evicts_lower() { sp_tracing::try_init_simple(); @@ -247,3 +248,283 @@ fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); } + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), 
Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 4); + + api.set_priority(&xt4, 5); + api.set_priority(&xt5, 6); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let 
header04 = api.push_block_with_parent(header03.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_ready_iterator!(header01.hash(), pool, [xt1, xt0]); + assert_ready_iterator!(header02.hash(), pool, []); + assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); + assert_eq!( + xt2_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); + assert_eq!( + xt3_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt5).0)] + ); +} + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Bob, 300); + let xt4 = uxt(Charlie, 400); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 3); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 2); + api.set_priority(&xt4, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + 
assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_eq!(pool.mempool_len().1, 4); + + // let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + // block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt3, xt4]); + + assert_watcher_stream!( + xt0_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt1_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt2_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); +} + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Bob, 300); + let xt4 = uxt(Charlie, 400); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 3); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 2); + api.set_priority(&xt4, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, 
xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + assert_ready_iterator!(header01.hash(), pool, [xt3]); + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_ready_iterator!(header02.hash(), pool, [xt3, xt4]); + + assert_watcher_stream!( + xt0_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt1_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!( + xt2_watcher, + [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] + ); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); +} + +#[test] +fn fatp_prios_watcher_full_mempool_lower_prio_gets_rejected() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(2).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + 
block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 2); + api.set_priority(&xt3, 1); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt0, xt1]); + + let result2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).map(|_| ()); + assert!(matches!(result2.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); + let result3 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).map(|_| ()); + assert!(matches!(result3.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); +} + +#[test] +fn fatp_prios_watcher_full_mempool_does_not_keep_dropped_transaction() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 
= uxt(Dave, 500); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 2); + api.set_priority(&xt3, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt3]); + + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); +} From 70fd186c2a586e1165d012a13bb865078afdf74c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 26 Nov 2024 13:52:08 +0100 Subject: [PATCH 07/29] type removed From d3a1a7bdc8d436cad9983969d718aefbe74d2d0f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 28 Nov 2024 10:34:51 +0100 Subject: [PATCH 08/29] improvements --- .../fork_aware_txpool/fork_aware_txpool.rs | 21 +++++----- .../src/fork_aware_txpool/view.rs | 11 +++-- .../src/fork_aware_txpool/view_store.rs | 34 ++++++++++----- .../client/transaction-pool/src/graph/mod.rs | 2 +- .../src/graph/validated_pool.rs | 41 +++++++++++-------- 5 files changed, 67 insertions(+), 42 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index fbe960ac0da3..564fccc6fbb3 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,10 +31,7 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{ - dropped_watcher::{DroppedReason, DroppedTransaction}, - revalidation_worker, - }, + fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -1270,7 +1267,8 @@ where self.api.hash_and_length(xt).0 } - /// Attempts to replace a lower-priority transaction in the transaction pool with a new one. + /// Attempts to find and replace a lower-priority transaction in the transaction pool with a new + /// one. /// /// This asynchronous function verifies the new transaction against the most recent view. If a /// transaction with a lower priority exists in the transaction pool, it is replaced with the @@ -1322,12 +1320,13 @@ where self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash); // 5. notify listner - self.view_store - .listener - .transaction_dropped(DroppedTransaction::new_usurped(worst_tx_hash, tx_hash)); - - // 7. remove transaction from the view_store - self.view_store.remove_transaction_subtree(worst_tx_hash, tx_hash); + // 6. remove transaction from the view_store + self.view_store.remove_transaction_subtree( + worst_tx_hash, + |listener, removed_tx_hash| { + listener.usurped(&removed_tx_hash, &tx_hash); + }, + ); // 8. 
add to pending_replacements - make sure it will not sneak back via cloned view diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 9adb879cb2c7..f900da9e29f5 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -465,11 +465,14 @@ where /// /// Intended to be called when removal is a result of replacement. Provided `replaced_with` /// transaction hash is used in emitted _usurped_ event. - pub(super) fn remove_subtree( + pub fn remove_subtree( &self, tx_hash: ExtrinsicHash, - replaced_with: ExtrinsicHash, - ) -> Vec> { - self.pool.validated_pool().remove_subtree(tx_hash, replaced_with) + listener_action: F, + ) -> Vec> + where + F: Fn(&mut crate::graph::Listener, ExtrinsicHash), + { + self.pool.validated_pool().remove_subtree(tx_hash, listener_action) } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 7687a5699747..3c095ae2a599 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -38,7 +38,7 @@ use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, time::Instant, }; @@ -691,23 +691,37 @@ where let _results = futures::future::join_all(submit_futures).await; } - /// Removes the whole transaction subtree from every view within the view store. + /// Removes a transaction subtree from the every view in the view_store, starting from the given + /// transaction hash. /// - /// Intended to be called when removal is a result of replacement. 
Provided `replaced_with` - /// transaction hash is used in emitted _usurped_ event. - pub(super) fn remove_transaction_subtree( + /// This function traverses the dependency graph of transactions and removes the specified + /// transaction along with all its descendant transactions from every view. + /// + /// A `listener_action` callback function is invoked for every transaction that is removed, + /// providing a reference to the pool's listener and the hash of the removed transaction. This + /// allows to trigger the required events. Note listener may be called multiple times for the + /// same hash. + /// + /// Returns a vector containing the hashes of all removed transactions, including the root + /// transaction specified by `tx_hash`. Vector contains only unique hashes. + pub(super) fn remove_transaction_subtree( &self, xt_hash: ExtrinsicHash, - replaced_with: ExtrinsicHash, - ) { + listener_action: F, + ) -> Vec> + where + F: Fn(&mut crate::graph::Listener, ExtrinsicHash), + { + let mut seen = HashSet::new(); let active_views = self.active_views.read(); let inactive_views = self.inactive_views.read(); active_views .iter() .chain(inactive_views.iter()) .filter(|(_, view)| view.is_imported(&xt_hash)) - .for_each(|(_, view)| { - view.remove_subtree(xt_hash, replaced_with); - }); + .map(|(_, view)| view.remove_subtree(xt_hash, &listener_action)) + .flatten() + .filter(|xt_hash| seen.insert(*xt_hash)) + .collect() } } diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 43f93dc26255..0810b532273a 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -41,7 +41,7 @@ pub use self::pool::{ BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, RawExtrinsicFor, TransactionFor, ValidatedTransactionFor, }; -pub use validated_pool::{IsValidator, ValidatedTransaction}; +pub use validated_pool::{IsValidator, 
Listener, ValidatedTransaction}; pub(crate) use self::pool::CheckBannedBeforeVerify; pub(crate) use listener::DroppedByLimitsEvent; diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index d283ee665c67..822d594e91ef 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -18,7 +18,6 @@ use std::{ collections::{HashMap, HashSet}, - hash, sync::Arc, }; @@ -26,17 +25,15 @@ use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; -use serde::Serialize; use sp_blockchain::HashAndNumber; use sp_runtime::{ - traits::{self, SaturatedConversion}, + traits::SaturatedConversion, transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; use super::{ base_pool::{self as base, PruneStatus}, - listener::Listener, pool::{ BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, Options, TransactionFor, }, @@ -85,6 +82,8 @@ impl ValidatedTransaction { pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; +pub type Listener = super::listener::Listener, B>; + /// A closure that returns true if the local node is a validator that can author blocks. #[derive(Clone)] pub struct IsValidator(Arc bool + Send + Sync>>); @@ -106,7 +105,7 @@ pub struct ValidatedPool { api: Arc, is_validator: IsValidator, options: Options, - listener: RwLock, B>>, + listener: RwLock>, pub(crate) pool: RwLock, ExtrinsicFor>>, import_notification_sinks: Mutex>>>, rotator: PoolRotator>, @@ -716,31 +715,41 @@ impl ValidatedPool { } } - /// Removes the whole transaction subtree the pool. + /// Removes a transaction subtree from the pool, starting from the given transaction hash. 
+ /// + /// This function traverses the dependency graph of transactions and removes the specified + /// transaction along with all its descendant transactions from the pool. + /// + /// A `listener_action` callback function is invoked for every transaction that is removed, + /// providing a reference to the pool's listener and the hash of the removed transaction. This + /// allows to trigger the required events. /// - /// Intended to be called when removal is a result of replacement. Provided `replaced_with` - /// transaction hash is used in emitted _usurped_ event. - pub fn remove_subtree( + /// Returns a vector containing the hashes of all removed transactions, including the root + /// transaction specified by `tx_hash`. + pub fn remove_subtree( &self, tx_hash: ExtrinsicHash, - replaced_with: ExtrinsicHash, - ) -> Vec> { + listener_action: F, + ) -> Vec> + where + F: Fn(&mut Listener, ExtrinsicHash), + { self.pool .write() .remove_subtree(&[tx_hash]) .into_iter() - .map(|tx| tx.hash) - .inspect(|tx_hash| { + .map(|tx| { + let removed_tx_hash = tx.hash; let mut listener = self.listener.write(); - listener.usurped(&tx_hash, &replaced_with) + listener_action(&mut *listener, removed_tx_hash); + removed_tx_hash }) .collect::>() } } -fn fire_events(listener: &mut Listener, imported: &base::Imported) +fn fire_events(listener: &mut Listener, imported: &base::Imported, Ex>) where - H: hash::Hash + Eq + traits::Member + Serialize, B: ChainApi, { match *imported { From 4246dac3f857230167e614736d26a39a36459d2f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 28 Nov 2024 14:48:08 +0100 Subject: [PATCH 09/29] make use of your brain --- .../fork_aware_txpool/fork_aware_txpool.rs | 8 +-- .../transaction-pool/src/graph/listener.rs | 4 +- .../src/graph/validated_pool.rs | 2 +- .../transaction-pool/tests/fatp_prios.rs | 50 +++++-------------- 4 files changed, 20 insertions(+), 44 deletions(-) diff --git 
a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 564fccc6fbb3..1a75ef9d861d 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1315,23 +1315,23 @@ where best_view.pool.validated_pool().find_transaction_with_lower_prio(validated_tx) { // 4. if yes - remove worse transaction from mempool and add new one. - log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?}"); + log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?} replaced by {tx_hash:?}"); let insertion_info = - self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash); + self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash)?; // 5. notify listner // 6. remove transaction from the view_store self.view_store.remove_transaction_subtree( worst_tx_hash, |listener, removed_tx_hash| { - listener.usurped(&removed_tx_hash, &tx_hash); + listener.limits_enforced(&removed_tx_hash); }, ); // 8. add to pending_replacements - make sure it will not sneak back via cloned view // 9. 
subemit new one to the view, this will be done upon in the caller - return insertion_info + return Ok(insertion_info) } Err(TxPoolApiError::ImmediatelyDropped) diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index 41daf5491f70..7b09ee4c6409 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -126,8 +126,8 @@ impl Listener ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.limit_enforced(h); + listener.limits_enforced(h); } removed diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 08e83e92b187..3c7db35323d8 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -310,23 +310,17 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { assert_pool_status!(header03.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 4); - let header04 = api.push_block_with_parent(header03.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + // let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + // block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); + assert_eq!(xt3_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); assert_ready_iterator!(header01.hash(), pool, [xt1, xt0]); assert_ready_iterator!(header02.hash(), pool, []); assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); - - let 
xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); - assert_eq!( - xt2_status, - vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); - assert_eq!( - xt3_status, - vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt5).0)] - ); } #[test] @@ -370,18 +364,9 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() { assert_pool_status!(header01.hash(), &pool, 2, 0); assert_ready_iterator!(header01.hash(), pool, [xt3, xt4]); - assert_watcher_stream!( - xt0_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt1_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt2_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); } @@ -428,18 +413,9 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() { assert_pool_status!(header02.hash(), &pool, 2, 0); assert_ready_iterator!(header02.hash(), pool, [xt3, xt4]); - assert_watcher_stream!( - xt0_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt1_watcher, - [TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); - assert_watcher_stream!( - xt2_watcher, - [TransactionStatus::Ready, 
TransactionStatus::Usurped(api.hash_and_length(&xt4).0)] - ); + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); } From c203d72dd199155c14cb9c929666e4233f50faa7 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 28 Nov 2024 15:40:08 +0100 Subject: [PATCH 10/29] fatp: pending actions now support removals --- .../fork_aware_txpool/fork_aware_txpool.rs | 9 +- .../src/fork_aware_txpool/view_store.rs | 166 ++++++++++++++---- 2 files changed, 141 insertions(+), 34 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 1a75ef9d861d..0ad6ffd75522 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,7 +31,10 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + fork_aware_txpool::{ + dropped_watcher::{DroppedReason, DroppedTransaction}, + revalidation_worker, + }, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -1320,6 +1323,10 @@ where self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash)?; // 5. notify listner + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_enforced_by_limts(worst_tx_hash)); + // 6. 
remove transaction from the view_store self.view_store.remove_transaction_subtree( worst_tx_hash, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 3c095ae2a599..9de3e7422981 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -43,15 +43,13 @@ use std::{ time::Instant, }; -/// Helper struct to keep the context for transaction replacements. +/// Helper struct to maintain the context for pending transaction submission, executed for +/// newly inserted views. #[derive(Clone)] -struct PendingTxReplacement +struct PendingTxSubmission where ChainApi: graph::ChainApi, { - /// Indicates if the new transaction was already submitted to all the views in the view_store. - /// If true, it can be removed after inserting any new view. - processed: bool, /// New transaction replacing the old one. xt: ExtrinsicFor, /// Source of the transaction. @@ -60,13 +58,84 @@ where watched: bool, } -impl PendingTxReplacement +/// Helper type representing the callback allowing to trigger per-transaction events on +/// `ValidatedPool`'s listener. +type RemovalListener = + Arc, ExtrinsicHash) + Send + Sync>; + +/// Helper struct to maintain the context for pending transaction removal, executed for +/// newly inserted views. +struct PendingTxRemoval +where + ChainApi: graph::ChainApi, +{ + /// Hash of the transaction that will be removed, + xt_hash: ExtrinsicHash, + /// Action that shall be executed on underlying `ValidatedPool`'s listener. + listener_action: RemovalListener, +} + +/// This enum represents an action that should be executed on the newly built +/// view before this view is inserted into the view store. +enum PreInsertAction +where + ChainApi: graph::ChainApi, +{ + /// Represents the action of submitting a new transaction. 
Intended to use to handle usurped + /// transactions. + SubmitTx(PendingTxSubmission), + + /// Represents the action of removing a subtree of transactions. + RemoveSubtree(PendingTxRemoval), +} + +/// Represents a task awaiting execution, to be performed immediately prior to the view insertion +/// into the view store. +struct PendingPreInsertTask where ChainApi: graph::ChainApi, { - /// Creates new unprocessed instance of pending transaction replacement. - fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { - Self { processed: false, xt, source, watched } + /// The action to be applied when inserting a new view. + action: PreInsertAction, + /// Indicates if the action was already applied to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, +} + +impl PendingPreInsertTask +where + ChainApi: graph::ChainApi, +{ + /// Creates new unprocessed instance of pending transaction submission. + fn new_submission_action( + xt: ExtrinsicFor, + source: TimedTransactionSource, + watched: bool, + ) -> Self { + Self { + processed: false, + action: PreInsertAction::SubmitTx(PendingTxSubmission { xt, source, watched }), + } + } + + /// Creates new processed instance of pending transaction removal. + fn new_removal_action( + xt_hash: ExtrinsicHash, + listener: RemovalListener, + ) -> Self { + Self { + processed: false, + action: PreInsertAction::RemoveSubtree(PendingTxRemoval { + xt_hash, + listener_action: listener, + }), + } + } + + /// Marks a task as done for every view present in view store. Basically means that can be + /// removed on new view insertion. + fn mark_processed(&mut self) { + self.processed = true; } } @@ -100,9 +169,8 @@ where /// notifcication threads. It is meant to assure that replaced transaction is also removed from /// newly built views in maintain process. /// - /// The map's key is hash of replaced extrinsic. 
- pending_txs_replacements: - RwLock, PendingTxReplacement>>, + /// The map's key is hash of actionable extrinsic (to avoid duplicated entries). + pending_txs_tasks: RwLock, PendingPreInsertTask>>, } impl ViewStore @@ -124,7 +192,7 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, - pending_txs_replacements: Default::default(), + pending_txs_tasks: Default::default(), } } @@ -575,8 +643,12 @@ where replaced: ExtrinsicHash, watched: bool, ) { - if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { - entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(replaced) { + entry.insert(PendingPreInsertTask::new_submission_action( + xt.clone(), + source.clone(), + watched, + )); } else { return }; @@ -586,8 +658,8 @@ where self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; - if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { - replacement.processed = true; + if let Some(replacement) = self.pending_txs_tasks.write().get_mut(&replaced) { + replacement.mark_processed(); } } @@ -596,18 +668,25 @@ where /// After application, all already processed replacements are removed. 
async fn apply_pending_tx_replacements(&self, view: Arc>) { let mut futures = vec![]; - for replacement in self.pending_txs_replacements.read().values() { - let xt_hash = self.api.hash_and_length(&replacement.xt).0; - futures.push(self.replace_transaction_in_view( - view.clone(), - replacement.source.clone(), - replacement.xt.clone(), - xt_hash, - replacement.watched, - )); + for replacement in self.pending_txs_tasks.read().values() { + match replacement.action { + PreInsertAction::SubmitTx(ref submission) => { + let xt_hash = self.api.hash_and_length(&submission.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + submission.source.clone(), + submission.xt.clone(), + xt_hash, + submission.watched, + )); + }, + PreInsertAction::RemoveSubtree(ref removal) => { + view.remove_subtree(removal.xt_hash, &*removal.listener_action); + }, + } } let _results = futures::future::join_all(futures).await; - self.pending_txs_replacements.write().retain(|_, r| r.processed); + self.pending_txs_tasks.write().retain(|_, r| r.processed); } /// Submits `xt` to the given view. @@ -702,6 +781,9 @@ where /// allows to trigger the required events. Note listener may be called multiple times for the /// same hash. /// + /// Function will also schedule view pre-insertion actions to ensure that transactions will be + /// removed from newly created view. + /// /// Returns a vector containing the hashes of all removed transactions, including the root /// transaction specified by `tx_hash`. Vector contains only unique hashes. 
pub(super) fn remove_transaction_subtree( @@ -710,18 +792,36 @@ where listener_action: F, ) -> Vec> where - F: Fn(&mut crate::graph::Listener, ExtrinsicHash), + F: Fn(&mut crate::graph::Listener, ExtrinsicHash) + + Clone + + Send + + Sync + + 'static, { + if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(xt_hash) { + entry.insert(PendingPreInsertTask::new_removal_action( + xt_hash, + Arc::from(listener_action.clone()), + )); + }; + let mut seen = HashSet::new(); - let active_views = self.active_views.read(); - let inactive_views = self.inactive_views.read(); - active_views + + let removed = self + .active_views + .read() .iter() - .chain(inactive_views.iter()) + .chain(self.inactive_views.read().iter()) .filter(|(_, view)| view.is_imported(&xt_hash)) .map(|(_, view)| view.remove_subtree(xt_hash, &listener_action)) .flatten() .filter(|xt_hash| seen.insert(*xt_hash)) - .collect() + .collect(); + + if let Some(removal_action) = self.pending_txs_tasks.write().get_mut(&xt_hash) { + removal_action.mark_processed(); + } + + removed } } From edb1257068161c5350210802ec486a9ec3a36ff9 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:43:41 +0100 Subject: [PATCH 11/29] validated_pool: SubmitOutcome --- .../client/transaction-pool/src/graph/mod.rs | 4 +- .../client/transaction-pool/src/graph/pool.rs | 10 +-- .../src/graph/validated_pool.rs | 69 ++++++++++++++++--- 3 files changed, 69 insertions(+), 14 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 0810b532273a..008503fb4cdf 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -41,7 +41,9 @@ pub use self::pool::{ BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, RawExtrinsicFor, TransactionFor, ValidatedTransactionFor, }; -pub use 
validated_pool::{IsValidator, Listener, ValidatedTransaction}; +pub use validated_pool::{ + BaseSubmitOutcome, IsValidator, Listener, ValidatedPoolSubmitOutcome, ValidatedTransaction, +}; pub(crate) use self::pool::CheckBannedBeforeVerify; pub(crate) use listener::DroppedByLimitsEvent; diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index b30e9babc877..dcab98a33ffe 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -37,7 +37,7 @@ use std::{ use super::{ base_pool as base, validated_pool::{IsValidator, ValidatedPool, ValidatedTransaction}, - watcher::Watcher, + ValidatedPoolSubmitOutcome, }; /// Modification notification event stream type; @@ -182,7 +182,7 @@ impl Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -194,7 +194,7 @@ impl Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -205,7 +205,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, B::Error> { + ) -> Result, B::Error> { let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -216,7 +216,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { let (_, tx) = self .verify_one(at.hash, at.number, source, xt, CheckBannedBeforeVerify::Yes) .await; diff --git 
a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 072751c2a569..720a80698538 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -24,7 +24,7 @@ use std::{ use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; -use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; +use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions, TransactionPriority}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::SaturatedConversion, @@ -78,10 +78,11 @@ impl ValidatedTransaction { } } -/// A type of validated transaction stored in the pool. +/// A type of validated transaction stored in the validated pool. pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; +/// A type alias representing ValidatedPool listener for given ChainApi type. pub type Listener = super::listener::Listener, B>; /// A closure that returns true if the local node is a validator that can author blocks. @@ -100,6 +101,54 @@ impl From bool + Send + Sync>> for IsValidator { } } +/// Represents the result of `submit` or `submit_and_watch` operations. +pub struct BaseSubmitOutcome { + /// The hash of the submitted transaction. + hash: ExtrinsicHash, + /// A transaction watcher. This is `Some` for `submit_and_watch` and `None` for `submit`. + watcher: Option, + /// The priority of the transaction. Defaults to zero if unknown. + priority: TransactionPriority, +} + +/// Type alias to outcome of submission to `ValidatedPool`. +pub type ValidatedPoolSubmitOutcome = + BaseSubmitOutcome, ExtrinsicHash>>; + +impl BaseSubmitOutcome { + /// Creates a new instance with given hash and priority. 
+ pub fn new(hash: ExtrinsicHash, priority: TransactionPriority) -> Self { + Self { hash, priority, watcher: None } + } + + /// Creates a new instance with given hash and zeroed priority. + pub fn new_no_priority(hash: ExtrinsicHash) -> Self { + Self { hash, priority: 0u64, watcher: None } + } + + /// Sets the transaction watcher. + pub fn with_watcher(mut self, watcher: W) -> Self { + self.watcher = Some(watcher); + self + } + + /// Provides priority of submitted transaction. + pub fn priority(&self) -> TransactionPriority { + self.priority + } + + /// Provides hash of submitted transaction. + pub fn hash(&self) -> ExtrinsicHash { + self.hash + } + + /// Provides a watcher. Should only be called on outcomes of `submit_and_watch`. Otherwise will + /// panic (that would mean logical error in program). + pub fn expect_watcher(&mut self) -> W { + self.watcher.take().expect("watcher was set in submit_and_watch. qed") + } +} + /// Pool that deals with validated transactions. pub struct ValidatedPool { api: Arc, @@ -176,7 +225,7 @@ impl ValidatedPool { pub fn submit( &self, txs: impl IntoIterator>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let results = txs .into_iter() .map(|validated_tx| self.submit_one(validated_tx)) @@ -192,7 +241,7 @@ impl ValidatedPool { results .into_iter() .map(|res| match res { - Ok(ref hash) if removed.contains(hash) => + Ok(outcome) if removed.contains(&outcome.hash) => Err(error::Error::ImmediatelyDropped.into()), other => other, }) @@ -200,9 +249,13 @@ impl ValidatedPool { } /// Submit single pre-validated transaction to the pool. 
- fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { + fn submit_one( + &self, + tx: ValidatedTransactionFor, + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { + let priority = tx.priority; log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one", tx.hash); if !tx.propagate && !(self.is_validator.0)() { return Err(error::Error::Unactionable.into()) @@ -230,7 +283,7 @@ impl ValidatedPool { let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(*imported.hash()) + Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), priority)) }, ValidatedTransaction::Invalid(hash, err) => { log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err); @@ -294,7 +347,7 @@ impl ValidatedPool { pub fn submit_and_watch( &self, tx: ValidatedTransactionFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { let hash = self.api.hash_and_length(&tx.data).0; @@ -302,7 +355,7 @@ impl ValidatedPool { self.submit(std::iter::once(ValidatedTransaction::Valid(tx))) .pop() .expect("One extrinsic passed; one result returned; qed") - .map(|_| watcher) + .map(|outcome| outcome.with_watcher(watcher)) }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), std::iter::once(hash)); From f778176dad2f6ed95c5ca5da16c30938e64f92a5 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:47:01 +0100 Subject: [PATCH 12/29] view/view_store: SubmitOutcome --- .../src/fork_aware_txpool/view.rs | 8 +- .../src/fork_aware_txpool/view_store.rs | 73 +++++++++++++------ 2 files changed, 54 insertions(+), 27 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index f900da9e29f5..291a1f7d7fa3 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -28,7 +28,7 @@ use crate::{ common::log_xt::log_xt_trace, graph::{ self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, - IsValidator, ValidatedTransaction, ValidatedTransactionFor, + IsValidator, ValidatedPoolSubmitOutcome, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; @@ -158,7 +158,7 @@ where pub(super) async fn submit_many( &self, xts: impl IntoIterator)>, - ) -> Vec, ChainApi::Error>> { + ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); @@ -173,7 +173,7 @@ where &self, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ExtrinsicHash>, ChainApi::Error> { + ) -> Result, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); self.pool.submit_and_watch(&self.at, source, xt).await } @@ -182,7 +182,7 @@ where pub(super) fn submit_local( &self, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 9de3e7422981..4ae54e99264c 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -27,7 +27,7 @@ use crate::{ graph::{ self, base_pool::{TimedTransactionSource, 
Transaction}, - ExtrinsicFor, ExtrinsicHash, TransactionFor, + BaseSubmitOutcome, ExtrinsicFor, ExtrinsicHash, TransactionFor, ValidatedPoolSubmitOutcome, }, ReadyIteratorFor, LOG_TARGET, }; @@ -173,6 +173,18 @@ where pending_txs_tasks: RwLock, PendingPreInsertTask>>, } +/// Type alias to outcome of submission to `ViewStore`. +pub(super) type ViewStoreSubmitOutcome = + BaseSubmitOutcome>; + +impl From> + for ViewStoreSubmitOutcome +{ + fn from(value: ValidatedPoolSubmitOutcome) -> Self { + Self::new(value.hash(), value.priority()) + } +} + impl ViewStore where Block: BlockT, @@ -200,7 +212,7 @@ where pub(super) async fn submit( &self, xts: impl IntoIterator)> + Clone, - ) -> HashMap, ChainApi::Error>>> { + ) -> HashMap, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); active_views @@ -208,7 +220,16 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(xts).await) } + async move { + ( + view.at.hash, + view.submit_many(xts) + .await + .into_iter() + .map(|r| r.map(Into::into)) + .collect::>(), + ) + } }) .collect::>() }; @@ -221,7 +242,7 @@ where pub(super) fn submit_local( &self, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let active_views = self .active_views .read() @@ -236,12 +257,14 @@ where .map(|view| view.submit_local(xt.clone())) .find_or_first(Result::is_ok); - if let Some(Err(err)) = result { - log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); - return Err(err) - }; - - Ok(tx_hash) + match result { + Some(Err(err)) => { + log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); + Err(err) + }, + None => Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash)), + Some(Ok(r)) => Ok(r.into()), + } } /// Import a single extrinsic and starts to watch its progress in the pool. 
@@ -256,7 +279,7 @@ where _at: Block::Hash, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; let Some(external_watcher) = self.listener.create_external_watcher_for_tx(tx_hash) else { return Err(PoolError::AlreadyImported(Box::new(tx_hash)).into()) @@ -271,13 +294,13 @@ where let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { - Ok(watcher) => { + Ok(mut result) => { self.listener.add_view_watcher_for_tx( tx_hash, view.at.hash, - watcher.into_stream().boxed(), + result.expect_watcher().into_stream().boxed(), ); - Ok(()) + Ok(result) }, Err(e) => Err(e), } @@ -285,17 +308,21 @@ where }) .collect::>() }; - let maybe_error = futures::future::join_all(submit_and_watch_futures) + let result = futures::future::join_all(submit_and_watch_futures) .await .into_iter() .find_or_first(Result::is_ok); - if let Some(Err(err)) = maybe_error { - log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); - return Err(err); - }; - - Ok(external_watcher) + match result { + Some(Err(err)) => { + log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); + return Err(err); + }, + Some(Ok(result)) => + Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)), + None => + Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash).with_watcher(external_watcher)), + } } /// Returns the pool status for every active view. 
@@ -702,11 +729,11 @@ where ) { if watched { match view.submit_and_watch(source, xt).await { - Ok(watcher) => { + Ok(mut result) => { self.listener.add_view_watcher_for_tx( xt_hash, view.at.hash, - watcher.into_stream().boxed(), + result.expect_watcher().into_stream().boxed(), ); }, Err(e) => { From a72b3f9fc4b20024222a705cdf8674fd809721c1 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:48:02 +0100 Subject: [PATCH 13/29] mempool: update_transaction stub --- .../src/fork_aware_txpool/tx_mem_pool.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 4a7c3a22b18f..5b6c9c56a044 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -26,7 +26,10 @@ //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. 
-use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener}; +use super::{ + metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, + view_store::ViewStoreSubmitOutcome, +}; use crate::{ common::log_xt::log_xt_trace, graph, @@ -487,6 +490,10 @@ where }); self.listener.invalidate_transactions(&invalid_hashes); } + + pub(super) fn update_transaction(&self, outcome: &ViewStoreSubmitOutcome) { + // todo!() + } } #[cfg(test)] From c411bb4e4345b501fce87add581034724e61286c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:50:01 +0100 Subject: [PATCH 14/29] fatp: SubmitOutcome --- .../fork_aware_txpool/fork_aware_txpool.rs | 35 +++++++++++++++---- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 0ad6ffd75522..0ff77f7d52c0 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -602,7 +602,7 @@ where /// out: /// [ Ok(xth0), Ok(xth1), Err ] /// ``` -fn reduce_multiview_result(input: HashMap>>) -> Vec> { +fn reduce_multiview_result(input: HashMap>>) -> Vec> { let mut values = input.values(); let Some(first) = values.next() else { return Default::default(); @@ -684,6 +684,10 @@ where }) }) }) + .map(|r| r.map(|r| { + mempool.update_transaction(&r); + r.hash() + })) .collect::>()) } @@ -727,9 +731,16 @@ where self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - self.view_store.submit_and_watch(at, timed_source, xt).await.inspect_err(|_| { - self.mempool.remove_transaction(&xt_hash); - }) + self.view_store + .submit_and_watch(at, timed_source, xt) + .await + .inspect_err(|_| { + self.mempool.remove_transaction(&xt_hash); + }) + 
.map(|mut outcome| { + self.mempool.update_transaction(&outcome); + outcome.expect_watcher() + }) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -863,7 +874,13 @@ where .extend_unwatched(TransactionSource::Local, &[xt.clone()]) .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) + self.view_store + .submit_local(xt) + .map(|outcome| { + self.mempool.update_transaction(&outcome); + outcome.hash() + }) + .or_else(|_| Ok(xt_hash)) } } @@ -1115,7 +1132,11 @@ where .await .into_iter() .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .map(|(result, tx_hash)| { + result + .map(|outcome| self.mempool.update_transaction(&outcome.into())) + .or_else(|_| Err(tx_hash)) + }) .collect::>(); let submitted_count = watched_results.len(); @@ -1490,7 +1511,7 @@ mod reduce_multiview_result_tests { fn empty() { sp_tracing::try_init_simple(); let input = HashMap::default(); - let r = reduce_multiview_result::(input); + let r = reduce_multiview_result::(input); assert!(r.is_empty()); } From 7b461bff26c085d9c0fbaac777a41f049386c10a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:51:02 +0100 Subject: [PATCH 15/29] fatp: todo added --- .../transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 0ff77f7d52c0..f25f837d31ef 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1133,6 +1133,8 @@ where .into_iter() .zip(hashes) .map(|(result, tx_hash)| { + //todo: we may need a bool flag here indicating if we need to update priority + //(premature optimization) result .map(|outcome| 
self.mempool.update_transaction(&outcome.into())) .or_else(|_| Err(tx_hash)) From 8765d2ce9e9d6393707019c77656fcabec56b452 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:51:53 +0100 Subject: [PATCH 16/29] single-state txpool: SubmitOutcome integration --- .../single_state_txpool.rs | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index e7504012ca67..0fcdfb2cddc8 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -266,7 +266,12 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, xts).await) + Ok(pool + .submit_at(&at, xts) + .await + .into_iter() + .map(|result| result.map(|outcome| outcome.hash())) + .collect()) } async fn submit_one( @@ -284,6 +289,7 @@ where let at = HashAndNumber { hash: at, number: number? }; pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) .await + .map(|outcome| outcome.hash()) } async fn submit_and_watch( @@ -300,15 +306,13 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? 
}; - let watcher = pool - .submit_and_watch( - &at, - TimedTransactionSource::from_transaction_source(source, false), - xt, - ) - .await?; - - Ok(watcher.into_stream().boxed()) + pool.submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await + .map(|mut outcome| outcome.expect_watcher().into_stream().boxed()) } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -476,7 +480,11 @@ where validity, ); - self.pool.validated_pool().submit(vec![validated]).remove(0) + self.pool + .validated_pool() + .submit(vec![validated]) + .remove(0) + .map(|outcome| outcome.hash()) } } From e8ccd44c09b767d2a07ff71774cbbf1b09309479 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:52:45 +0100 Subject: [PATCH 17/29] tests: SubmitOutcome fixes --- .../client/transaction-pool/src/graph/pool.rs | 70 +++++++++++++------ .../src/single_state_txpool/revalidation.rs | 5 +- .../client/transaction-pool/tests/pool.rs | 14 ++-- 3 files changed, 61 insertions(+), 28 deletions(-) diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index dcab98a33ffe..2d049eb41172 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -516,6 +516,7 @@ mod tests { .into(), ), ) + .map(|outcome| outcome.hash()) .unwrap(); // then @@ -544,7 +545,10 @@ mod tests { // when let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)) + .into_iter() + .map(|r| r.map(|o| o.hash())) + .collect::>(); log::debug!("--> {hashes:#?}"); // then @@ -568,7 +572,8 @@ mod tests { // when pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = 
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -591,7 +596,8 @@ mod tests { let uxt = ExtrinsicBuilder::new_include_data(vec![42]).build(); // when - let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); // then assert_matches!(res.unwrap_err(), error::Error::Unactionable); @@ -619,7 +625,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash1 = block_on( pool.submit_one( &han_of_block0, @@ -633,7 +640,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // future doesn't count let _hash = block_on( pool.submit_one( @@ -648,7 +656,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().ready, 2); assert_eq!(pool.validated_pool().status().future, 1); @@ -681,7 +690,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash2 = block_on( pool.submit_one( &han_of_block0, @@ -695,7 +705,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash3 = block_on( pool.submit_one( &han_of_block0, @@ -709,7 +720,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when pool.validated_pool.clear_stale(&api.expect_hash_and_number(5)); @@ -741,7 +753,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![0]], vec![hash1])); @@ -769,8 +782,9 @@ mod tests { let api = Arc::new(TestApi::default()); let pool = Pool::new(options, true.into(), api.clone()); - let hash1 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap(); + let hash1 = 
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())) + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().future, 1); // when @@ -787,7 +801,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // then assert_eq!(pool.validated_pool().status().future, 1); @@ -819,6 +834,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -845,6 +861,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -873,7 +890,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -910,7 +928,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -949,7 +968,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 1); @@ -988,7 +1008,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1013,7 +1034,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1046,7 +1068,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, xt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1113,7 +1136,9 @@ mod tests { // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = 
ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // after validation `Transfer` will have priority set to 4 (validate_transaction @@ -1124,8 +1149,9 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + let watcher = block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 2); // when diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index 74031b1e1c72..eba0277c29a8 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -401,7 +401,8 @@ mod tests { TimedTransactionSource::new_external(false), uxt.clone().into(), )) - .expect("Should be valid"); + .expect("Should be valid") + .hash(); block_on(queue.revalidate_later(han_of_block0.hash, vec![uxt_hash])); @@ -440,7 +441,7 @@ mod tests { vec![(source.clone(), uxt0.into()), (source, uxt1.into())], )) .into_iter() - .map(|r| r.expect("Should be valid")) + .map(|r| r.expect("Should be valid").hash()) .collect::>(); assert_eq!(api.validation_requests().len(), 2); diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index e556ba9875f1..e4fd1b37c1f1 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -158,6 +158,7 @@ fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 
209).into())) + .map(|o| o.hash()) .unwrap(); block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); @@ -184,10 +185,13 @@ fn prune_tags_should_work() { fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); - let hash = - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); + let hash = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .unwrap() + .hash(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .map(|_| ()) + .unwrap_err(); // when let pending: Vec<_> = pool @@ -198,7 +202,9 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .map(|_| ()) + .unwrap_err(); } #[test] From 6cca27201c5222b68a9ac139a12272bf5435ac80 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 29 Nov 2024 17:53:11 +0100 Subject: [PATCH 18/29] mempool: sizes fix --- .../transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 5b6c9c56a044..96a1a80fe761 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -288,13 +288,14 @@ where tx: TxInMemPool, tx_to_be_removed: Option>, ) -> Result>, sc_transaction_pool_api::error::Error> { - let bytes = 
self.transactions.bytes(); let mut transactions = self.transactions.write(); tx_to_be_removed.inspect(|hash| { transactions.remove(hash); }); + let bytes = self.transactions.bytes(); + let result = match ( self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), transactions.contains_key(&hash), From 3b17a169800fad6e76cc4290ca7bc3b03ba15e8b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 3 Dec 2024 19:17:33 +0100 Subject: [PATCH 19/29] dropping transaction - size limit is properly obeyed now --- .../fork_aware_txpool/fork_aware_txpool.rs | 63 ++++---- .../src/fork_aware_txpool/tx_mem_pool.rs | 139 ++++++++++++++++-- .../src/fork_aware_txpool/view_store.rs | 5 +- .../transaction-pool/src/graph/tracked_map.rs | 10 ++ .../src/graph/validated_pool.rs | 24 +-- .../transaction-pool/tests/fatp_prios.rs | 16 +- 6 files changed, 193 insertions(+), 64 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index f25f837d31ef..6b8a6be22458 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -643,7 +643,7 @@ where /// are reduced to single result. Refer to `reduce_multiview_result` for more details. async fn submit_at( &self, - _: ::Hash, + at: ::Hash, source: TransactionSource, xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { @@ -657,6 +657,21 @@ where return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) } + //todo: review + test maybe? 
+ let retries = mempool_results + .into_iter() + .zip(xts.clone()) + .map(|(result, xt)| async move { + match result { + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(at, source, false, xt).await, + result @ _ => result, + } + }) + .collect::>(); + + let mempool_results = futures::future::join_all(retries).await; + let to_be_submitted = mempool_results .iter() .zip(xts) @@ -721,7 +736,7 @@ where log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, source: timed_source } = + let InsertionInfo { hash: xt_hash, source: timed_source, .. } = match self.mempool.push_watched(source, xt.clone()) { Ok(result) => result, Err(TxPoolApiError::ImmediatelyDropped) => @@ -1333,33 +1348,31 @@ where ) .await; - // 3. check if most_recent_view contains a transaction with lower priority (actually worse - - // do we want to check timestamp too - no: see #4609?) - // Would be perfect to choose transaction with lowest the number of dependant txs in its - // subtree. - if let Some(worst_tx_hash) = - best_view.pool.validated_pool().find_transaction_with_lower_prio(validated_tx) - { + if let Some(priority) = validated_tx.priority() { + // 3. check if mempool contains a transaction with lower priority (actually worse - do + // we want to check timestamp too - no: see #4609?) Would be perfect to choose + // transaction with the lowest number of dependent txs in its subtree. // 4. if yes - remove worse transaction from mempool and add new one. - log::trace!(target: LOG_TARGET, "found candidate for removal: {worst_tx_hash:?} replaced by {tx_hash:?}"); let insertion_info = - self.mempool.try_replace_transaction(xt, source, watched, worst_tx_hash)?; - - // 5. notify listner - self.view_store - .listener - .transaction_dropped(DroppedTransaction::new_enforced_by_limts(worst_tx_hash)); - - // 6. 
remove transaction from the view_store - self.view_store.remove_transaction_subtree( - worst_tx_hash, - |listener, removed_tx_hash| { - listener.limits_enforced(&removed_tx_hash); - }, - ); + self.mempool.try_replace_transaction(xt, priority, source, watched)?; + + for worst_hash in &insertion_info.removed { + log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}"); + // 5. notify listener + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash)); + + // 6. remove transaction from the view_store + self.view_store.remove_transaction_subtree( + *worst_hash, + |listener, removed_tx_hash| { + listener.limits_enforced(&removed_tx_hash); + }, + ); + } // 8. add to pending_replacements - make sure it will not sneak back via cloned view - // 9. subemit new one to the view, this will be done upon in the caller return Ok(insertion_info) } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 96a1a80fe761..c851af111e32 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -38,15 +38,19 @@ use crate::{ }; use futures::FutureExt; use itertools::Itertools; -use sc_transaction_pool_api::TransactionSource; +use sc_transaction_pool_api::{TransactionPriority, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::Block as BlockT, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; use std::{ + cmp::Ordering, collections::HashMap, - sync::{atomic, atomic::AtomicU64, Arc}, + sync::{ + atomic::{self, AtomicU64}, + Arc, + }, time::Instant, }; @@ -80,6 +84,9 @@ where source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. 
validated_at: AtomicU64, + /// Priority of transaction at some block. It is assumed it will not be changed often. + //todo: Option is needed here. This means lock. So maybe +AtomicBool? + priority: AtomicU64, //todo: we need to add future / ready status at finalized block. //If future transactions are stuck in tx_mem_pool (due to limits being hit), we need a means // to replace them somehow with newly coming transactions. @@ -112,12 +119,23 @@ where Self::new(true, source, tx, bytes) } - /// Creates a new instance of wrapper for a transaction. + /// Creates a new instance of wrapper for a transaction with no priority. fn new( watched: bool, source: TransactionSource, tx: ExtrinsicFor, bytes: usize, + ) -> Self { + Self::new_with_priority(watched, source, tx, bytes, 0) + } + + /// Creates a new instance of wrapper for a transaction with given priority. + fn new_with_priority( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + priority: TransactionPriority, ) -> Self { Self { watched, @@ -125,6 +143,7 @@ where source: TimedTransactionSource::from_transaction_source(source, true), validated_at: AtomicU64::new(0), bytes, + priority: AtomicU64::new(priority), } } @@ -139,6 +158,11 @@ where pub(crate) fn source(&self) -> TimedTransactionSource { self.source.clone() } + + /// Returns the priority of the transaction. 
+ pub(crate) fn priority(&self) -> TransactionPriority { + self.priority.load(atomic::Ordering::Relaxed) + } } impl Size for Arc> @@ -198,11 +222,15 @@ where pub(super) struct InsertionInfo { pub(super) hash: Hash, pub(super) source: TimedTransactionSource, + pub(super) removed: Vec, } impl InsertionInfo { fn new(hash: Hash, source: TimedTransactionSource) -> Self { - Self { hash, source } + Self::new_with_removed(hash, source, Default::default()) + } + fn new_with_removed(hash: Hash, source: TimedTransactionSource, removed: Vec) -> Self { + Self { hash, source, removed } } } @@ -286,14 +314,9 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - tx_to_be_removed: Option>, ) -> Result>, sc_transaction_pool_api::error::Error> { let mut transactions = self.transactions.write(); - tx_to_be_removed.inspect(|hash| { - transactions.remove(hash); - }); - let bytes = self.transactions.bytes(); let result = match ( @@ -314,6 +337,85 @@ where result } + /// Attempts to insert a new transaction in the memory pool and drop some worse existing + /// transactions. + /// + /// This operation will not overflow the limit of the mempool. It means that cumulative + /// size of removed transactions will be equal (or greated) then size of newly inserted + /// transaction. + /// + /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully + /// inserted; otherwise, returns an appropriate error indicating the failure. 
+ pub(super) fn try_insert_with_dropping( + &self, + hash: ExtrinsicHash, + new_tx: TxInMemPool, + ) -> Result>, sc_transaction_pool_api::error::Error> { + let mut transactions = self.transactions.write(); + + if transactions.contains_key(&hash) { + return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))); + } + + let mut sorted = + transactions.iter().map(|(h, v)| (h.clone(), v.clone())).collect::>(); + + // When pushing higher prio transaction, we need to find a number of lower prio txs, such + // that the sum of their bytes is ge then size of new tx. Otherwise we could overflow size + // limits. Naive way to do it - rev-sort by priority and eat the tail. + + // reverse (lowest prio last) + sorted.sort_by(|(_, a), (_, b)| match b.priority().cmp(&a.priority()) { + Ordering::Equal => match (a.source.timestamp, b.source.timestamp) { + (Some(a), Some(b)) => b.cmp(&a), + _ => Ordering::Equal, + }, + ordering @ _ => ordering, + }); + + let required_size = new_tx.bytes; + let mut total_size = 0usize; + let mut to_be_removed = vec![]; + + while total_size < required_size { + let Some((worst_hash, worst_tx)) = sorted.pop() else { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + }; + + if worst_tx.priority() >= new_tx.priority() { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + } + + total_size += worst_tx.bytes; + to_be_removed.push(worst_hash); + } + + let source = new_tx.source(); + transactions.insert(hash, Arc::from(new_tx)); + for worst_hash in &to_be_removed { + transactions.remove(worst_hash); + } + debug_assert!(!self.is_limit_exceeded(transactions.len(), self.transactions.bytes())); + + Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed)) + + // let result = match ( + // self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), + // transactions.contains_key(&hash), + // ) { + // (false, false) => { + // let source = tx.source(); + // transactions.insert(hash, 
Arc::from(tx)); + // Ok(InsertionInfo::new(hash, source)) + // }, + // (_, true) => + // Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), + // (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), + // }; + // log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, + // result.as_ref().map(|r| r.hash)); + } + /// Attempts to replace an existing transaction in the memory pool with a new one. /// /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully @@ -321,15 +423,14 @@ where pub(super) fn try_replace_transaction( &self, new_xt: ExtrinsicFor, + priority: TransactionPriority, source: TransactionSource, watched: bool, - replaced_tx_hash: ExtrinsicHash, ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&new_xt); - self.try_insert( + self.try_insert_with_dropping( hash, - TxInMemPool::new(watched, source, new_xt, length), - Some(replaced_tx_hash), + TxInMemPool::new_with_priority(watched, source, new_xt, length, priority), ) } @@ -347,7 +448,7 @@ where .iter() .map(|xt| { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length), None) + self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length)) }) .collect::>(); result @@ -361,7 +462,7 @@ where xt: ExtrinsicFor, ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length), None) + self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -493,7 +594,11 @@ where } pub(super) fn update_transaction(&self, outcome: &ViewStoreSubmitOutcome) { - // todo!() + if let Some(priority) = outcome.priority() { + if let Some(tx) = 
self.transactions.write().get_mut(&outcome.hash()) { + tx.priority.store(priority, atomic::Ordering::Relaxed); + } + } } } @@ -650,4 +755,6 @@ mod tx_mem_pool_tests { sc_transaction_pool_api::error::Error::ImmediatelyDropped )); } + + //add some test for try_insert_with_dropping } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 4ae54e99264c..89bf47cc3895 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -262,7 +262,7 @@ where log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); Err(err) }, - None => Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash)), + None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None)), Some(Ok(r)) => Ok(r.into()), } } @@ -320,8 +320,7 @@ where }, Some(Ok(result)) => Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)), - None => - Ok(ViewStoreSubmitOutcome::new_no_priority(tx_hash).with_watcher(external_watcher)), + None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None).with_watcher(external_watcher)), } } diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 6c3bbbf34b55..818c1ebe544f 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -173,6 +173,16 @@ where pub fn len(&mut self) -> usize { self.inner_guard.len() } + + /// Returns an iterator over all values. + pub fn values(&self) -> std::collections::hash_map::Values { + self.inner_guard.values() + } + + /// Returns an iterator over all key-value pairs. 
+ pub fn iter(&self) -> Iter<'_, K, V> { + self.inner_guard.iter() + } } #[cfg(test)] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 720a80698538..0c0317750f85 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -76,6 +76,14 @@ impl ValidatedTransaction { valid_till: at.saturated_into::().saturating_add(validity.longevity), }) } + + /// Returns priority for valid transaction, None if transaction is not valid. + pub fn priority(&self) -> Option { + match self { + ValidatedTransaction::Valid(base::Transaction { priority, .. }) => Some(*priority), + _ => None, + } + } } /// A type of validated transaction stored in the validated pool. @@ -107,8 +115,9 @@ pub struct BaseSubmitOutcome { hash: ExtrinsicHash, /// A transaction watcher. This is `Some` for `submit_and_watch` and `None` for `submit`. watcher: Option, - /// The priority of the transaction. Defaults to zero if unknown. - priority: TransactionPriority, + + /// The priority of the transaction. Defaults to None if unknown. + priority: Option, } /// Type alias to outcome of submission to `ValidatedPool`. @@ -117,15 +126,10 @@ pub type ValidatedPoolSubmitOutcome = impl BaseSubmitOutcome { /// Creates a new instance with given hash and priority. - pub fn new(hash: ExtrinsicHash, priority: TransactionPriority) -> Self { + pub fn new(hash: ExtrinsicHash, priority: Option) -> Self { Self { hash, priority, watcher: None } } - /// Creates a new instance with given hash and zeroed priority. - pub fn new_no_priority(hash: ExtrinsicHash) -> Self { - Self { hash, priority: 0u64, watcher: None } - } - /// Sets the transaction watcher. pub fn with_watcher(mut self, watcher: W) -> Self { self.watcher = Some(watcher); @@ -133,7 +137,7 @@ impl BaseSubmitOutcome { } /// Provides priority of submitted transaction. 
- pub fn priority(&self) -> TransactionPriority { + pub fn priority(&self) -> Option { self.priority } @@ -283,7 +287,7 @@ impl ValidatedPool { let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), priority)) + Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), Some(priority))) }, ValidatedTransaction::Invalid(hash, err) => { log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err); diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 3c7db35323d8..abd8ebec2296 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -282,10 +282,8 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { api.set_priority(&xt4, 5); api.set_priority(&xt5, 6); - let _xt0_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let _xt1_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 2); @@ -313,13 +311,11 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { // let header04 = api.push_block_with_parent(header03.hash(), vec![], true); // block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); - assert_eq!(xt2_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); - let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); - assert_eq!(xt3_status, vec![TransactionStatus::Ready, 
TransactionStatus::Dropped]); + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); - assert_ready_iterator!(header01.hash(), pool, [xt1, xt0]); - assert_ready_iterator!(header02.hash(), pool, []); + assert_ready_iterator!(header01.hash(), pool, []); + assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]); assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); } From 4f767e59a37b759c8588148671fb52c4fc236d2c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 4 Dec 2024 10:54:58 +0100 Subject: [PATCH 20/29] merge / rebase fixes --- .../src/fork_aware_txpool/dropped_watcher.rs | 2 +- .../src/fork_aware_txpool/fork_aware_txpool.rs | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 48767ad61605..93fbb79ce261 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -208,7 +208,7 @@ where views.remove(&key); //todo: merge heads up warning! 
if views.is_empty() { - ctx.pending_dropped_transactions.push(*tx_hash); + self.pending_dropped_transactions.push(*tx_hash); } }); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 6b8a6be22458..3f8d66110432 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -654,7 +654,10 @@ where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) + return Ok(mempool_results + .into_iter() + .map(|r| r.map(|r| r.hash).map_err(Into::into)) + .collect::>()) } //todo: review + test maybe? @@ -690,7 +693,9 @@ where Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|insertion| { + result + .map_err(Into::into) + .and_then(|insertion| { submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. 
qed.") From 6ba133e3bf9872bdf8402e20c99f830132913668 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 4 Dec 2024 11:44:18 +0100 Subject: [PATCH 21/29] mempool: prio is now locked option --- .../src/fork_aware_txpool/tx_mem_pool.rs | 43 +++++++++++++------ 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index c851af111e32..84963652ffa6 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -38,6 +38,7 @@ use crate::{ }; use futures::FutureExt; use itertools::Itertools; +use parking_lot::RwLock; use sc_transaction_pool_api::{TransactionPriority, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ @@ -84,9 +85,9 @@ where source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, - /// Priority of transaction at some block. It is assumed it will not be changed often. - //todo: Option is needed here. This means lock. So maybe +AtomicBool? - priority: AtomicU64, + /// Priority of transaction at some block. It is assumed it will not be changed often. None if + /// not known. + priority: RwLock>, //todo: we need to add future / ready status at finalized block. //If future transactions are stuck in tx_mem_pool (due to limits being hit), we need a means // to replace them somehow with newly coming transactions. @@ -126,7 +127,7 @@ where tx: ExtrinsicFor, bytes: usize, ) -> Self { - Self::new_with_priority(watched, source, tx, bytes, 0) + Self::new_with_optional_priority(watched, source, tx, bytes, None) } /// Creates a new instance of wrapper for a transaction with given priority. 
@@ -136,6 +137,17 @@ where tx: ExtrinsicFor, bytes: usize, priority: TransactionPriority, + ) -> Self { + Self::new_with_optional_priority(watched, source, tx, bytes, Some(priority)) + } + + /// Creates a new instance of wrapper for a transaction with given priority. + fn new_with_optional_priority( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + priority: Option, ) -> Self { Self { watched, @@ -143,7 +155,7 @@ where source: TimedTransactionSource::from_transaction_source(source, true), validated_at: AtomicU64::new(0), bytes, - priority: AtomicU64::new(priority), + priority: priority.into(), } } @@ -160,8 +172,8 @@ where } /// Returns the priority of the transaction. - pub(crate) fn priority(&self) -> TransactionPriority { - self.priority.load(atomic::Ordering::Relaxed) + pub(crate) fn priority(&self) -> Option { + *self.priority.read() } } @@ -357,8 +369,10 @@ where return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))); } - let mut sorted = - transactions.iter().map(|(h, v)| (h.clone(), v.clone())).collect::>(); + let mut sorted = transactions + .iter() + .filter_map(|(h, v)| v.priority().map(|_| (h.clone(), v.clone()))) + .collect::>(); // When pushing higher prio transaction, we need to find a number of lower prio txs, such // that the sum of their bytes is ge then size of new tx. 
Otherwise we could overflow size @@ -594,11 +608,12 @@ where } pub(super) fn update_transaction(&self, outcome: &ViewStoreSubmitOutcome) { - if let Some(priority) = outcome.priority() { - if let Some(tx) = self.transactions.write().get_mut(&outcome.hash()) { - tx.priority.store(priority, atomic::Ordering::Relaxed); - } - } + outcome.priority().map(|priority| { + self.transactions + .write() + .get_mut(&outcome.hash()) + .map(|p| *p.priority.write() = Some(priority)) + }); } } From 46fa1fd6a3da4fcf7e61d862d1b63facb32d4d15 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 4 Dec 2024 18:04:31 +0100 Subject: [PATCH 22/29] tests added + dead code cleanup --- .../src/fork_aware_txpool/tx_mem_pool.rs | 241 ++++++++++++++++-- .../transaction-pool/src/graph/base_pool.rs | 10 - .../transaction-pool/src/graph/tracked_map.rs | 5 - .../src/graph/validated_pool.rs | 29 --- 4 files changed, 213 insertions(+), 72 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 84963652ffa6..f2a2e6f05b3e 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -369,6 +369,10 @@ where return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))); } + if new_tx.bytes > self.max_transactions_total_bytes { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + } + let mut sorted = transactions .iter() .filter_map(|(h, v)| v.priority().map(|_| (h.clone(), v.clone()))) @@ -378,7 +382,7 @@ where // that the sum of their bytes is ge then size of new tx. Otherwise we could overflow size // limits. Naive way to do it - rev-sort by priority and eat the tail. 
- // reverse (lowest prio last) + // reverse (oldest, lowest prio last) sorted.sort_by(|(_, a), (_, b)| match b.priority().cmp(&a.priority()) { Ordering::Equal => match (a.source.timestamp, b.source.timestamp) { (Some(a), Some(b)) => b.cmp(&a), @@ -387,11 +391,11 @@ where ordering @ _ => ordering, }); - let required_size = new_tx.bytes; - let mut total_size = 0usize; + let mut total_size_removed = 0usize; let mut to_be_removed = vec![]; + let free_bytes = self.max_transactions_total_bytes - self.transactions.bytes(); - while total_size < required_size { + loop { let Some((worst_hash, worst_tx)) = sorted.pop() else { return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); }; @@ -400,8 +404,12 @@ where return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); } - total_size += worst_tx.bytes; + total_size_removed += worst_tx.bytes; to_be_removed.push(worst_hash); + + if free_bytes + total_size_removed >= new_tx.bytes { + break; + } } let source = new_tx.source(); @@ -412,28 +420,16 @@ where debug_assert!(!self.is_limit_exceeded(transactions.len(), self.transactions.bytes())); Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed)) + } - // let result = match ( - // self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), - // transactions.contains_key(&hash), - // ) { - // (false, false) => { - // let source = tx.source(); - // transactions.insert(hash, Arc::from(tx)); - // Ok(InsertionInfo::new(hash, source)) - // }, - // (_, true) => - // Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), - // (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), - // }; - // log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, - // result.as_ref().map(|r| r.hash)); - } - - /// Attempts to replace an existing transaction in the memory pool with a new one. + /// Attempts to insert a new transaction in the memory pool and drop some worse existing + /// transactions. 
/// - /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully - /// inserted; otherwise, returns an appropriate error indicating the failure. + /// This operation will not overflow the limit of the mempool. It means that cumulative + /// size of removed transactions will be equal (or greated) then size of newly inserted + /// transaction. + /// + /// Refer to [`try_insert_with_dropping`] for more details. pub(super) fn try_replace_transaction( &self, new_xt: ExtrinsicFor, @@ -736,6 +732,9 @@ mod tx_mem_pool_tests { assert_eq!(mempool.unwatched_and_watched_count(), (10, 5)); } + /// size of large extrinsic + const LARGE_XT_SIZE: usize = 1129; + fn large_uxt(x: usize) -> Extrinsic { ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() } @@ -745,8 +744,7 @@ mod tx_mem_pool_tests { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); - //size of large extrinsic is: 1129 - let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * 1129); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); @@ -771,5 +769,192 @@ mod tx_mem_pool_tests { )); } - //add some test for try_insert_with_dropping + #[test] + fn replacing_works_00() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); + + let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let low_prio = 0u64; + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some(low_prio)), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + 
assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes.into_iter().for_each(|o| mempool.update_transaction(&o)); + + let xt = Arc::from(large_uxt(98)); + let hash = api.hash_and_length(&xt).0; + let result = mempool + .try_replace_transaction(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert_eq!(result.removed, hashes[0..1]); + } + + #[test] + fn replacing_works_01() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); + + let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let low_prio = 0u64; + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some(low_prio)), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + assert_eq!(total_xts_bytes, max * LARGE_XT_SIZE); + + submit_outcomes.into_iter().for_each(|o| mempool.update_transaction(&o)); + + //this one should drop 2 xts (size: 1130): + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 1025]).build()); + let (hash, length) = api.hash_and_length(&xt); + assert_eq!(length, 1130); + let result = mempool + .try_replace_transaction(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert_eq!(result.removed, hashes[0..2]); + } + + #[test] + fn replacing_works_02() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let 
xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .enumerate() + .map(|(prio, t)| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some((COUNT - prio).try_into().unwrap())), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes.into_iter().for_each(|o| mempool.update_transaction(&o)); + + //this one should drop 3 xts (each of size 1129) + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); + let (hash, length) = api.hash_and_length(&xt); + // overhead is 105, thus length: 105 + 2154 + assert_eq!(length, 2 * LARGE_XT_SIZE + 1); + let result = mempool + .try_replace_transaction(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert!(result.removed.iter().eq(hashes[COUNT - 3..COUNT].iter().rev())); + } + + #[test] + fn replacing_skips_lower_prio_tx() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = 100u64; + let low_prio = 10u64; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let submit_outcomes = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + ViewStoreSubmitOutcome::new(h, Some(hi_prio)) + }) + .collect::>(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes.into_iter().for_each(|o| 
mempool.update_transaction(&o)); + + //this one should drop 3 xts (each of size 1129) + let xt = Arc::from(large_uxt(98)); + let result = + mempool.try_replace_transaction(xt, low_prio, TransactionSource::External, false); + + // we did not update priorities (update_transaction was not called): + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } + + #[test] + fn replacing_is_skipped_if_prios_are_not_set() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + //this one could drop 3 xts (each of size 1129) + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); + let length = api.hash_and_length(&xt).1; + // overhead is 105, thus length: 105 + 2154 + assert_eq!(length, 2 * LARGE_XT_SIZE + 1); + + let result = + mempool.try_replace_transaction(xt, hi_prio, TransactionSource::External, false); + + // we did not update priorities (update_transaction was not called): + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } } diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 8dce507be4e3..3b4afc88b789 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -438,16 +438,6 @@ impl BasePool(&self, init: R, mut f: F) -> R - where - F: FnMut(R, &Arc>) -> R, - { - self.ready - 
.fold::(init, |acc, current| f(acc, ¤t.transaction.transaction)) - } - /// Makes sure that the transactions in the queues stay within provided limits. /// /// Removes and returns worst transactions from the queues and all transactions that depend on diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 818c1ebe544f..fe15c6eca308 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -174,11 +174,6 @@ where self.inner_guard.len() } - /// Returns an iterator over all values. - pub fn values(&self) -> std::collections::hash_map::Values { - self.inner_guard.values() - } - /// Returns an iterator over all key-value pairs. pub fn iter(&self) -> Iter<'_, K, V> { self.inner_guard.iter() diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 0c0317750f85..287c0bd03dbc 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -178,8 +178,6 @@ impl Clone for ValidatedPool { } } -type BaseTransactionFor = base::Transaction, ExtrinsicFor>; - impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { @@ -745,33 +743,6 @@ impl ValidatedPool { }); } - /// Searches the base pool for a transaction with a lower priority than the given one. - /// - /// Returns the hash of a lower-priority transaction if found, otherwise `None`. - pub fn find_transaction_with_lower_prio( - &self, - tx: ValidatedTransactionFor, - ) -> Option> { - match tx { - ValidatedTransaction::Valid(base::Transaction { priority, .. 
}) => { - let pool = self.pool.read(); - pool.fold_ready( - Some((priority, Option::>>::None)), - |acc, current| { - let (priority, _) = acc.as_ref().expect("acc is initialized. qed"); - if current.priority < *priority { - return Some((current.priority, Some(current.clone()))) - } else { - return acc - } - }, - ) - .map(|r| r.1.map(|tx| tx.hash))? - }, - _ => None, - } - } - /// Removes a transaction subtree from the pool, starting from the given transaction hash. /// /// This function traverses the dependency graph of transactions and removes the specified From 2221d7a4ae0d897636ba465046d4bf94cd46e862 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 4 Dec 2024 18:13:32 +0100 Subject: [PATCH 23/29] comments cleanup --- .../src/fork_aware_txpool/fork_aware_txpool.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 3f8d66110432..5f26e580c3ab 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1329,14 +1329,12 @@ where watched: bool, xt: ExtrinsicFor, ) -> Result>, TxPoolApiError> { - // 1. we should validate at most_recent, and reuse result to submit there. let at = self .view_store .most_recent_view .read() .ok_or(TxPoolApiError::ImmediatelyDropped)?; - // 2. validate transaction at `at` let (best_view, _) = self .view_store .get_view_at(at, false) @@ -1354,21 +1352,15 @@ where .await; if let Some(priority) = validated_tx.priority() { - // 3. check if mempool contains a transaction with lower priority (actually worse - do - // we want to check timestamp too - no: see #4609?) Would be perfect to choose - // transaction with lowest the number of dependant txs in its subtree. - // 4. 
if yes - remove worse transaction from mempool and add new one. let insertion_info = self.mempool.try_replace_transaction(xt, priority, source, watched)?; for worst_hash in &insertion_info.removed { log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}"); - // 5. notify listner self.view_store .listener .transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash)); - // 6. remove transaction from the view_store self.view_store.remove_transaction_subtree( *worst_hash, |listener, removed_tx_hash| { @@ -1377,8 +1369,7 @@ where ); } - // 8. add to pending_replacements - make sure it will not sneak back via cloned view - // 9. subemit new one to the view, this will be done upon in the caller + // todo: add to pending_replacements - make sure it will not sneak back via cloned view return Ok(insertion_info) } From 0244ba032f1a63a76ecc2a936d5d1003bb2280da Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 7 Jan 2025 13:52:11 +0100 Subject: [PATCH 24/29] tweaks --- .../transaction-pool/src/fork_aware_txpool/dropped_watcher.rs | 4 ++-- .../src/fork_aware_txpool/fork_aware_txpool.rs | 1 - .../transaction-pool/src/fork_aware_txpool/view_store.rs | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 93fbb79ce261..91909fa5a906 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -335,14 +335,14 @@ where let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { if let Some(dropped) = ctx.get_pending_dropped_transaction() { - debug!("dropped_watcher: sending out (pending): {dropped:?}"); + trace!("dropped_watcher: sending out (pending): {dropped:?}"); return Some((dropped, 
ctx)); } tokio::select! { biased; Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { - debug!("dropped_watcher: sending out: {dropped:?}"); + trace!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } }, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 5f26e580c3ab..f4a659eec9b6 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -1369,7 +1369,6 @@ where ); } - // todo: add to pending_replacements - make sure it will not sneak back via cloned view return Ok(insertion_info) } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 89bf47cc3895..931a8addb1f3 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -804,8 +804,8 @@ where /// /// A `listener_action` callback function is invoked for every transaction that is removed, /// providing a reference to the pool's listener and the hash of the removed transaction. This - /// allows to trigger the required events. Note listener may be called multiple times for the - /// same hash. + /// allows to trigger the required events. Note that listener may be called multiple times for + /// the same hash. /// /// Function will also schedule view pre-insertion actions to ensure that transactions will be /// removed from newly created view. 
From 5d0283e2d2be74040b371e710028ec326a30ea49 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 8 Jan 2025 11:51:43 +0100 Subject: [PATCH 25/29] review comments --- .../fork_aware_txpool/fork_aware_txpool.rs | 79 +++++++++++-------- .../src/fork_aware_txpool/tx_mem_pool.rs | 28 ++++--- .../src/fork_aware_txpool/view.rs | 3 +- .../src/fork_aware_txpool/view_store.rs | 2 +- .../client/transaction-pool/src/graph/mod.rs | 3 + .../transaction-pool/tests/fatp_prios.rs | 6 +- 6 files changed, 75 insertions(+), 46 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index f4a659eec9b6..82e5474a6cf1 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -660,7 +660,7 @@ where .collect::>()) } - //todo: review + test maybe? + // Submit all the transactions to the mempool let retries = mempool_results .into_iter() .zip(xts.clone()) @@ -675,6 +675,7 @@ where let mempool_results = futures::future::join_all(retries).await; + // Collect transactions that were successfully submitted to the mempool... let to_be_submitted = mempool_results .iter() .zip(xts) @@ -686,26 +687,45 @@ where self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); + // ... and submit them to the view_store. Please note that transaction rejected by mempool + // are not sent here. let mempool = self.mempool.clone(); let results_map = view_store.submit(to_be_submitted.into_iter()).await; let mut submission_results = reduce_multiview_result(results_map).into_iter(); + // Note for composing final result: + // + // For each failed insertion into the mempool, the mempool result should be placed into + // the returned vector. 
+ // + // For each successful insertion into the mempool, the corresponding + // view_store submission result needs to be examined: + // - If there is an error during view_store submission, the transaction is removed from + // the mempool, and the final result recorded in the vector for this transaction is the + // view_store submission error. + // + // - If the view_store submission is successful, the transaction priority is updated in the + // mempool. + // + // Finally, it collects the hashes of updated transactions or submission errors (either + // from the mempool or view_store) into a returned vector. Ok(mempool_results .into_iter() .map(|result| { result .map_err(Into::into) .and_then(|insertion| { - submission_results - .next() - .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") - .inspect_err(|_|{ - mempool.remove_transaction(&insertion.hash); - }) + submission_results + .next() + .expect("The number of Ok results in mempool is exactly the same as the size of view_store submission result. 
qed.") + .inspect_err(|_|{ + mempool.remove_transaction(&insertion.hash); + }) }) + }) .map(|r| r.map(|r| { - mempool.update_transaction(&r); + mempool.update_transaction_priority(&r); r.hash() })) .collect::>()) @@ -758,7 +778,7 @@ where self.mempool.remove_transaction(&xt_hash); }) .map(|mut outcome| { - self.mempool.update_transaction(&outcome); + self.mempool.update_transaction_priority(&outcome); outcome.expect_watcher() }) } @@ -897,7 +917,7 @@ where self.view_store .submit_local(xt) .map(|outcome| { - self.mempool.update_transaction(&outcome); + self.mempool.update_transaction_priority(&outcome); outcome.hash() }) .or_else(|_| Ok(xt_hash)) @@ -1153,10 +1173,8 @@ where .into_iter() .zip(hashes) .map(|(result, tx_hash)| { - //todo: we may need a bool flag here indicating if we need to update priority - //(premature optimization) result - .map(|outcome| self.mempool.update_transaction(&outcome.into())) + .map(|outcome| self.mempool.update_transaction_priority(&outcome.into())) .or_else(|_| Err(tx_hash)) }) .collect::>(); @@ -1351,28 +1369,25 @@ where ) .await; - if let Some(priority) = validated_tx.priority() { - let insertion_info = - self.mempool.try_replace_transaction(xt, priority, source, watched)?; - - for worst_hash in &insertion_info.removed { - log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}"); - self.view_store - .listener - .transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash)); - - self.view_store.remove_transaction_subtree( - *worst_hash, - |listener, removed_tx_hash| { - listener.limits_enforced(&removed_tx_hash); - }, - ); - } + let Some(priority) = validated_tx.priority() else { + return Err(TxPoolApiError::ImmediatelyDropped) + }; + + let insertion_info = self.mempool.try_replace_transaction(xt, priority, source, watched)?; - return Ok(insertion_info) + for worst_hash in &insertion_info.removed { + log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}"); + 
self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash)); + + self.view_store + .remove_transaction_subtree(*worst_hash, |listener, removed_tx_hash| { + listener.limits_enforced(&removed_tx_hash); + }); } - Err(TxPoolApiError::ImmediatelyDropped) + return Ok(insertion_info) } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 95b88e7dde7c..08eb3f0aa6c8 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -388,7 +388,7 @@ where (Some(a), Some(b)) => b.cmp(&a), _ => Ordering::Equal, }, - ordering @ _ => ordering, + ordering => ordering, }); let mut total_size_removed = 0usize; @@ -429,7 +429,7 @@ where /// size of removed transactions will be equal (or greated) then size of newly inserted /// transaction. /// - /// Refer to [`try_insert_with_dropping`] for more details. + /// Refer to [`TxMemPool::try_insert_with_dropping`] for more details. pub(super) fn try_replace_transaction( &self, new_xt: ExtrinsicFor, @@ -603,7 +603,9 @@ where self.listener.invalidate_transactions(&invalid_hashes); } - pub(super) fn update_transaction(&self, outcome: &ViewStoreSubmitOutcome) { + /// Updates the priority of transaction stored in mempool using provided view_store submission + /// outcome. 
+ pub(super) fn update_transaction_priority(&self, outcome: &ViewStoreSubmitOutcome) { outcome.priority().map(|priority| { self.transactions .write() @@ -794,7 +796,9 @@ mod tx_mem_pool_tests { assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); - submit_outcomes.into_iter().for_each(|o| mempool.update_transaction(&o)); + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); let xt = Arc::from(large_uxt(98)); let hash = api.hash_and_length(&xt).0; @@ -832,7 +836,9 @@ mod tx_mem_pool_tests { assert_eq!(mempool.bytes(), total_xts_bytes); assert_eq!(total_xts_bytes, max * LARGE_XT_SIZE); - submit_outcomes.into_iter().for_each(|o| mempool.update_transaction(&o)); + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); //this one should drop 2 xts (size: 1130): let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 1025]).build()); @@ -871,7 +877,9 @@ mod tx_mem_pool_tests { assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); - submit_outcomes.into_iter().for_each(|o| mempool.update_transaction(&o)); + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); //this one should drop 3 xts (each of size 1129) let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); @@ -911,14 +919,16 @@ mod tx_mem_pool_tests { assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); - submit_outcomes.into_iter().for_each(|o| mempool.update_transaction(&o)); + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); //this one should drop 3 xts (each of size 1129) let xt = Arc::from(large_uxt(98)); let result = mempool.try_replace_transaction(xt, low_prio, TransactionSource::External, false); - // we did not update priorities (update_transaction was not called): + // we did not update priorities 
(update_transaction_priority was not called): assert!(matches!( result.unwrap_err(), sc_transaction_pool_api::error::Error::ImmediatelyDropped @@ -951,7 +961,7 @@ mod tx_mem_pool_tests { let result = mempool.try_replace_transaction(xt, hi_prio, TransactionSource::External, false); - // we did not update priorities (update_transaction was not called): + // we did not update priorities (update_transaction_priority was not called): assert!(matches!( result.unwrap_err(), sc_transaction_pool_api::error::Error::ImmediatelyDropped diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 291a1f7d7fa3..a35d68120a3a 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -463,8 +463,7 @@ where /// Removes the whole transaction subtree from the inner pool. /// - /// Intended to be called when removal is a result of replacement. Provided `replaced_with` - /// transaction hash is used in emitted _usurped_ event. + /// Refer to [`crate::graph::ValidatedPool::remove_subtree`] for more details. pub fn remove_subtree( &self, tx_hash: ExtrinsicHash, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 931a8addb1f3..8c37eed679d7 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -796,7 +796,7 @@ where let _results = futures::future::join_all(submit_futures).await; } - /// Removes a transaction subtree from the every view in the view_store, starting from the given + /// Removes a transaction subtree from every view in the view_store, starting from the given /// transaction hash. 
/// /// This function traverses the dependency graph of transactions and removes the specified diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 008503fb4cdf..2114577f4dee 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -47,3 +47,6 @@ pub use validated_pool::{ pub(crate) use self::pool::CheckBannedBeforeVerify; pub(crate) use listener::DroppedByLimitsEvent; + +#[cfg(doc)] +pub(crate) use validated_pool::ValidatedPool; diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 86352fa68fee..94ca75a3fe30 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -291,8 +291,10 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); assert_pool_status!(header02.hash(), &pool, 2, 0); assert_eq!(pool.mempool_len().1, 4); From caca2e1e816b69e9fca7ed4a83729509a19e47a6 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:18:11 +0100 Subject: [PATCH 26/29] clippy --- .../src/fork_aware_txpool/fork_aware_txpool.rs | 7 +++---- .../transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs | 2 +- .../transaction-pool/src/fork_aware_txpool/view_store.rs | 3 
+-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 82e5474a6cf1..6f0e79cce2dc 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -643,7 +643,7 @@ where /// are reduced to single result. Refer to `reduce_multiview_result` for more details. async fn submit_at( &self, - at: ::Hash, + _: ::Hash, source: TransactionSource, xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { @@ -667,7 +667,7 @@ where .map(|(result, xt)| async move { match result { Err(TxPoolApiError::ImmediatelyDropped) => - self.attempt_transaction_replacement(at, source, false, xt).await, + self.attempt_transaction_replacement(source, false, xt).await, result @ _ => result, } }) @@ -765,7 +765,7 @@ where match self.mempool.push_watched(source, xt.clone()) { Ok(result) => result, Err(TxPoolApiError::ImmediatelyDropped) => - self.attempt_transaction_replacement(at, source, true, xt.clone()).await?, + self.attempt_transaction_replacement(source, true, xt.clone()).await?, Err(e) => return Err(e.into()), }; @@ -1342,7 +1342,6 @@ where /// transaction was dropped immediately. 
async fn attempt_transaction_replacement( &self, - _: Block::Hash, source: TransactionSource, watched: bool, xt: ExtrinsicFor, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 08eb3f0aa6c8..6163ccde9167 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -375,7 +375,7 @@ where let mut sorted = transactions .iter() - .filter_map(|(h, v)| v.priority().map(|_| (h.clone(), v.clone()))) + .filter_map(|(h, v)| v.priority().map(|_| (*h, v.clone()))) .collect::>(); // When pushing higher prio transaction, we need to find a number of lower prio txs, such diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 8c37eed679d7..e08a0b12854e 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -839,8 +839,7 @@ where .iter() .chain(self.inactive_views.read().iter()) .filter(|(_, view)| view.is_imported(&xt_hash)) - .map(|(_, view)| view.remove_subtree(xt_hash, &listener_action)) - .flatten() + .flat_map(|(_, view)| view.remove_subtree(xt_hash, &listener_action)) .filter(|xt_hash| seen.insert(*xt_hash)) .collect(); From b86ef05398ba515932b2f4a4ae978ed23912479f Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:36:07 +0100 Subject: [PATCH 27/29] clean up --- .../src/fork_aware_txpool/dropped_watcher.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 91909fa5a906..50ebd15a63e4 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -206,10 +206,6 @@ where views ); views.remove(&key); - //todo: merge heads up warning! - if views.is_empty() { - self.pending_dropped_transactions.push(*tx_hash); - } }); self.future_transaction_views.iter_mut().for_each(|(tx_hash, views)| { @@ -263,10 +259,12 @@ }, TransactionStatus::Ready | TransactionStatus::InBlock(..) => { // note: if future transaction was once seens as the ready we may want to treat it - as ready transactions. Unreferenced future transactions are more likely to be - removed when the last referencing view is removed then ready transactions. - Transcaction seen as ready is likely quite close to be included in some - future fork. + as ready transaction. The rationale behind this is as follows: we want to remove + unreferenced future transactions when the last referencing view is removed (to + avoid clogging mempool). For ready transactions we prefer to keep them in mempool + even if no view is currently referencing them. Future transaction once seen as + ready is likely quite close to be included in some future fork (it is close to be + ready, so we make exception and treat such transaction as ready).
if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { views.insert(block_hash); self.ready_transaction_views.insert(tx_hash, views); From 429426134ec18030a99b711f2d6729439515039a Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Thu, 9 Jan 2025 14:28:43 +0000 Subject: [PATCH 28/29] Update from michalkucharczyk running command 'prdoc --bump minor --audience node_dev' --- prdoc/pr_6647.prdoc | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 prdoc/pr_6647.prdoc diff --git a/prdoc/pr_6647.prdoc b/prdoc/pr_6647.prdoc new file mode 100644 index 000000000000..b86de04a342d --- /dev/null +++ b/prdoc/pr_6647.prdoc @@ -0,0 +1,16 @@ +title: '`fatxpool`: proper handling of priorities when mempool is full' +doc: +- audience: Node Dev + description: |- + Higher-priority transactions can now replace lower-priority transactions even when the internal _tx_mem_pool_ is full. + + **Notes for reviewers:** + - The _tx_mem_pool_ now maintains information about transaction priority. Although _tx_mem_pool_ itself is stateless, transaction priority is updated after submission to the view. An alternative approach could involve validating transactions at the `at` block, but this is computationally expensive. To avoid additional validation overhead, I opted to use the priority obtained from runtime during submission to the view. This is the rationale behind introducing the `SubmitOutcome` struct, which synchronously communicates transaction priority from the view to the pool. This results in a very brief window during which the transaction priority remains unknown - those transaction are not taken into consideration while dropping takes place. In the future, if needed, we could update transaction priority using view revalidation results to keep this information fully up-to-date (as priority of transaction may change with chain-state evolution). 
+ - When _tx_mem_pool_ becomes full (an event anticipated to be rare), transaction priority must be known to perform priority-based removal. In such cases, the most recent block known is utilized for validation. I think that speculative submission to the view and re-using the priority from this submission would be an unnecessary complication. + - Once the priority is determined, lower-priority transactions whose cumulative size meets or exceeds the size of the new transaction are collected to ensure the pool size limit is not exceeded. + - Transaction removed from _tx_mem_pool_ , also needs to be removed from all the views with appropriate event (which is done by `remove_transaction_subtree`). To ensure complete removal, the `PendingTxReplacement` struct was re-factored to more generic `PendingPreInsertTask` (introduced in #6405) which covers removal and submssion of transaction in the view which may be potentially created in the background. This is to ensure that removed transaction will not re-enter to the newly created view. + + Closes: #5809 +crates: +- name: sc-transaction-pool + bump: minor From b4290cd8918a9281f05f3bae1ab52fcceb62b2be Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 9 Jan 2025 16:20:01 +0100 Subject: [PATCH 29/29] Update prdoc/pr_6647.prdoc --- prdoc/pr_6647.prdoc | 8 -------- 1 file changed, 8 deletions(-) diff --git a/prdoc/pr_6647.prdoc b/prdoc/pr_6647.prdoc index b86de04a342d..47af9924ef1c 100644 --- a/prdoc/pr_6647.prdoc +++ b/prdoc/pr_6647.prdoc @@ -3,14 +3,6 @@ doc: - audience: Node Dev description: |- Higher-priority transactions can now replace lower-priority transactions even when the internal _tx_mem_pool_ is full. - - **Notes for reviewers:** - - The _tx_mem_pool_ now maintains information about transaction priority. Although _tx_mem_pool_ itself is stateless, transaction priority is updated after submission to the view. 
An alternative approach could involve validating transactions at the `at` block, but this is computationally expensive. To avoid additional validation overhead, I opted to use the priority obtained from runtime during submission to the view. This is the rationale behind introducing the `SubmitOutcome` struct, which synchronously communicates transaction priority from the view to the pool. This results in a very brief window during which the transaction priority remains unknown - those transaction are not taken into consideration while dropping takes place. In the future, if needed, we could update transaction priority using view revalidation results to keep this information fully up-to-date (as priority of transaction may change with chain-state evolution). - - When _tx_mem_pool_ becomes full (an event anticipated to be rare), transaction priority must be known to perform priority-based removal. In such cases, the most recent block known is utilized for validation. I think that speculative submission to the view and re-using the priority from this submission would be an unnecessary complication. - - Once the priority is determined, lower-priority transactions whose cumulative size meets or exceeds the size of the new transaction are collected to ensure the pool size limit is not exceeded. - - Transaction removed from _tx_mem_pool_ , also needs to be removed from all the views with appropriate event (which is done by `remove_transaction_subtree`). To ensure complete removal, the `PendingTxReplacement` struct was re-factored to more generic `PendingPreInsertTask` (introduced in #6405) which covers removal and submssion of transaction in the view which may be potentially created in the background. This is to ensure that removed transaction will not re-enter to the newly created view. - - Closes: #5809 crates: - name: sc-transaction-pool bump: minor