diff --git a/doc/policy/packages.md b/doc/policy/packages.md index 274854ddf93de..2a5758318a9b6 100644 --- a/doc/policy/packages.md +++ b/doc/policy/packages.md @@ -80,24 +80,37 @@ test accepts): If any transactions in the package are already in the mempool, they are not submitted again ("deduplicated") and are thus excluded from this calculation. -To meet the two feerate requirements of a mempool, i.e., the pre-configured minimum relay feerate -(`-minrelaytxfee`) and the dynamic mempool minimum feerate, the total package feerate is used instead -of the individual feerate. The individual transactions are allowed to be below the feerate -requirements if the package meets the feerate requirements. For example, the parent(s) in the -package can pay no fees but be paid for by the child. - -*Rationale*: This can be thought of as "CPFP within a package," solving the issue of a parent not -meeting minimum fees on its own. This would allow contracting applications to adjust their fees at -broadcast time instead of overshooting or risking becoming stuck or pinned. - -*Rationale*: It would be incorrect to use the fees of transactions that are already in the mempool, as -we do not want a transaction's fees to be double-counted. +To meet the dynamic mempool minimum feerate, i.e., the feerate determined by the transactions +evicted when the mempool reaches capacity (not the static minimum relay feerate), the total package +feerate can be used instead of the individual feerates. For example, if the mempool minimum feerate is +5sat/vB and a 1sat/vB parent transaction has a high-feerate child, it may be accepted if +submitted as a package. + +*Rationale*: This can be thought of as "CPFP within a package," solving the issue of a presigned +transaction (i.e. one for which a replacement transaction with a higher fee cannot be signed) being +rejected from the mempool when transaction volume is high and the mempool minimum feerate rises. + +Note: Package feerate cannot be used to meet the minimum relay feerate (`-minrelaytxfee`) +requirement. For example, if the mempool minimum feerate is 5sat/vB and the minimum relay feerate is +set to 5sat/vB, a 1sat/vB parent transaction with a high-feerate child will not be accepted, even if +submitted as a package. + +*Rationale*: Avoid situations in which the mempool contains non-bumped transactions below the minimum relay +feerate (which we consider to have paid 0 fees and thus to be receiving free relay). While package +submission would ensure these transactions are bumped at the time of entry, it is not guaranteed +that the transaction will remain bumped. For example, a later transaction could replace the +fee-bumping child without also bumping the parent. These no-longer-bumped transactions should be +removed during a replacement, but we do not have a DoS-resistant way of removing them or enforcing a +limit on their quantity. Instead, prevent their entry into the mempool. Implementation Note: Transactions within a package are always validated individually first, and package validation is used for the transactions that failed. Since package feerate is only calculated using transactions that are not in the mempool, this implementation detail affects the outcome of package validation. +*Rationale*: It would be incorrect to use the fees of transactions that are already in the mempool, as +we do not want a transaction's fees to be double-counted.
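To make the two checks above concrete, here is a minimal sketch of the arithmetic only. It is not the actual `MemPoolAccept` code; `TxFeeInfo`, `FeerateSatPerKvB`, and `PassesFeerateChecks` are illustrative names, and the vector is assumed to hold only the de-duplicated transactions (those not already in the mempool). It shows the aggregate package feerate satisfying the dynamic mempool minimum while `-minrelaytxfee` remains a per-transaction requirement, as in the 5sat/vB examples above.

```cpp
#include <cstdint>
#include <vector>

struct TxFeeInfo {
    int64_t fee;    // satoshis
    int64_t vsize;  // virtual bytes
};

// Feerates expressed in sat/kvB to avoid fractions.
int64_t FeerateSatPerKvB(int64_t fee, int64_t vsize)
{
    return vsize > 0 ? fee * 1000 / vsize : 0;
}

bool PassesFeerateChecks(const std::vector<TxFeeInfo>& pkg,
                         int64_t mempool_min_sat_per_kvb,
                         int64_t min_relay_sat_per_kvb)
{
    int64_t total_fee{0};
    int64_t total_vsize{0};
    for (const auto& tx : pkg) {
        // -minrelaytxfee is still evaluated per transaction; package feerate
        // cannot be used to meet it (see the Note above).
        if (FeerateSatPerKvB(tx.fee, tx.vsize) < min_relay_sat_per_kvb) return false;
        total_fee += tx.fee;
        total_vsize += tx.vsize;
    }
    // The dynamic mempool minimum feerate is evaluated on the aggregate, so a
    // low-feerate parent can be carried by a high-feerate child.
    return FeerateSatPerKvB(total_fee, total_vsize) >= mempool_min_sat_per_kvb;
}
```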
+ *Rationale*: Packages are intended for incentive-compatible fee-bumping: transaction B is a "legitimate" fee-bump for transaction A only if B is a descendant of A and has a *higher* feerate than A. We want to prevent "parents pay for children" behavior; fees of parents should not help diff --git a/src/Makefile.am b/src/Makefile.am index 1d7004ac863bf..234d90732334f 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -219,6 +219,7 @@ BITCOIN_CORE_H = \ node/minisketchwrapper.h \ node/psbt.h \ node/transaction.h \ + node/txpackagetracker.h \ node/txreconciliation.h \ node/utxo_snapshot.h \ node/validation_cache_args.h \ @@ -410,6 +411,7 @@ libbitcoin_node_a_SOURCES = \ node/minisketchwrapper.cpp \ node/psbt.cpp \ node/transaction.cpp \ + node/txpackagetracker.cpp \ node/txreconciliation.cpp \ node/utxo_snapshot.cpp \ node/validation_cache_args.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 15d5a17cec837..8267fed8fd848 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -150,6 +150,7 @@ BITCOIN_TESTS =\ test/translation_tests.cpp \ test/txindex_tests.cpp \ test/txpackage_tests.cpp \ + test/txpackagetracker_tests.cpp \ test/txreconciliation_tests.cpp \ test/txrequest_tests.cpp \ test/txvalidation_tests.cpp \ diff --git a/src/consensus/validation.h b/src/consensus/validation.h index ad8ee676b2cbc..747b5cdb0aebe 100644 --- a/src/consensus/validation.h +++ b/src/consensus/validation.h @@ -54,6 +54,7 @@ enum class TxValidationResult { TX_CONFLICT, TX_MEMPOOL_POLICY, //!< violated mempool's fee/size/descendant/RBF/etc limits TX_NO_MEMPOOL, //!< this node does not have a mempool so can't validate the transaction + TX_LOW_FEE, //!< fee was insufficient to meet some policy (minimum/RBF/etc) }; /** A "reason" why a block was invalid, suitable for determining whether the diff --git a/src/init.cpp b/src/init.cpp index 1122496539d8d..8be2bbd06e0c2 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -488,6 +489,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-i2psam=", "I2P SAM proxy to reach I2P peers and accept I2P connections (default: none)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-i2pacceptincoming", strprintf("Whether to accept inbound I2P connections (default: %i). Ignored if -i2psam is not set. Listening for inbound I2P connections is done through the SAM proxy, not by binding to a local address and port.", DEFAULT_I2P_ACCEPT_INCOMING), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-onlynet=", "Make automatic outbound connections only to network (" + Join(GetNetworkNames(), ", ") + "). Inbound and manual connections are not affected by this option. 
It can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-packagerelay", strprintf("[EXPERIMENTAL] Support relaying transaction packages (default: %u)", node::DEFAULT_ENABLE_PACKAGE_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-txreconciliation", strprintf("Enable transaction reconciliations per BIP 330 (default: %d)", DEFAULT_TXRECONCILIATION_ENABLE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); @@ -1551,10 +1553,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } ChainstateManager& chainman = *Assert(node.chainman); + const bool enable_package_relay{gArgs.GetBoolArg("-packagerelay", node::DEFAULT_ENABLE_PACKAGE_RELAY)}; assert(!node.peerman); node.peerman = PeerManager::make(*node.connman, *node.addrman, node.banman.get(), - chainman, *node.mempool, ignores_incoming_txs); + chainman, *node.mempool, ignores_incoming_txs, enable_package_relay); RegisterValidationInterface(node.peerman.get()); // ********************************************************* Step 8: start indexers diff --git a/src/kernel/mempool_persist.cpp b/src/kernel/mempool_persist.cpp index 71f3aac3664ff..95592e207bf62 100644 --- a/src/kernel/mempool_persist.cpp +++ b/src/kernel/mempool_persist.cpp @@ -63,6 +63,7 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active } uint64_t num; file >> num; + LOCK2(cs_main, pool.cs); while (num) { --num; CTransactionRef tx; @@ -77,8 +78,12 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active pool.PrioritiseTransaction(tx->GetHash(), amountdelta); } if (nTime > TicksSinceEpoch(now - pool.m_expiry)) { - LOCK(cs_main); - const auto& accepted = AcceptToMemoryPool(active_chainstate, tx, nTime, /*bypass_limits=*/false, /*test_accept=*/false); + // Use bypass_limits=true to skip feerate checks, and call TrimToSize() at the very + // end. This means the mempool may temporarily exceed its maximum capacity. However, + // this means fee-bumped transactions are persisted, and the resulting mempool + // minimum feerate is not dependent on the order in which transactions are loaded + // from disk. + const auto& accepted = AcceptToMemoryPool(active_chainstate, tx, nTime, /*bypass_limits=*/true, /*test_accept=*/false); if (accepted.m_result_type == MempoolAcceptResult::ResultType::VALID) { ++count; } else { @@ -104,6 +109,13 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active for (const auto& i : mapDeltas) { pool.PrioritiseTransaction(i.first, i.second); } + const auto size_before_trim{pool.size()}; + // Ensure the maximum memory limits are ultimately enforced and any transactions below + // minimum feerates are evicted, since bypass_limits was set to true during ATMP calls. 
+ pool.TrimToSize(pool.m_max_size_bytes); + const auto num_evicted{size_before_trim - pool.size()}; + count -= num_evicted; + failed += num_evicted; std::set unbroadcast_txids; file >> unbroadcast_txids; diff --git a/src/logging.cpp b/src/logging.cpp index 7725fefeb7de3..7bd4479654f3f 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -182,6 +182,7 @@ const CLogCategoryDesc LogCategories[] = {BCLog::BLOCKSTORE, "blockstorage"}, {BCLog::TXRECONCILIATION, "txreconciliation"}, {BCLog::SCAN, "scan"}, + {BCLog::TXPACKAGES, "txpackages"}, {BCLog::ALL, "1"}, {BCLog::ALL, "all"}, }; @@ -286,6 +287,8 @@ std::string LogCategoryToStr(BCLog::LogFlags category) return "txreconciliation"; case BCLog::LogFlags::SCAN: return "scan"; + case BCLog::LogFlags::TXPACKAGES: + return "txpackages"; case BCLog::LogFlags::ALL: return "all"; } diff --git a/src/logging.h b/src/logging.h index e7c554e79f83a..16f1d4665840f 100644 --- a/src/logging.h +++ b/src/logging.h @@ -68,6 +68,7 @@ namespace BCLog { BLOCKSTORE = (1 << 26), TXRECONCILIATION = (1 << 27), SCAN = (1 << 28), + TXPACKAGES = (1 << 29), ALL = ~(uint32_t)0, }; enum class Level { diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 68bd91297c1fe..2833416636525 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -35,7 +36,6 @@ #include #include #include -#include #include #include // For NDEBUG compile time check #include @@ -263,6 +263,10 @@ struct Peer { /** Whether this peer relays txs via wtxid */ std::atomic m_wtxid_relay{false}; + + /** Whether this peer relays packages */ + std::atomic m_package_relay{false}; + /** The feerate in the most recent BIP133 `feefilter` message sent to the peer. * It is *not* a p2p protocol violation for the peer to send us * transactions with a lower fee rate than this. See BIP133. */ @@ -492,7 +496,8 @@ class PeerManagerImpl final : public PeerManager public: PeerManagerImpl(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, - CTxMemPool& pool, bool ignore_incoming_txs); + CTxMemPool& pool, bool ignore_incoming_txs, + bool enable_package_relay); /** Overridden from CValidationInterface. */ void BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) override @@ -598,6 +603,10 @@ class PeerManagerImpl final : public PeerManager bool ProcessOrphanTx(Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); + /** Validate package if any */ + void ProcessPackage(CNode& node, const node::TxPackageTracker::PackageToValidate& package) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); + /** Process a single headers message from a peer. * * @param[in] pfrom CNode of the peer @@ -680,6 +689,15 @@ class PeerManagerImpl final : public PeerManager void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + /** Helper function for AddOrphanResolutionCandidates, but can also be called by itself if the + * orphan is announced again later. */ + void AddOrphanAnnouncer(NodeId nodeid, const uint256& orphan_wtxid, const CTransactionRef& tx, std::chrono::microseconds current_time) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main, !m_peer_mutex); + + /** Register with orphan TxRequestTracker that a peer may help us resolve this orphan. 
*/ + void AddOrphanResolutionCandidates(const CTransactionRef& orphan, NodeId originator) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main, !m_peer_mutex); + /** Send a version message to a peer */ void PushNodeVersion(CNode& pnode, const Peer& peer); @@ -716,6 +734,7 @@ class PeerManagerImpl final : public PeerManager CTxMemPool& m_mempool; TxRequestTracker m_txrequest GUARDED_BY(::cs_main); std::unique_ptr m_txreconciliation; + std::unique_ptr m_txpackagetracker; /** The height of the best chain */ std::atomic m_best_height{-1}; @@ -726,6 +745,9 @@ class PeerManagerImpl final : public PeerManager /** Whether this node is running in -blocksonly mode */ const bool m_ignore_incoming_txs; + /** Whether this node does package relay */ + const bool m_enable_package_relay; + bool RejectIncomingTxs(const CNode& peer) const; /** Whether we've completed initial sync yet, for determining when to turn @@ -772,6 +794,9 @@ class PeerManagerImpl final : public PeerManager /** Number of peers with wtxid relay. */ std::atomic m_wtxid_relay_peers{0}; + /** Number of peers with package relay. */ + std::atomic m_package_relay_peers{0}; + /** Number of outbound peers with m_chain_sync.m_protect. */ int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; @@ -781,13 +806,18 @@ class PeerManagerImpl final : public PeerManager /** Stalling timeout for blocks in IBD */ std::atomic m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT}; - bool AlreadyHaveTx(const GenTxid& gtxid) + bool AlreadyHaveTx(const GenTxid& gtxid, bool include_orphanage = true) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex); /** - * Filter for transactions that were recently rejected by the mempool. - * These are not rerequested until the chain tip changes, at which point - * the entire filter is reset. + * Filter for transactions that were recently rejected by the mempool for reasons other than too + * low fee. This filter only contains wtxids and txids of individual transactions. + * + * Upon receiving an announcement for a transaction, if it exists in this filter, do not + * download the txdata. Upon receiving a package info, if it contains a transaction in this + * filter, do not download the tx data. + * + * Reset this filter when the chain tip changes. * * Without this filter we'd be re-requesting txs from each of our peers, * increasing bandwidth consumption considerably. For instance, with 100 @@ -819,6 +849,33 @@ class PeerManagerImpl final : public PeerManager * Memory used: 1.3 MB */ CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001}; + /** + * Filter for transactions or packages of transactions that were recently rejected by + * the mempool but are eligible for reconsideration if submitted with other transactions. + * This filter only contains wtxids of individual transactions and combined hashes of packages + * (see GetCombinedHash and GetPackageHash). + * + * When a transaction's error is TX_LOW_FEE (in a package or by itself), add its wtxid to this + * filter. If it was in a package, also add the combined hash of the transactions in its + * subpackage to this filter. When a package fails for any reason, add the combined hash of all + * transactions in the package info to this filter. + * + * Upon receiving an announcement for a transaction, if it exists in this filter, do not + * download the txdata. Upon receiving a package info, if the combined hash of its transactions + * are in this filter, do not download the txdata. 
+ * + * Reset this filter when the chain tip changes. + * + * We will only add wtxids to this filter. Groups of multiple transactions are represented by + * the hash of their wtxids, concatenated together in lexicographical order. + * + * Parameters are picked to be identical to that of m_recent_rejects, with the same rationale. + * Memory used: 1.3 MB + * FIXME: this filter can probably be smaller, but how much smaller? + */ + CRollingBloomFilter m_recent_rejects_reconsiderable GUARDED_BY(::cs_main){120'000, 0.000'001}; + /** The block hash of the chain tip at which transactions in m_recent_rejects and + * m_recent_rejects_reconsiderable were rejected. */ uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); /* @@ -907,6 +964,10 @@ class PeerManagerImpl final : public PeerManager CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex); + /** Get AncPkgInfo for a transaction. Returns std::nullopt if something is wrong and the + * node should be disconnected. */ + std::optional> MaybeGetAncPkgInfo(Peer& peer, const CTransactionRef& tx); + void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex) LOCKS_EXCLUDED(::cs_main); @@ -934,9 +995,6 @@ class PeerManagerImpl final : public PeerManager /** Number of peers from which we're downloading blocks. */ int m_peers_downloading_from GUARDED_BY(cs_main) = 0; - /** Storage for orphan information */ - TxOrphanage m_orphanage; - void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. @@ -1409,6 +1467,53 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) } } +void PeerManagerImpl::AddOrphanAnnouncer(NodeId nodeid, const uint256& orphan_wtxid, const CTransactionRef& tx, std::chrono::microseconds current_time) +{ + AssertLockHeld(::cs_main); // For m_txrequest + const bool connected = m_connman.ForNode(nodeid, [](CNode* node) { return node->fSuccessfullyConnected && !node->fDisconnect; }); + if (!connected) return; + if (m_txpackagetracker->Count(nodeid) + m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) { + // Too many queued announcements. Request from a different peer. + // TODO: Allow peers with Relay permissions to bypass this restriction. + return; + } + const CNodeState* state = State(nodeid); + // Decide the TxRequestTracker parameters for this orphan resolution. + // TxPackageTracker may also increase the delay. 
+ // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission) + // - "reqtime": current time plus delays for: + // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections + // - TXID_RELAY_DELAY for txid-based parent requests while package relay peers are available + // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least + // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight + // TODO: allow peers with Relay permissions to bypass this restiction + auto delay{0us}; + const bool preferred = state->fPreferredDownload; + if (!preferred) delay += NONPREF_PEER_TX_DELAY; + const bool overloaded = m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; + const auto peer_ref{GetPeerRef(nodeid)}; + if (!peer_ref) return; + if (!peer_ref->m_package_relay && m_package_relay_peers > 0) delay += TXID_RELAY_DELAY; + if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; + m_txpackagetracker->AddOrphanTx(nodeid, orphan_wtxid, tx, preferred, current_time + delay); +} +void PeerManagerImpl::AddOrphanResolutionCandidates(const CTransactionRef& orphan, NodeId originator) +{ + const auto current_time{GetTime()}; + + // The originator will not show up in GetCandidatePeers() since we already requested from them. + AddOrphanAnnouncer(originator, orphan->GetWitnessHash(), orphan, current_time); + // We prefer to request the orphan's ancestors via package relay rather than txids + // of missing inputs. Also, if the first request fails, we should try again. + // Get all peers that announced this transaction and prioritize accordingly... + for (const auto nodeid : m_txrequest.GetCandidatePeers(orphan->GetWitnessHash())) { + AddOrphanAnnouncer(nodeid, orphan->GetWitnessHash(), orphan, current_time); + } + for (const auto nodeid : m_txrequest.GetCandidatePeers(orphan->GetHash())) { + AddOrphanAnnouncer(nodeid, orphan->GetWitnessHash(), orphan, current_time); + } +} + void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time) { AssertLockHeld(::cs_main); // For m_txrequest @@ -1497,7 +1602,9 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) assert(peer != nullptr); misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score); m_wtxid_relay_peers -= peer->m_wtxid_relay; + m_package_relay_peers -= peer->m_package_relay; assert(m_wtxid_relay_peers >= 0); + assert(m_package_relay_peers >= 0); } CNodeState *state = State(nodeid); assert(state != nullptr); @@ -1508,9 +1615,10 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) for (const QueuedBlock& entry : state->vBlocksInFlight) { mapBlocksInFlight.erase(entry.pindex->GetBlockHash()); } - m_orphanage.EraseForPeer(nodeid); + m_txpackagetracker->DisconnectedPeer(nodeid); m_txrequest.DisconnectedPeer(nodeid); if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid); + if (m_txpackagetracker) m_txpackagetracker->DisconnectedPeer(nodeid); m_num_preferred_download_peers -= state->fPreferredDownload; m_peers_downloading_from -= (state->nBlocksInFlight != 0); assert(m_peers_downloading_from >= 0); @@ -1526,8 +1634,9 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) assert(m_peers_downloading_from == 0); assert(m_outbound_peers_with_protect_from_disconnect == 0); assert(m_wtxid_relay_peers == 0); + assert(m_package_relay_peers == 0); assert(m_txrequest.Size() == 0); - assert(m_orphanage.Size() == 0); + assert(m_txpackagetracker->OrphanageSize() == 0); } } // cs_main if 
(node.fSuccessfullyConnected && misbehavior == 0 && @@ -1601,6 +1710,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c stats.m_fee_filter_received = 0; } + stats.m_package_relay = peer->m_package_relay; stats.m_ping_wait = ping_wait; stats.m_addr_processed = peer->m_addr_processed.load(); stats.m_addr_rate_limited = peer->m_addr_rate_limited.load(); @@ -1723,6 +1833,7 @@ bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationStat case TxValidationResult::TX_CONFLICT: case TxValidationResult::TX_MEMPOOL_POLICY: case TxValidationResult::TX_NO_MEMPOOL: + case TxValidationResult::TX_LOW_FEE: break; } if (message != "") { @@ -1780,27 +1891,31 @@ std::optional PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl std::unique_ptr PeerManager::make(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, - CTxMemPool& pool, bool ignore_incoming_txs) + CTxMemPool& pool, bool ignore_incoming_txs, + bool enable_package_relay) { - return std::make_unique(connman, addrman, banman, chainman, pool, ignore_incoming_txs); + return std::make_unique(connman, addrman, banman, chainman, pool, ignore_incoming_txs, enable_package_relay); } PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, - CTxMemPool& pool, bool ignore_incoming_txs) + CTxMemPool& pool, bool ignore_incoming_txs, + bool enable_package_relay) : m_chainparams(chainman.GetParams()), m_connman(connman), m_addrman(addrman), m_banman(banman), m_chainman(chainman), m_mempool(pool), - m_ignore_incoming_txs(ignore_incoming_txs) + m_ignore_incoming_txs(ignore_incoming_txs), + m_enable_package_relay{enable_package_relay} { // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation. // This argument can go away after Erlay support is complete. if (gArgs.GetBoolArg("-txreconciliation", DEFAULT_TXRECONCILIATION_ENABLE)) { m_txreconciliation = std::make_unique(TXRECONCILIATION_VERSION); } + m_txpackagetracker = std::make_unique(); } void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) @@ -1825,7 +1940,7 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) */ void PeerManagerImpl::BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindex) { - m_orphanage.EraseForBlock(*pblock); + m_txpackagetracker->BlockConnected(*pblock); m_last_tip_update = GetTime(); { @@ -1999,7 +2114,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta // -bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) +bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_orphanage) { if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) { // If the chain tip has changed previously rejected transactions @@ -2008,11 +2123,12 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) // txs a second chance. 
hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash(); m_recent_rejects.reset(); + m_recent_rejects_reconsiderable.reset(); } const uint256& hash = gtxid.GetHash(); - if (m_orphanage.HaveTx(gtxid)) return true; + if (include_orphanage && m_txpackagetracker->OrphanageHaveTx(gtxid)) return true; { LOCK(m_recent_confirmed_transactions_mutex); @@ -2278,6 +2394,31 @@ CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, return {}; } +std::optional> PeerManagerImpl::MaybeGetAncPkgInfo(Peer& peer, const CTransactionRef& tx) +{ + if (!peer.m_package_relay) { + return std::nullopt; + } + CTxMemPool::setEntries ancestors; + { + LOCK(m_mempool.cs); + auto txiter{m_mempool.GetIter(tx->GetHash())}; + if (txiter == std::nullopt) { + // This tx is no longer in mempool (maybe in mapRelay) + return std::vector({}); + } + ancestors = m_mempool.AssumeCalculateMemPoolAncestors(__func__, **txiter, CTxMemPool::Limits::NoLimits(), /*fSearchForParents=*/false); + // Otherwise the transaction will appear multiple times in the wtxids list. + Assume(ancestors.count(txiter.value()) == 0); + } + std::vector ancestor_package_wtxids; + std::transform(ancestors.cbegin(), ancestors.cend(), std::back_inserter(ancestor_package_wtxids), + [](const auto& entry){return entry->GetTx().GetWitnessHash();}); + // Last wtxid is the representative tx, even if it has no unconfirmed ancestors. + ancestor_package_wtxids.push_back(tx->GetWitnessHash()); + return ancestor_package_wtxids; +} + void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic& interruptMsgProc) { AssertLockNotHeld(cs_main); @@ -2295,7 +2436,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic // Process as many TX items from the front of the getdata queue as // possible, since they're common and it's efficient to batch process // them. - while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) { + while (it != peer.m_getdata_requests.end() && (it->IsGenTxMsg() || it->IsMsgAncPkgInfo())) { if (interruptMsgProc) return; // The send buffer provides backpressure. If there's no space in // the buffer, pause processing until the next call. @@ -2311,9 +2452,24 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv), mempool_req, now); if (tx) { - // WTX and WITNESS_TX imply we serialize with witness - int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); - m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); + if (inv.IsGenTxMsg()) { + // WTX and WITNESS_TX imply we serialize with witness + int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); + m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); + } else if (inv.IsMsgAncPkgInfo()) { + auto ancestor_wtxids = MaybeGetAncPkgInfo(peer, tx); + if (ancestor_wtxids == std::nullopt) { + pfrom.fDisconnect = true; + // No need to process the other requests if we are disconnecting the peer. + LogPrint(BCLog::NET, "\nDisconnecting peer %d -- requested ancpkginfo but not allowed\n", pfrom.GetId()); + return; + } else if (ancestor_wtxids->empty()) { + // Couldn't create the ancpkginfo for some reason, send a notfound. 
+ vNotFound.push_back(inv); + continue; + } + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::ANCPKGINFO, ancestor_wtxids.value())); + } m_mempool.RemoveUnbroadcastTx(tx->GetHash()); // As we're going to send tx, make sure its unconfirmed parents are made requestable. std::vector parent_ids_to_add; @@ -2326,6 +2482,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic for (const CTxMemPoolEntry& parent : parents) { if (parent.GetTime() > now - UNCONDITIONAL_RELAY_DELAY) { parent_ids_to_add.push_back(parent.GetTx().GetHash()); + parent_ids_to_add.push_back(parent.GetTx().GetWitnessHash()); } } } @@ -2917,7 +3074,7 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) CTransactionRef porphanTx = nullptr; - while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) { + while (CTransactionRef porphanTx = m_txpackagetracker->GetTxToReconsider(peer.m_id)) { const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx); const TxValidationState& state = result.m_state; const uint256& orphanHash = porphanTx->GetHash(); @@ -2925,8 +3082,8 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString()); RelayTransaction(orphanHash, porphanTx->GetWitnessHash()); - m_orphanage.AddChildrenToWorkSet(*porphanTx); - m_orphanage.EraseTx(orphanHash); + m_txpackagetracker->AddChildrenToWorkSet(*porphanTx); + m_txpackagetracker->EraseOrphanTx(porphanTx->GetWitnessHash()); for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) { AddToCompactExtraTransactions(removedTx); } @@ -2972,7 +3129,7 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) m_recent_rejects.insert(porphanTx->GetHash()); } } - m_orphanage.EraseTx(orphanHash); + m_txpackagetracker->EraseOrphanTx(porphanTx->GetWitnessHash()); return true; } } @@ -2980,6 +3137,78 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) return false; } +void PeerManagerImpl::ProcessPackage(CNode& node, const node::TxPackageTracker::PackageToValidate& package) +{ + AssertLockHeld(g_msgproc_mutex); + LOCK(cs_main); + // We won't re-validate the exact same transaction or package again. + if (m_recent_rejects_reconsiderable.contains(GetPackageHash(package.m_unvalidated_txns))) { + // Should we do anything else here? + return; + } + const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, + package.m_unvalidated_txns, /*test_accept=*/false)}; + if (package_result.m_state.IsInvalid()) { + // If another peer sends the same packageinfo again, we can immediately reject it without + // re-downloading the transactions. Note that state.IsInvalid() doesn't mean all + // transactions have been rejected. + m_recent_rejects_reconsiderable.insert(package.m_pkginfo_hash); + } + std::set successful_txns; + std::set invalid_final_txns; + for (const auto& tx : package.m_unvalidated_txns) { + const auto& txid = tx->GetHash(); + const auto& wtxid = tx->GetWitnessHash(); + const auto result{package_result.m_tx_results.find(wtxid)}; + if (package_result.m_state.IsValid() || + package_result.m_state.GetResult() == PackageValidationResult::PCKG_TX) { + // If PCKG_TX or valid, every tx should have a result. 
+ Assume(result != package_result.m_tx_results.end()); + } + if (result == package_result.m_tx_results.end()) break; + if (result->second.m_result_type == MempoolAcceptResult::ResultType::VALID) { + LogPrint(BCLog::MEMPOOL, "\nProcessPackage: tx %s from peer=%d accepted\n", txid.ToString(), node.GetId()); + successful_txns.insert(wtxid); + m_txrequest.ForgetTxHash(txid); + m_txrequest.ForgetTxHash(wtxid); + RelayTransaction(txid, wtxid); + node.m_last_tx_time = GetTime(); + for (const CTransactionRef& removedTx : result->second.m_replaced_transactions.value()) { + AddToCompactExtraTransactions(removedTx); + } + } else if (result->second.m_state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) { + if (result->second.m_state.GetResult() == TxValidationResult::TX_LOW_FEE) { + m_recent_rejects_reconsiderable.insert(wtxid); + // FIXME: also cache subpackage failure + } else { + m_recent_rejects.insert(wtxid); + if (result->second.m_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && wtxid != txid) { + m_recent_rejects.insert(txid); + m_txrequest.ForgetTxHash(txid); + } else if (result->second.m_state.GetResult() == TxValidationResult::TX_CONSENSUS) { + invalid_final_txns.insert(wtxid); + } + } + m_txrequest.ForgetTxHash(wtxid); + if (RecursiveDynamicUsage(*tx) < 100000) { + AddToCompactExtraTransactions(tx); + } + LogPrint(BCLog::MEMPOOLREJ, "\nProcessPackage: %s from peer=%d was not accepted: %s\n", + wtxid.ToString(), node.GetId(), result->second.m_state.ToString()); + MaybePunishNodeForTx(wtxid == package.m_rep_wtxid ? node.GetId() : package.m_info_provider, result->second.m_state); + } + m_txpackagetracker->EraseOrphanTx(wtxid); + } + m_txpackagetracker->FinalizeTransactions(successful_txns, invalid_final_txns); + // Do this last to avoid adding children that were already validated within this package. + for (const auto& tx : package.m_unvalidated_txns) { + auto iter{package_result.m_tx_results.find(tx->GetWitnessHash())}; + if (iter != package_result.m_tx_results.end() && iter->second.m_result_type == MempoolAcceptResult::ResultType::VALID) { + m_txpackagetracker->AddChildrenToWorkSet(*tx); + } + } +} + bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer, BlockFilterType filter_type, uint32_t start_height, const uint256& stop_hash, uint32_t max_height_diff, @@ -3256,6 +3485,18 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (greatest_common_version >= WTXID_RELAY_VERSION) { m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY)); + if (m_enable_package_relay) { + m_txpackagetracker->ReceivedVersion(peer->m_id); + if (!m_ignore_incoming_txs) { + // Always send a sendpackages for each version we support if: + // - Protocol version is at least WTXID_RELAY_VERSION + // - We have package relay enabled + // - We are not in blocksonly mode. + for (const auto version : m_txpackagetracker->GetVersions()) { + m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDPACKAGES, version)); + } + } + } } // Signal ADDRv2 support (BIP155). 
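The handshake code above, together with the verack handling in the next hunk, only marks a peer as a package-relay peer when several conditions all hold. The following condensed sketch restates that state machine; the struct and field names are illustrative (the real bookkeeping is `RegistrationState` in `txpackagetracker.cpp` later in this patch), and receiving `sendpackages` after verack is a disconnectable protocol violation, as handled below.

```cpp
#include <cstdint>
#include <set>

struct PackageRelayNegotiation {
    bool we_enabled_package_relay{false};   // our -packagerelay setting
    bool peer_relays_txs{false};            // peer participates in tx relay with us
    bool wtxid_relay{false};                // peer sent BIP339 wtxidrelay
    bool sendpackages_received{false};      // peer sent sendpackages before verack
    std::set<uint32_t> versions_in_common;  // announced versions we also support

    bool PackageRelayActive() const
    {
        // Mirrors RegistrationState::CanRelayPackages() further down in this
        // patch, gated additionally on our own -packagerelay flag.
        return we_enabled_package_relay && peer_relays_txs &&
               wtxid_relay && sendpackages_received;
    }
};
```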
@@ -3443,6 +3684,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, tx_relay->m_next_inv_send_time == 0s)); } + if (m_enable_package_relay && m_txpackagetracker->ReceivedVerack(peer->m_id, pfrom.m_relays_txs, peer->m_wtxid_relay)) { + peer->m_package_relay = true; + m_package_relay_peers++; + } pfrom.fSuccessfullyConnected = true; return; } @@ -3470,6 +3715,23 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + if (msg_type == NetMsgType::SENDPACKAGES) { + if (m_enable_package_relay) { + if (pfrom.fSuccessfullyConnected) { + // Disconnect peers that send a SENDPACKAGES message after VERACK. + LogPrint(BCLog::NET, "sendpackages received after verack from peer=%d; disconnecting\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + uint32_t sendpackages_version{0}; + vRecv >> sendpackages_version; + m_txpackagetracker->ReceivedSendpackages(peer->m_id, sendpackages_version); + } else { + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendpackages from peer=%d ignored, as our node does not have package relay enabled\n", pfrom.GetId()); + } + return; + } + // BIP339 defines feature negotiation of wtxidrelay, which must happen between // VERSION and VERACK to avoid relay problems from switching after a connection is up. if (msg_type == NetMsgType::WTXIDRELAY) { @@ -3718,6 +3980,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) { AddTxAnnouncement(pfrom, gtxid, current_time); } + if (inv.IsMsgWtx() && m_txpackagetracker->OrphanageHaveTx(gtxid) && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) { + AddOrphanAnnouncer(pfrom.GetId(), inv.hash, nullptr, current_time); + } } else { LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId()); } @@ -3977,6 +4242,75 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + if (msg_type == NetMsgType::ANCPKGINFO) { + std::vector package_wtxids; + vRecv >> package_wtxids; + if (package_wtxids.empty()) return; + if (!peer->m_package_relay) { + LogPrint(BCLog::NET, "ancpkginfo sent in violation of protocol, disconnecting peer=%d\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + if (!m_txpackagetracker->PkgInfoAllowed(pfrom.GetId(), package_wtxids.back(), node::RECEIVER_INIT_ANCESTOR_PACKAGES)) { + LogPrint(BCLog::NET, "unsolicited ancpkginfo sent, disconnecting peer=%d\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + + // Note: Multiple ancestor packages can have the same representative (different ancestors for + // honest or malicious reasons). But since we're only using this to resolve orphans, we + // should never be in a situation where we have multiple ancpkginfos out for the same wtxid + const auto& rep_wtxid{package_wtxids.back()}; + if (package_wtxids.size() > MAX_PACKAGE_COUNT) { + LogPrint(BCLog::NET, "discarding package info for tx %s, too many transactions\n", rep_wtxid.ToString()); + // FIXME: disconnect? + m_txpackagetracker->ForgetPkgInfo(pfrom.GetId(), package_wtxids.back(), node::RECEIVER_INIT_ANCESTOR_PACKAGES); + return; + } + LOCK(::cs_main); + if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) return; + // We have already validated this exact set of transactions recently, so don't do it again. 
+ if (m_recent_rejects_reconsiderable.contains(GetCombinedHash(package_wtxids))) { + LogPrint(BCLog::NET, "discarding package info for tx %s, this package has already been rejected\n", + rep_wtxid.ToString()); + m_txpackagetracker->ForgetPkgInfo(pfrom.GetId(), package_wtxids.back(), node::RECEIVER_INIT_ANCESTOR_PACKAGES); + return; + } + for (const auto& wtxid : package_wtxids) { + // If a transaction is in m_recent_rejects and not m_recent_rejects_reconsiderable, that + // means it will not become valid by adding another transaction. + if (m_recent_rejects.contains(wtxid)) { + LogPrint(BCLog::NET, + "discarding package for tx %s, tx %s has already been rejected and is not eligible for reconsideration\n", + rep_wtxid.ToString(), wtxid.ToString()); + m_txpackagetracker->ForgetPkgInfo(pfrom.GetId(), package_wtxids.back(), node::RECEIVER_INIT_ANCESTOR_PACKAGES); + return; + } + } + std::map txdata_status; + std::vector pkgtxns_to_request; + for (const auto& wtxid : package_wtxids) { + AddKnownTx(*peer, wtxid); + if (AlreadyHaveTx(GenTxid::Wtxid(wtxid), /*include_orphanage=*/true)) { + txdata_status.emplace(wtxid, false); + } else { + txdata_status.emplace(wtxid, true); + pkgtxns_to_request.push_back(wtxid); + } + } + const auto current_time{GetTime()}; + // FIXME: could be empty if we already have all transactions in orphanage... + if (!pkgtxns_to_request.empty()) { + // It is correct to continue asking another peer for ancpkginfo because this peer could + // have provided a false list of ancestors in order to get us to reject the tx. + Assume(txdata_status.size() == package_wtxids.size()); + m_txpackagetracker->ReceivedAncPkgInfo(pfrom.GetId(), rep_wtxid, txdata_status, pkgtxns_to_request, + current_time + GETDATA_TX_INTERVAL); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETPKGTXNS, pkgtxns_to_request)); + } + return; + } + if (msg_type == NetMsgType::TX) { if (RejectIncomingTxs(pfrom)) { LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId()); @@ -4048,7 +4382,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, m_txrequest.ForgetTxHash(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); - m_orphanage.AddChildrenToWorkSet(tx); + m_txpackagetracker->FinalizeTransactions({tx.GetWitnessHash()}, {}); + m_txpackagetracker->AddChildrenToWorkSet(tx); pfrom.m_last_tx_time = GetTime(); @@ -4064,38 +4399,18 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected - - // Deduplicate parent txids, so that we don't have to loop over - // the same parent txid more than once down below. - std::vector unique_parents; - unique_parents.reserve(tx.vin.size()); - for (const CTxIn& txin : tx.vin) { - // We start with all parents, and then remove duplicates below. 
- unique_parents.push_back(txin.prevout.hash); - } - std::sort(unique_parents.begin(), unique_parents.end()); - unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); - for (const uint256& parent_txid : unique_parents) { + for (const auto& input : tx.vin) { + const auto& parent_txid = input.prevout.hash; + AddKnownTx(*peer, parent_txid); if (m_recent_rejects.contains(parent_txid)) { fRejectedParents = true; break; } } if (!fRejectedParents) { - const auto current_time{GetTime()}; - - for (const uint256& parent_txid : unique_parents) { - // Here, we only have the txid (and not wtxid) of the - // inputs, so we only request in txid mode, even for - // wtxidrelay peers. - // Eventually we should replace this with an improved - // protocol for getting all unconfirmed parents. - const auto gtxid{GenTxid::Txid(parent_txid)}; - AddKnownTx(*peer, parent_txid); - if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time); - } - - if (m_orphanage.AddTx(ptx, pfrom.GetId())) { + const bool had_before{m_txpackagetracker->OrphanageHaveTx(GenTxid::Wtxid(ptx->GetWitnessHash()))}; + AddOrphanResolutionCandidates(ptx, pfrom.GetId()); + if (!had_before && m_txpackagetracker->OrphanageHaveTx(GenTxid::Wtxid(ptx->GetWitnessHash()))) { AddToCompactExtraTransactions(ptx); } @@ -4105,7 +4420,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789) unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetIntArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); - m_orphanage.LimitOrphans(nMaxOrphanTx); + m_txpackagetracker->LimitOrphans(nMaxOrphanTx); } else { LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); // We will continue to reject this tx since it has rejected @@ -4134,7 +4449,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 // for concerns around weakening security of unupgraded nodes // if we start doing this too early. - m_recent_rejects.insert(tx.GetWitnessHash()); + if (state.GetResult() == TxValidationResult::TX_LOW_FEE) { + m_recent_rejects_reconsiderable.insert(tx.GetWitnessHash()); + } else { + m_recent_rejects.insert(tx.GetWitnessHash()); + } m_txrequest.ForgetTxHash(tx.GetWitnessHash()); // If the transaction failed for TX_INPUTS_NOT_STANDARD, // then we know that the witness was irrelevant to the policy @@ -4624,6 +4943,65 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + if (msg_type == NetMsgType::GETPKGTXNS) { + unsigned int num_txns = ReadCompactSize(vRecv); + if (num_txns == 0 || num_txns > MAX_PACKAGE_COUNT) { + Misbehaving(*peer, 100, strprintf("getpkgtxns size = %u", num_txns)); + return; + } + std::vector txns_requested; + txns_requested.resize(num_txns); + for (unsigned int n = 0; n < num_txns; ++n) vRecv >> txns_requested[n]; + + { + LOCK(peer->m_getdata_requests_mutex); + std::vector pkgtxns; + auto tx_relay = peer->GetTxRelay(); + const auto mempool_req = tx_relay != nullptr ? 
tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min(); + const auto now{GetTime()}; + for (const auto& wtxid : txns_requested) { + auto ptx = FindTxForGetData(*tx_relay, GenTxid::Wtxid(wtxid), mempool_req, now); + if (ptx) { + pkgtxns.push_back(ptx); + } else { + // A getpkgtxns request is all or nothing; if any of the transactions are + // unavailable, return a notfound for the full request. + break; + } + } + if (pkgtxns.size() == txns_requested.size()) { + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PKGTXNS, pkgtxns)); + } else { + std::vector notfound{{CInv{MSG_PKGTXNS, GetCombinedHash(txns_requested)}}}; + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, notfound)); + } + } + } + + if (msg_type == NetMsgType::PKGTXNS) { + if (RejectIncomingTxs(pfrom)) { + LogPrint(BCLog::NET, "\npkgtxns sent in violation of protocol peer=%d\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + if (!m_txpackagetracker) return; + unsigned int num_txns = ReadCompactSize(vRecv); + if (num_txns == 0) return; + if (num_txns > MAX_PACKAGE_COUNT) { + Misbehaving(*peer, 100, strprintf("pkgtxns size = %u", num_txns)); + return; + } + std::vector package_txns; + package_txns.resize(num_txns); + for (unsigned int n = 0; n < num_txns; n++) { + vRecv >> package_txns[n]; + } + if (const auto package_to_validate{m_txpackagetracker->ReceivedPkgTxns(pfrom.GetId(), package_txns)}) { + ProcessPackage(pfrom, package_to_validate.value()); + } + return; + } + if (msg_type == NetMsgType::PING) { if (pfrom.GetCommonVersion() > BIP0031_VERSION) { uint64_t nonce = 0; @@ -4808,6 +5186,15 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as // completed in TxRequestTracker. m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash); + } else if (inv.IsMsgAncPkgInfo() && m_enable_package_relay) { + if (!m_txpackagetracker->PkgInfoAllowed(pfrom.GetId(), inv.hash, node::RECEIVER_INIT_ANCESTOR_PACKAGES)) { + LogPrint(BCLog::NET, "\nUnsolicited ancpkginfo sent, disconnecting peer=%d\n", pfrom.GetId()); + // Unsolicited ancpkginfo or peer is not registered for package relay. + pfrom.fDisconnect = true; + } + m_txpackagetracker->ForgetPkgInfo(pfrom.GetId(), inv.hash, node::RECEIVER_INIT_ANCESTOR_PACKAGES); + } else if (inv.IsMsgPkgTxns() && m_enable_package_relay) { + m_txpackagetracker->ReceivedNotFound(pfrom.GetId(), inv.hash); } } } @@ -4925,7 +5312,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic& interrupt // by another peer that was already processed; in that case, // the extra work may not be noticed, possibly resulting in an // unnecessary 100ms delay) - if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true; + if (m_txpackagetracker->HaveTxToReconsider(peer->m_id)) fMoreWork = true; } catch (const std::exception& e) { LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name()); } catch (...) { @@ -5683,6 +6070,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Peer told you to not send transactions at that feerate? Don't bother sending it. if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; + } else if (!peer->m_package_relay) { + // If any of the parents are below the fee filter and the peer doesn't + // support package relay, they probably won't accept this transaction. 
+ // Save older peers' bandwidth by skipping this transaction. + if (auto min_parent_feerate{m_mempool.MinimumFeerateWithParents(ToGenTxid(inv))}) { + if (min_parent_feerate.value() < filterrate) continue; + } } if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; // Send @@ -5817,7 +6211,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) } // - // Message: getdata (transactions) + // Message: getdata (transactions and ancpkginfo) // std::vector> expired; auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired); @@ -5842,6 +6236,18 @@ bool PeerManagerImpl::SendMessages(CNode* pto) } } + auto requestable_orphans = m_txpackagetracker->GetOrphanRequests(pto->GetId(), current_time); + for (const auto& gtxid : requestable_orphans) { + if (AlreadyHaveTx(gtxid, /*include_orphanage=*/false)) { + m_txpackagetracker->FinalizeTransactions({gtxid.GetHash()}, {}); + } else { + vGetData.emplace_back(gtxid.IsWtxid() ? MSG_ANCPKGINFO : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash()); + if (vGetData.size() >= MAX_GETDATA_SZ) { + m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + vGetData.clear(); + } + } + } if (!vGetData.empty()) m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); diff --git a/src/net_processing.h b/src/net_processing.h index af9a02139b14a..66aa05cb5786c 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -30,6 +30,7 @@ struct CNodeStateStats { std::chrono::microseconds m_ping_wait; std::vector vHeightInFlight; bool m_relay_txs; + bool m_package_relay; CAmount m_fee_filter_received; uint64_t m_addr_processed = 0; uint64_t m_addr_rate_limited = 0; @@ -43,7 +44,8 @@ class PeerManager : public CValidationInterface, public NetEventsInterface public: static std::unique_ptr make(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, - CTxMemPool& pool, bool ignore_incoming_txs); + CTxMemPool& pool, bool ignore_incoming_txs, + bool enable_package_relay); virtual ~PeerManager() { } /** diff --git a/src/node/txpackagetracker.cpp b/src/node/txpackagetracker.cpp new file mode 100644 index 0000000000000..133552f8f9619 --- /dev/null +++ b/src/node/txpackagetracker.cpp @@ -0,0 +1,548 @@ +// Copyright (c) 2022 +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +#include +#include +#include +#include +#include +#include + +namespace node { + /** How long to wait before requesting orphan ancpkginfo/parents from an additional peer. + * Same as GETDATA_TX_INTERVAL. */ + static constexpr auto ORPHAN_ANCESTOR_GETDATA_INTERVAL{60s}; + + /** Delay to add if an orphan resolution candidate is already using a lot of memory in the + * orphanage. */ + static constexpr auto ORPHANAGE_OVERLOAD_DELAY{2s}; + +class TxPackageTracker::Impl { + /** Manages unvalidated tx data (orphan transactions for which we are downloading ancestors). */ + TxOrphanage m_orphanage; + + mutable Mutex m_mutex; + struct RegistrationState { + // All of the following bools will need to be true + /** Whether this peer allows transaction relay from us. */ + bool m_txrelay{true}; + // Whether this peer sent a BIP339 wtxidrelay message. + bool m_wtxid_relay{false}; + /** Whether this peer says they can do package relay. */ + bool m_sendpackages_received{false}; + /** Versions of package relay supported by this node. + * This is a subset of PACKAGE_RELAY_SUPPORTED_VERSIONS. 
*/ + std::set m_versions_in_common; + bool CanRelayPackages() { + return m_txrelay && m_wtxid_relay && m_sendpackages_received; + } + }; + /** Represents AncPkgInfo for which we are missing transaction data. */ + struct PackageToDownload { + /** Who provided the ancpkginfo - this is the peer whose work queue to add this package when + * all tx data is received. We expect to receive tx data from this peer. */ + const NodeId m_pkginfo_provider; + + /** When to stop trying to download this package if we haven't received tx data yet. */ + std::chrono::microseconds m_expiry; + + /** Representative wtxid, i.e. the orphan in an ancestor package. */ + const uint256 m_rep_wtxid; + + /** Map from wtxid to status (true indicates it is missing). This can be expanded to further + * states such as "already in mempool/confirmed" in the future. */ + std::map m_txdata_status; + + // Package info without wtxids doesn't make sense. + PackageToDownload() = delete; + // Constructor if you already know size. + PackageToDownload(NodeId nodeid, + std::chrono::microseconds expiry, + const uint256& rep_wtxid, + const std::map& txdata_status) : + m_pkginfo_provider{nodeid}, + m_expiry{expiry}, + m_rep_wtxid{rep_wtxid}, + m_txdata_status{txdata_status} + {} + // Returns true if any tx data is still needed. + bool MissingTxData() { + return std::any_of(m_txdata_status.cbegin(), m_txdata_status.cend(), + [](const auto pair){return pair.second;}); + } + void UpdateStatusAndCheckSize(const CTransactionRef& tx) { + auto map_iter = m_txdata_status.find(tx->GetWitnessHash()); + if (map_iter != m_txdata_status.end()) map_iter->second = false; + } + bool HasTransactionIn(const std::set& wtxidset) const { + for (const auto& keyval : m_txdata_status) { + if (wtxidset.count(keyval.first) > 0) return true; + } + return false; + } + /** Returns wtxid of representative transaction (i.e. the orphan in an ancestor package). */ + const uint256 RepresentativeWtxid() const { return m_rep_wtxid; } + /** Combined hash of all wtxids in package. */ + const uint256 GetPackageHash() const { + std::vector all_wtxids; + std::transform(m_txdata_status.cbegin(), m_txdata_status.cend(), std::back_inserter(all_wtxids), + [](const auto& mappair) { return mappair.first; }); + return GetCombinedHash(all_wtxids); + } + }; + + using PackageInfoRequestId = uint256; + PackageInfoRequestId GetPackageInfoRequestId(NodeId nodeid, const uint256& wtxid, uint32_t version) { + return (CHashWriter(SER_GETHASH, 0) << nodeid << wtxid << version).GetHash(); + } + using PackageTxnsRequestId = uint256; + PackageTxnsRequestId GetPackageTxnsRequestId(NodeId nodeid, const std::vector& wtxids) { + return (CHashWriter(SER_GETHASH, 0) << nodeid << GetCombinedHash(wtxids)).GetHash(); + } + PackageTxnsRequestId GetPackageTxnsRequestId(NodeId nodeid, const std::vector& pkgtxns) { + return (CHashWriter(SER_GETHASH, 0) << nodeid << GetPackageHash(pkgtxns)).GetHash(); + } + PackageTxnsRequestId GetPackageTxnsRequestId(NodeId nodeid, const uint256& combinedhash) { + return (CHashWriter(SER_GETHASH, 0) << nodeid << combinedhash).GetHash(); + } + /** List of all ancestor package info we're currently requesting txdata for, indexed by the + * nodeid and getpkgtxns request we would have sent them. 
*/ + std::map pending_package_info GUARDED_BY(m_mutex); + + using PendingMap = decltype(pending_package_info); + struct IteratorComparator { + template + bool operator()(const I& a, const I& b) const { return &(*a) < &(*b); } + }; + + struct PeerInfo { + // What package versions we agreed to relay. + std::set m_versions_supported; + bool SupportsVersion(uint32_t version) { return m_versions_supported.count(version) > 0; } + + std::set m_package_info_provided; + }; + + /** Stores relevant information about the peer prior to verack. Upon completion of version + * handshake, we use this information to decide whether we relay packages with this peer. */ + std::map registration_states GUARDED_BY(m_mutex); + + /** Information for each peer we relay packages with. Membership in this map is equivalent to + * whether or not we relay packages with a peer. */ + std::map info_per_peer GUARDED_BY(m_mutex); + + /** Tracks orphans for which we need to request ancestor information. All hashes stored are + * wtxids, i.e., the wtxid of the orphan. However, the is_wtxid field is used to indicate + * whether we would request the ancestor information by wtxid (via package relay) or by txid + * (via prevouts of the missing inputs). */ + TxRequestTracker orphan_request_tracker GUARDED_BY(m_mutex); + + /** Cache of package info requests sent. Used to identify unsolicited package info messages. */ + CRollingBloomFilter packageinfo_requested GUARDED_BY(m_mutex){50000, 0.000001}; + +public: + Impl() = default; + + // Orphanage Wrapper Functions + bool OrphanageHaveTx(const GenTxid& gtxid) { return m_orphanage.HaveTx(gtxid); } + CTransactionRef GetTxToReconsider(NodeId peer) { return m_orphanage.GetTxToReconsider(peer); } + int EraseOrphanTx(const uint256& txid) { return m_orphanage.EraseTx(txid); } + void DisconnectedPeer(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + if (auto it{registration_states.find(nodeid)}; it != registration_states.end()) { + registration_states.erase(it); + } + if (auto it{info_per_peer.find(nodeid)}; it != info_per_peer.end()) { + for (const auto& pkginfo_iter : it->second.m_package_info_provided) { + it->second.m_package_info_provided.erase(pkginfo_iter); + pending_package_info.erase(pkginfo_iter); + } + info_per_peer.erase(it); + } + orphan_request_tracker.DisconnectedPeer(nodeid); + m_orphanage.EraseForPeer(nodeid); + } + void BlockConnected(const CBlock& block) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + const auto wtxids_erased{m_orphanage.EraseForBlock(block)}; + std::set block_wtxids; + std::set conflicted_wtxids; + for (const CTransactionRef& ptx : block.vtx) { + block_wtxids.insert(ptx->GetWitnessHash()); + } + for (const auto& wtxid : wtxids_erased) { + if (block_wtxids.count(wtxid) == 0) { + conflicted_wtxids.insert(wtxid); + } + } + FinalizeTransactions(block_wtxids, conflicted_wtxids); + } + void LimitOrphans(unsigned int max_orphans) { m_orphanage.LimitOrphans(max_orphans); } + void AddChildrenToWorkSet(const CTransaction& tx) { m_orphanage.AddChildrenToWorkSet(tx); } + bool HaveTxToReconsider(NodeId peer) { return m_orphanage.HaveTxToReconsider(peer); } + size_t OrphanageSize() { return m_orphanage.Size(); } + + void ReceivedVersion(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + if (registration_states.find(nodeid) != registration_states.end()) return; + registration_states.insert(std::make_pair(nodeid, RegistrationState{})); + } + void 
ReceivedSendpackages(NodeId nodeid, uint32_t version) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + const auto it = registration_states.find(nodeid); + if (it == registration_states.end()) return; + it->second.m_sendpackages_received = true; + // Ignore versions we don't understand. + if (std::count(PACKAGE_RELAY_SUPPORTED_VERSIONS.cbegin(), PACKAGE_RELAY_SUPPORTED_VERSIONS.cend(), version)) { + it->second.m_versions_in_common.insert(version); + } + } + + bool ReceivedVerack(NodeId nodeid, bool txrelay, bool wtxidrelay) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + const auto& it = registration_states.find(nodeid); + if (it == registration_states.end()) return false; + it->second.m_txrelay = txrelay; + it->second.m_wtxid_relay = wtxidrelay; + const bool final_state = it->second.CanRelayPackages(); + if (final_state) { + auto [peerinfo_it, success] = info_per_peer.insert(std::make_pair(nodeid, PeerInfo{})); + peerinfo_it->second.m_versions_supported = it->second.m_versions_in_common; + } + registration_states.erase(it); + return final_state; + } + + void AddOrphanTx(NodeId nodeid, const uint256& wtxid, const CTransactionRef& tx, bool is_preferred, std::chrono::microseconds reqtime) + EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + // Skip if we weren't provided the tx and can't find the wtxid in the orphanage. + if (tx == nullptr && !m_orphanage.HaveTx(GenTxid::Wtxid(wtxid))) return; + + // Skip if already requested in the (recent-ish) past. + if (packageinfo_requested.contains(GetPackageInfoRequestId(nodeid, wtxid, RECEIVER_INIT_ANCESTOR_PACKAGES))) return; + + // Add delay to the reqtime if this peer is already using a lot of orphanage space. + if (m_orphanage.IsOverloaded(nodeid)) reqtime += ORPHANAGE_OVERLOAD_DELAY; + + auto it_peer_info = info_per_peer.find(nodeid); + if (it_peer_info != info_per_peer.end() && it_peer_info->second.SupportsVersion(RECEIVER_INIT_ANCESTOR_PACKAGES)) { + // Package relay peer: is_wtxid=true because we will be requesting via ancpkginfo. + orphan_request_tracker.ReceivedInv(nodeid, GenTxid::Wtxid(wtxid), is_preferred, reqtime); + } else { + // Even though this stores the orphan wtxid, is_wtxid=false because we will be requesting the parents via txid. 
+ orphan_request_tracker.ReceivedInv(nodeid, GenTxid::Txid(wtxid), is_preferred, reqtime); + } + + if (tx != nullptr) { + m_orphanage.AddTx(tx, nodeid); + } else { + m_orphanage.AddTx(m_orphanage.GetTx(wtxid), nodeid); + } + + } + size_t CountInFlight(NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + auto count{orphan_request_tracker.CountInFlight(nodeid)}; + if (auto it{info_per_peer.find(nodeid)}; it != info_per_peer.end()) { + count += it->second.m_package_info_provided.size(); + } + return count; + } + size_t Count(NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + auto count{orphan_request_tracker.Count(nodeid)}; + if (auto it{info_per_peer.find(nodeid)}; it != info_per_peer.end()) { + count += it->second.m_package_info_provided.size(); + } + return count; + } + + void ExpirePackageToDownload(NodeId nodeid, std::chrono::microseconds current_time) + EXCLUSIVE_LOCKS_REQUIRED(m_mutex) + { + AssertLockHeld(m_mutex); + auto peer_info_it = info_per_peer.find(nodeid); + if (peer_info_it == info_per_peer.end()) return; + std::set to_expire; + for (const auto& pkginfo_iter : peer_info_it->second.m_package_info_provided) { + const auto& packageinfo = pkginfo_iter->second; + if (packageinfo.m_expiry < current_time) { + LogPrint(BCLog::TXPACKAGES, "\nExpiring package info for tx %s from peer=%d\n", + packageinfo.RepresentativeWtxid().ToString(), nodeid); + m_orphanage.EraseOrphanOfPeer(pkginfo_iter->second.m_rep_wtxid, nodeid); + to_expire.insert(pkginfo_iter->first); + } + } + for (const auto& packageid : to_expire) { + auto pending_iter = pending_package_info.find(packageid); + Assume(pending_iter != pending_package_info.end()); + if (pending_iter != pending_package_info.end()) { + peer_info_it->second.m_package_info_provided.erase(pending_iter); + pending_package_info.erase(pending_iter); + } + } + } + std::vector GetOrphanRequests(NodeId nodeid, std::chrono::microseconds current_time) + EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + // Expire packages we were trying to download tx data for + ExpirePackageToDownload(nodeid, current_time); + std::vector> expired; + auto tracker_requestable = orphan_request_tracker.GetRequestable(nodeid, current_time, &expired); + for (const auto& entry : expired) { + LogPrint(BCLog::TXPACKAGES, "\nTimeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "ancpkginfo" : "orphan parent", + entry.second.GetHash().ToString(), entry.first); + } + // Get getdata requests we should send + std::vector results; + for (const auto& gtxid : tracker_requestable) { + if (gtxid.IsWtxid()) { + Assume(info_per_peer.find(nodeid) != info_per_peer.end()); + // Add the orphan's wtxid as-is. 
+ LogPrint(BCLog::TXPACKAGES, "\nResolving orphan %s, requesting by ancpkginfo from peer=%d\n", gtxid.GetHash().ToString(), nodeid); + results.emplace_back(gtxid); + packageinfo_requested.insert(GetPackageInfoRequestId(nodeid, gtxid.GetHash(), RECEIVER_INIT_ANCESTOR_PACKAGES)); + orphan_request_tracker.RequestedTx(nodeid, gtxid.GetHash(), current_time + ORPHAN_ANCESTOR_GETDATA_INTERVAL); + } else { + LogPrint(BCLog::TXPACKAGES, "\nResolving orphan %s, requesting by txids of parents from peer=%d\n", gtxid.GetHash().ToString(), nodeid); + const auto ptx = m_orphanage.GetTx(gtxid.GetHash()); + if (!ptx) { + // We can't request ancpkginfo and we have no way of knowing what the missing + // parents are (it could also be that the orphan has already been resolved). + // Give up. + orphan_request_tracker.ForgetTxHash(gtxid.GetHash()); + LogPrint(BCLog::TXPACKAGES, "\nForgetting orphan %s from peer=%d\n", gtxid.GetHash().ToString(), nodeid); + continue; + } + // Add the orphan's parents. Net processing will filter out what we already have. + // Deduplicate parent txids, so that we don't have to loop over + // the same parent txid more than once down below. + std::vector unique_parents; + unique_parents.reserve(ptx->vin.size()); + for (const auto& txin : ptx->vin) { + // We start with all parents, and then remove duplicates below. + unique_parents.push_back(txin.prevout.hash); + } + std::sort(unique_parents.begin(), unique_parents.end()); + unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); + for (const auto& txid : unique_parents) { + results.emplace_back(GenTxid::Txid(txid)); + } + // Mark the orphan as requested + orphan_request_tracker.RequestedTx(nodeid, gtxid.GetHash(), current_time + ORPHAN_ANCESTOR_GETDATA_INTERVAL); + } + } + if (!results.empty()) LogPrint(BCLog::TXPACKAGES, "\nRequesting %u items from peer=%d\n", results.size(), nodeid); + return results; + } + void FinalizeTransactions(const std::set& valid, const std::set& invalid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + // Do a linear search of all packages. This operation should not be expensive as we don't + // expect to be relaying more than 1 package per peer. Nonetheless, process sets together + // to be more efficient. + std::set to_erase; + for (const auto& [packageid, packageinfo] : pending_package_info) { + const auto& rep_wtxid = packageinfo.RepresentativeWtxid(); + if (valid.count(rep_wtxid) > 0 || invalid.count(rep_wtxid) > 0) { + // We have already made a final decision on the transaction of interest. + // There is no need to request more information from other peers. + to_erase.insert(packageid); + orphan_request_tracker.ForgetTxHash(rep_wtxid); + } else if (packageinfo.HasTransactionIn(invalid)) { + // This package info is known to contain an invalid transaction; don't continue + // trying to download or validate it. + to_erase.insert(packageid); + // However, as it's possible for this information to be incorrect (e.g. a peer + // purposefully trying to get us to reject the orphan by providing package info + // containing an invalid transaction), don't prevent further orphan resolution + // attempts with other peers. + } else { + // FIXME: Some packages may need less txdata now. + // It's fine not to do this *for now* since we always request all missing txdata + // from the same peer. 
+ } + } + for (const auto& packageid : to_erase) { + auto pending_iter = pending_package_info.find(packageid); + Assume(pending_iter != pending_package_info.end()); + if (pending_iter != pending_package_info.end()) { + auto peer_info_it = info_per_peer.find(pending_iter->second.m_pkginfo_provider); + Assume(peer_info_it != info_per_peer.end()); + if (peer_info_it != info_per_peer.end()) { + peer_info_it->second.m_package_info_provided.erase(pending_iter); + } + pending_package_info.erase(pending_iter); + } + } + } + bool PkgInfoAllowed(NodeId nodeid, const uint256& wtxid, uint32_t version) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + if (info_per_peer.find(nodeid) == info_per_peer.end()) { + return false; + } + auto peer_info = info_per_peer.find(nodeid)->second; + const auto packageid{GetPackageInfoRequestId(nodeid, wtxid, version)}; + if (!packageinfo_requested.contains(packageid)) { + return false; + } + // They already responded to this request. + for (const auto& pkginfo_iter : peer_info.m_package_info_provided) { + if (wtxid == pkginfo_iter->second.m_rep_wtxid) return false; + } + return true; + } + void ForgetPkgInfo(NodeId nodeid, const uint256& rep_wtxid, uint32_t pkginfo_version) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + if (pkginfo_version == RECEIVER_INIT_ANCESTOR_PACKAGES) { + orphan_request_tracker.ReceivedResponse(nodeid, rep_wtxid); + } + } + + bool ReceivedAncPkgInfo(NodeId nodeid, const uint256& rep_wtxid, const std::map& txdata_status, + const std::vector& missing_wtxids, std::chrono::microseconds expiry) + EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + AssertLockNotHeld(m_mutex); + LOCK(m_mutex); + auto peer_info_it = info_per_peer.find(nodeid); + if (peer_info_it == info_per_peer.end()) return true; + // We haven't fully resolved this orphan yet - we still need to download the txdata for each + // ancestor - so don't call ForgetTxHash(), as it is not guaranteed we will get all the + // information from this peer. Also don't call ReceivedResponse(), as doing so would trigger + // the orphan_request_tracker to select other candidate peers for orphan resolution. Stay + // in the REQUESTED, not COMPLETED, state. + // + // Instead, reset the timeout (another ORPHAN_ANCESTOR_GETDATA_INTERVAL) to give this peer + // more time to respond to our second round of requests. After that timeout, the + // orphan_request_tracker will select additional candidate peers for orphan resolution. 
+        orphan_request_tracker.ResetRequestTimeout(nodeid, rep_wtxid, ORPHAN_ANCESTOR_GETDATA_INTERVAL);
+        const auto pkgtxnsid{GetPackageTxnsRequestId(nodeid, missing_wtxids)};
+        const auto [it, success] = pending_package_info.emplace(pkgtxnsid,
+            PackageToDownload{nodeid, expiry, rep_wtxid, txdata_status});
+        peer_info_it->second.m_package_info_provided.emplace(it);
+        return false;
+    }
+    void ReceivedNotFound(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+    {
+        AssertLockNotHeld(m_mutex);
+        LOCK(m_mutex);
+        auto peer_info_it = info_per_peer.find(nodeid);
+        if (peer_info_it == info_per_peer.end()) return;
+        const auto pending_iter{pending_package_info.find(GetPackageTxnsRequestId(nodeid, hash))};
+        if (pending_iter != pending_package_info.end()) {
+            auto& pendingpackage{pending_iter->second};
+            LogPrint(BCLog::TXPACKAGES, "\nReceived notfound for package (tx %s) from peer=%d\n", pendingpackage.RepresentativeWtxid().ToString(), nodeid);
+        }
+    }
+    std::optional ReceivedPkgTxns(NodeId nodeid, const std::vector& package_txns)
+        EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+    {
+        AssertLockNotHeld(m_mutex);
+        LOCK(m_mutex);
+        auto peer_info_it = info_per_peer.find(nodeid);
+        if (peer_info_it == info_per_peer.end()) return std::nullopt;
+        const auto pending_iter{pending_package_info.find(GetPackageTxnsRequestId(nodeid, package_txns))};
+        if (pending_iter == pending_package_info.end()) {
+            // For whatever reason, we've been sent a pkgtxns that doesn't correspond to a pending
+            // package. It's possible we already admitted all the transactions, or this response
+            // arrived past the request expiry. Drop it on the ground.
+            return std::nullopt;
+        }
+        std::vector unvalidated_txdata(package_txns.cbegin(), package_txns.cend());
+        auto& pendingpackage{pending_iter->second};
+        LogPrint(BCLog::TXPACKAGES, "\nReceived tx data for package (tx %s) from peer=%d\n", pendingpackage.RepresentativeWtxid().ToString(), nodeid);
+        // Add the other orphanage transactions before updating pending packages map.
+        for (const auto& [wtxid, _] : pendingpackage.m_txdata_status) {
+            if (m_orphanage.HaveTx(GenTxid::Wtxid(wtxid))) {
+                unvalidated_txdata.push_back(m_orphanage.GetTx(wtxid));
+            }
+        }
+        // Only update this pending package's info. We would have made a separate txdata request
+        // for any other package that also requires this transaction.
+        // Update the status of each transaction received; nothing should still be missing afterward.
+        for (const auto& tx : package_txns) {
+            pendingpackage.UpdateStatusAndCheckSize(tx);
+        }
+        Assume(!pendingpackage.MissingTxData()); // FIXME: is this possible when honest?
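(Editorial aside on the lookup just above: the pending package is found via GetPackageTxnsRequestId(nodeid, package_txns), which hashes the peer id together with the package's sorted wtxids, so the match does not depend on the order in which the peer listed the transactions in its pkgtxns response. A minimal illustration using the GetPackageHash() helper added later in this patch; txA and txB are hypothetical transactions, not part of the patch.)

```cpp
#include <cassert>
#include <policy/packages.h>
#include <primitives/transaction.h>

// Illustration only: the combined package id is independent of transaction order,
// because the wtxids are sorted before hashing.
void PackageHashOrderSketch(const CTransactionRef& txA, const CTransactionRef& txB)
{
    const uint256 hash_ab{GetPackageHash({txA, txB})};
    const uint256 hash_ba{GetPackageHash({txB, txA})};
    assert(hash_ab == hash_ba); // same id either way, so the pending-package lookup succeeds
}
```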
+ return PackageToValidate{pendingpackage.m_pkginfo_provider, pendingpackage.RepresentativeWtxid(), + pendingpackage.GetPackageHash(), unvalidated_txdata}; + } +}; + +TxPackageTracker::TxPackageTracker() : m_impl{std::make_unique()} {} +TxPackageTracker::~TxPackageTracker() = default; + +bool TxPackageTracker::OrphanageHaveTx(const GenTxid& gtxid) { return m_impl->OrphanageHaveTx(gtxid); } +CTransactionRef TxPackageTracker::GetTxToReconsider(NodeId peer) { return m_impl->GetTxToReconsider(peer); } +int TxPackageTracker::EraseOrphanTx(const uint256& txid) { return m_impl->EraseOrphanTx(txid); } +void TxPackageTracker::DisconnectedPeer(NodeId peer) { m_impl->DisconnectedPeer(peer); } +void TxPackageTracker::BlockConnected(const CBlock& block) { m_impl->BlockConnected(block); } +void TxPackageTracker::LimitOrphans(unsigned int max_orphans) { m_impl->LimitOrphans(max_orphans); } +void TxPackageTracker::AddChildrenToWorkSet(const CTransaction& tx) { m_impl->AddChildrenToWorkSet(tx); } +bool TxPackageTracker::HaveTxToReconsider(NodeId peer) { return m_impl->HaveTxToReconsider(peer); } +size_t TxPackageTracker::OrphanageSize() { return m_impl->OrphanageSize(); } +void TxPackageTracker::ReceivedVersion(NodeId nodeid) { m_impl->ReceivedVersion(nodeid); } +void TxPackageTracker::ReceivedSendpackages(NodeId nodeid, uint32_t version) { m_impl->ReceivedSendpackages(nodeid, version); } +bool TxPackageTracker::ReceivedVerack(NodeId nodeid, bool txrelay, bool wtxidrelay) { + return m_impl->ReceivedVerack(nodeid, txrelay, wtxidrelay); +} +void TxPackageTracker::AddOrphanTx(NodeId nodeid, const uint256& wtxid, const CTransactionRef& tx, bool is_preferred, std::chrono::microseconds reqtime) +{ + m_impl->AddOrphanTx(nodeid, wtxid, tx, is_preferred, reqtime); +} +size_t TxPackageTracker::CountInFlight(NodeId nodeid) const { return m_impl->CountInFlight(nodeid); } +size_t TxPackageTracker::Count(NodeId nodeid) const { return m_impl->Count(nodeid); } +std::vector TxPackageTracker::GetOrphanRequests(NodeId nodeid, std::chrono::microseconds current_time) { + return m_impl->GetOrphanRequests(nodeid, current_time); +} +void TxPackageTracker::FinalizeTransactions(const std::set& valid, const std::set& invalid) +{ + m_impl->FinalizeTransactions(valid, invalid); +} +bool TxPackageTracker::PkgInfoAllowed(NodeId nodeid, const uint256& wtxid, uint32_t version) +{ + return m_impl->PkgInfoAllowed(nodeid, wtxid, version); +} +void TxPackageTracker::ForgetPkgInfo(NodeId nodeid, const uint256& rep_wtxid, uint32_t pkginfo_version) +{ + m_impl->ForgetPkgInfo(nodeid, rep_wtxid, pkginfo_version); +} +bool TxPackageTracker::ReceivedAncPkgInfo(NodeId nodeid, const uint256& rep_wtxid, const std::map& txdata_status, + const std::vector& missing_wtxids, std::chrono::microseconds expiry) +{ + return m_impl->ReceivedAncPkgInfo(nodeid, rep_wtxid, txdata_status, missing_wtxids, expiry); +} +void TxPackageTracker::ReceivedNotFound(NodeId nodeid, const uint256& hash) { m_impl->ReceivedNotFound(nodeid, hash); } +std::optional TxPackageTracker::ReceivedPkgTxns(NodeId nodeid, + const std::vector& package_txns) +{ + return m_impl->ReceivedPkgTxns(nodeid, package_txns); +} +} // namespace node diff --git a/src/node/txpackagetracker.h b/src/node/txpackagetracker.h new file mode 100644 index 0000000000000..e3e42d739abce --- /dev/null +++ b/src/node/txpackagetracker.h @@ -0,0 +1,148 @@ +// Copyright (c) 2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or 
http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_TXPACKAGETRACKER_H
+#define BITCOIN_NODE_TXPACKAGETRACKER_H
+
+#include
+#include
+
+#include
+#include
+#include
+
+class CBlock;
+class TxOrphanage;
+namespace node {
+static constexpr bool DEFAULT_ENABLE_PACKAGE_RELAY{false};
+static constexpr uint32_t RECEIVER_INIT_ANCESTOR_PACKAGES{0};
+static std::vector PACKAGE_RELAY_SUPPORTED_VERSIONS = {
+    RECEIVER_INIT_ANCESTOR_PACKAGES,
+};
+
+class TxPackageTracker {
+    class Impl;
+    const std::unique_ptr m_impl;
+
+public:
+    explicit TxPackageTracker();
+    ~TxPackageTracker();
+
+    // Orphanage wrapper functions
+    /** Check if we already have an orphan transaction (by txid or wtxid) */
+    bool OrphanageHaveTx(const GenTxid& gtxid);
+
+    /** Extract a transaction from a peer's work set
+     * Returns nullptr if there are no transactions to work on.
+     * Otherwise returns the transaction reference, and removes
+     * it from the work set.
+     */
+    CTransactionRef GetTxToReconsider(NodeId peer);
+
+    /** Erase an orphan by txid */
+    int EraseOrphanTx(const uint256& txid);
+
+    /** Erase all orphans announced by a peer (eg, after that peer disconnects) */
+    void DisconnectedPeer(NodeId peer);
+
+    /** Erase all orphans included in or invalidated by a new block */
+    void BlockConnected(const CBlock& block);
+
+    /** Limit the orphanage to the given maximum */
+    void LimitOrphans(unsigned int max_orphans);
+
+    /** Add any orphans that list a particular tx as a parent into the from peer's work set */
+    void AddChildrenToWorkSet(const CTransaction& tx);
+
+    /** Does this peer have any orphans to validate? */
+    bool HaveTxToReconsider(NodeId peer);
+
+    /** Return how many entries exist in the orphanage */
+    size_t OrphanageSize();
+
+    std::vector GetVersions() { return PACKAGE_RELAY_SUPPORTED_VERSIONS; }
+
+    // We expect this to be called only once
+    void ReceivedVersion(NodeId nodeid);
+    void ReceivedSendpackages(NodeId nodeid, uint32_t version);
+    // Finalize the registration state.
+    bool ReceivedVerack(NodeId nodeid, bool txrelay, bool wtxidrelay);
+
+    /** Received an announcement from this peer for a tx we already know is an orphan; should be
+     * called for every peer that announces the tx, even if they are not a package relay peer.
+     * The orphan request tracker will decide when to request what from which peer - use
+     * GetOrphanRequests().
+     */
+    void AddOrphanTx(NodeId nodeid, const uint256& wtxid, const CTransactionRef& tx, bool is_preferred, std::chrono::microseconds reqtime);
+
+    /** Number of packages we are working on with this peer. Includes any entries in the orphan
+     * tracker, in-flight orphan parent requests (1 per orphan regardless of how many missing
+     * parents were requested), package info requests, tx data download, and packages in the
+     * validation queue. */
+    size_t Count(NodeId nodeid) const;
+
+    /** Number of packages we are currently working on with this peer (i.e. reserving memory for
+     * storing orphan(s)). Includes in-flight package info requests, tx data download, and packages
+     * in the validation queue. Excludes entries in the orphan tracker that are just candidates. */
+    size_t CountInFlight(NodeId nodeid) const;
+
+    /** Get list of requests that should be sent to resolve orphans. These may be wtxids to send
+     * getdata(ANCPKGINFO) or txids corresponding to parents. Automatically marks the orphans as
+     * having outgoing requests.
*/ + std::vector GetOrphanRequests(NodeId nodeid, std::chrono::microseconds current_time); + + /** Update transactions for which we have made "final" decisions: transactions that have + * confirmed in a block, conflicted due to a block, or added to the mempool already. + * Should be called on new block: valid=block transactions, invalid=conflicts. + * Should be called when tx is added to mempool. + * Should not be called when a tx fails validation. + * */ + void FinalizeTransactions(const std::set& valid, const std::set& invalid); + + /** Whether a package info message is allowed: + * - We agreed to relay packages of this version with this peer. + * - We solicited this package info. + * Returns false if the peer should be disconnected. */ + bool PkgInfoAllowed(NodeId nodeid, const uint256& wtxid, uint32_t version); + + /** Record receipt of a notfound message for pkginfo. */ + void ForgetPkgInfo(NodeId nodeid, const uint256& rep_wtxid, uint32_t pkginfo_version); + + /** Record receipt of an ancpkginfo, which transactions are missing (and requested), + * and when to expire it. */ + bool ReceivedAncPkgInfo(NodeId nodeid, const uint256& rep_wtxid, const std::map& txdata_status, + const std::vector& missing_wtxids, std::chrono::microseconds expiry); + + /** Record receipt of notfound message for pkgtxns. */ + void ReceivedNotFound(NodeId nodeid, const uint256& hash); + + struct PackageToValidate { + /** Who provided the package info. */ + const NodeId m_info_provider; + /** Representative transaction, i.e. orphan in an ancestor package. */ + const uint256 m_rep_wtxid; + /** Combined hash of all transactions in package info. Used to cache failure. */ + const uint256 m_pkginfo_hash; + /** Transactions to submit for mempool validation. */ + const Package m_unvalidated_txns; + + PackageToValidate() = delete; + PackageToValidate(NodeId info_provider, + const uint256& rep_wtxid, + const uint256& pkginfo_hash, + const Package& txns) : + m_info_provider{info_provider}, + m_rep_wtxid{rep_wtxid}, + m_pkginfo_hash{pkginfo_hash}, + m_unvalidated_txns{txns} + {} + }; + + /** If there is a package that is missing this tx data, updates the PendingPackage and + * returns a PackageToValidate including the other txdata stored in the orphanage. + */ + std::optional ReceivedPkgTxns(NodeId nodeid, const std::vector& package_txns); +}; +} // namespace node +#endif // BITCOIN_NODE_TXPACKAGETRACKER_H diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp index 6e70a94088a2d..5aa7d1495cc1a 100644 --- a/src/policy/packages.cpp +++ b/src/policy/packages.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include @@ -13,27 +13,9 @@ #include #include #include -#include -bool CheckPackage(const Package& txns, PackageValidationState& state) +bool IsSorted(const Package& txns) { - const unsigned int package_count = txns.size(); - - if (package_count > MAX_PACKAGE_COUNT) { - return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-many-transactions"); - } - - const int64_t total_size = std::accumulate(txns.cbegin(), txns.cend(), 0, - [](int64_t sum, const auto& tx) { return sum + GetVirtualTransactionSize(*tx); }); - // If the package only contains 1 tx, it's better to report the policy violation on individual tx size. - if (package_count > 1 && total_size > MAX_PACKAGE_SIZE * 1000) { - return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-large"); - } - - // Require the package to be sorted in order of dependency, i.e. parents appear before children. 
- // An unsorted package will fail anyway on missing-inputs, but it's better to quit earlier and - // fail on something less ambiguous (missing-inputs could also be an orphan or trying to - // spend nonexistent coins). std::unordered_set later_txids; std::transform(txns.cbegin(), txns.cend(), std::inserter(later_txids, later_txids.end()), [](const auto& tx) { return tx->GetHash(); }); @@ -41,19 +23,23 @@ bool CheckPackage(const Package& txns, PackageValidationState& state) for (const auto& input : tx->vin) { if (later_txids.find(input.prevout.hash) != later_txids.end()) { // The parent is a subsequent transaction in the package. - return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-sorted"); + return false; } } later_txids.erase(tx->GetHash()); } + return true; +} +bool IsConsistent(const Package& txns) +{ // Don't allow any conflicting transactions, i.e. spending the same inputs, in a package. std::unordered_set inputs_seen; for (const auto& tx : txns) { for (const auto& input : tx->vin) { if (inputs_seen.find(input.prevout) != inputs_seen.end()) { // This input is also present in another tx in the package. - return state.Invalid(PackageValidationResult::PCKG_POLICY, "conflict-in-package"); + return false; } } // Batch-add all the inputs for a tx at a time. If we added them 1 at a time, we could @@ -65,6 +51,30 @@ bool CheckPackage(const Package& txns, PackageValidationState& state) return true; } +bool IsPackageWellFormed(const Package& txns, PackageValidationState& state, bool require_sorted) +{ + const unsigned int package_count = txns.size(); + + if (package_count > MAX_PACKAGE_COUNT) { + return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-many-transactions"); + } + + const int64_t total_size = std::accumulate(txns.cbegin(), txns.cend(), 0, + [](int64_t sum, const auto& tx) { return sum + GetVirtualTransactionSize(*tx); }); + // If the package only contains 1 tx, it's better to report the policy violation on individual tx size. + if (package_count > 1 && total_size > MAX_PACKAGE_SIZE * 1000) { + return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-large"); + } + + // Require the package to be sorted in order of dependency, i.e. parents appear before children. + // An unsorted package will fail anyway on missing-inputs, but it's better to quit earlier and + // fail on something less ambiguous (missing-inputs could also be an orphan or trying to + // spend nonexistent coins). + if (require_sorted && !IsSorted(txns)) return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-sorted"); + if (!IsConsistent(txns)) return state.Invalid(PackageValidationResult::PCKG_POLICY, "conflict-in-package"); + return true; +} + bool IsChildWithParents(const Package& package) { assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;})); @@ -81,3 +91,101 @@ bool IsChildWithParents(const Package& package) return std::all_of(package.cbegin(), package.cend() - 1, [&input_txids](const auto& ptx) { return input_txids.count(ptx->GetHash()) > 0; }); } + +// Calculates curr_tx's in-package ancestor set. If the tx spends another tx in the package, calls +// visit() for that transaction first, since any transaction's ancestor set includes its parents' +// ancestor sets. Transaction dependency cycles are not possible without breaking sha256 and +// duplicate transactions were checked in the Packageifier() ctor, so this won't recurse infinitely. 
+// After this function returns, curr_tx is guaranteed to be in the ancestor_subsets map.
+void Packageifier::visit(const CTransactionRef& curr_tx)
+{
+    const uint256& curr_txid = curr_tx->GetHash();
+    if (ancestor_subsets.count(curr_txid) > 0) return;
+    std::set my_ancestors;
+    my_ancestors.insert(curr_txid);
+    for (const auto& input : curr_tx->vin) {
+        auto parent_tx = txid_to_tx.find(input.prevout.hash);
+        if (parent_tx == txid_to_tx.end()) continue;
+        if (ancestor_subsets.count(parent_tx->first) == 0) {
+            visit(parent_tx->second);
+        }
+        auto parent_ancestor_set = ancestor_subsets.find(parent_tx->first);
+        my_ancestors.insert(parent_ancestor_set->second.cbegin(), parent_ancestor_set->second.cend());
+    }
+    ancestor_subsets.insert(std::make_pair(curr_txid, my_ancestors));
+}
+
+Packageifier::Packageifier(const Package& txns_in)
+{
+    // Duplicate transactions are not allowed, as they will result in infinite visit() recursion.
+    Assume(IsConsistent(txns_in));
+    // Populate txid_to_tx for quick lookup.
+    std::transform(txns_in.cbegin(), txns_in.cend(), std::inserter(txid_to_tx, txid_to_tx.end()),
+        [](const auto& tx) { return std::make_pair(tx->GetHash(), tx); });
+    // DFS-based algorithm to sort transactions by ancestor count and populate ancestor_subsets cache.
+    // Best-case runtime occurs when the package is already sorted and no recursive calls happen.
+    // Exclusion from ancestor_subsets is equivalent to not yet being fully processed.
+    size_t i{0};
+    while (ancestor_subsets.size() < txns_in.size() && i < txns_in.size()) {
+        const auto& tx = txns_in[i];
+        if (ancestor_subsets.count(tx->GetHash()) == 0) visit(tx);
+        Assume(ancestor_subsets.count(tx->GetHash()) == 1);
+        ++i;
+    }
+    txns = txns_in;
+    // Sort by the number of in-package ancestors.
+    std::sort(txns.begin(), txns.end(), [&](const CTransactionRef& a, const CTransactionRef& b) -> bool {
+        auto a_ancestors = ancestor_subsets.find(a->GetHash());
+        auto b_ancestors = ancestor_subsets.find(b->GetHash());
+        return a_ancestors->second.size() < b_ancestors->second.size();
+    });
+    Assume(IsSorted(txns));
+}
+
+std::optional> Packageifier::GetAncestorSet(const CTransactionRef& tx)
+{
+    auto ancestor_set = ancestor_subsets.find(tx->GetHash());
+    std::vector result;
+    for (const auto& txid : ancestor_set->second) {
+        if (banned_txns.find(txid) != banned_txns.end()) {
+            return std::nullopt;
+        }
+    }
+    result.reserve(ancestor_set->second.size());
+    for (const auto& txid : ancestor_set->second) {
+        auto it = txid_to_tx.find(txid);
+        if (excluded_txns.find(txid) == excluded_txns.end()) {
+            result.push_back(it->second);
+        }
+    }
+    std::sort(result.begin(), result.end(), [&](const CTransactionRef& a, const CTransactionRef& b) -> bool {
+        auto a_ancestors = ancestor_subsets.find(a->GetHash());
+        auto b_ancestors = ancestor_subsets.find(b->GetHash());
+        return a_ancestors->second.size() < b_ancestors->second.size();
+    });
+    return result;
+}
+
+void Packageifier::Exclude(const CTransactionRef& transaction)
+{
+    excluded_txns.insert(transaction->GetHash());
+}
+void Packageifier::Ban(const CTransactionRef& transaction)
+{
+    banned_txns.insert(transaction->GetHash());
+}
+
+uint256 GetCombinedHash(const std::vector& wtxids)
+{
+    std::vector wtxids_copy(wtxids.cbegin(), wtxids.cend());
+    std::sort(wtxids_copy.begin(), wtxids_copy.end());
+    return (CHashWriter(SER_GETHASH, 0) << wtxids_copy).GetHash();
+}
+uint256 GetPackageHash(const std::vector& transactions)
+{
+    std::vector wtxids_copy;
+    std::transform(transactions.cbegin(), transactions.cend(),
std::back_inserter(wtxids_copy), + [](const auto& tx){ return tx->GetWitnessHash(); }); + std::sort(wtxids_copy.begin(), wtxids_copy.end()); + return (CHashWriter(SER_GETHASH, 0) << wtxids_copy).GetHash(); +} diff --git a/src/policy/packages.h b/src/policy/packages.h index 0a0e7cf6bb85e..7980a1094d6fc 100644 --- a/src/policy/packages.h +++ b/src/policy/packages.h @@ -9,8 +9,11 @@ #include #include #include +#include +#include #include +#include #include /** Default maximum number of transactions in a package. */ @@ -45,13 +48,27 @@ using Package = std::vector; class PackageValidationState : public ValidationState {}; +/** If any direct dependencies exist between transactions (i.e. a child spending the output of a + * parent), checks that all parents appear somewhere in the list before their respective children. + * This function cannot detect indirect dependencies (e.g. a transaction's grandparent if its parent + * is not present). + * @returns true if sorted. False if any tx spends the output of a tx that appears later in txns. + */ +bool IsSorted(const Package& txns); + +/** Checks that none of the transactions conflict, i.e., spend the same prevout. Consequently also + * checks that there are no duplicate transactions. + * @returns true if there are no conflicts. False if any two transactions spend the same prevout. + * */ +bool IsConsistent(const Package& txns); + /** Context-free package policy checks: * 1. The number of transactions cannot exceed MAX_PACKAGE_COUNT. * 2. The total virtual size cannot exceed MAX_PACKAGE_SIZE. * 3. If any dependencies exist between transactions, parents must appear before children. * 4. Transactions cannot conflict, i.e., spend the same inputs. */ -bool CheckPackage(const Package& txns, PackageValidationState& state); +bool IsPackageWellFormed(const Package& txns, PackageValidationState& state, bool require_sorted); /** Context-free check that a package is exactly one child and its parents; not all parents need to * be present, but the package must not contain any transactions that are not the child's parents. @@ -59,4 +76,48 @@ bool CheckPackage(const Package& txns, PackageValidationState& state); */ bool IsChildWithParents(const Package& package); +class Packageifier +{ + /** Transactions sorted topologically (see IsSorted()). */ + Package txns; + /** Map from txid to transaction for quick lookup. */ + std::map txid_to_tx; + /** Cache of the in-package ancestors for each transaction, indexed by txid. */ + std::map> ancestor_subsets; + /** Txids of transactions to exclude when returning ancestor subsets.*/ + std::unordered_set excluded_txns; + /** Txids of transactions that are banned. Return nullopt from GetAncestorSet() if it contains + * any of these transactions.*/ + std::unordered_set banned_txns; + + /** Helper function for recursively constructing ancestor caches in ctor. */ + void visit(const CTransactionRef&); +public: + /** Constructs ancestor package, sorting the transactions topologically and constructing the + * txid_to_tx and ancestor_subsets maps. It is ok if the input txns is not sorted. + * Expects: + * - No duplicate transactions. + * - No conflicts between transactions. + * - txns is of reasonable size (e.g. below MAX_PACKAGE_COUNT) to limit recursion depth + */ + Packageifier(const Package& txns); + /** Returns the transactions, in ascending order of number of in-package ancestors. */ + Package Txns() const { return txns; } + /** Get the ancestor subpackage for a tx, sorted so that ancestors appear before descendants. 
+ * The list includes the tx. If this transaction depends on a Banned tx, returns std::nullopt. + * If one or more ancestors have been Excluded, they will not appear in the result. */ + std::optional> GetAncestorSet(const CTransactionRef& tx); + /** From now on, exclude this tx from any result in GetAncestorSet(). Does not affect Txns(). */ + void Exclude(const CTransactionRef& transaction); + /** Mark a transaction as "banned." From now on, if this transaction is present in the ancestor + * set, GetAncestorSet() should return std::nullopt for that tx. Does not affect Txns(). */ + void Ban(const CTransactionRef& transaction); +}; + + +/** Get the hash of these wtxids, concatenated in lexicographical order. */ +uint256 GetCombinedHash(const std::vector& wtxids); +/** Get the hash of these transactions' wtxids, concatenated in lexicographical order. */ +uint256 GetPackageHash(const std::vector& transactions); + #endif // BITCOIN_POLICY_PACKAGES_H diff --git a/src/protocol.cpp b/src/protocol.cpp index aa59bae6ffb8d..75793afca4f2f 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -45,6 +45,10 @@ const char *GETCFCHECKPT="getcfcheckpt"; const char *CFCHECKPT="cfcheckpt"; const char *WTXIDRELAY="wtxidrelay"; const char *SENDTXRCNCL="sendtxrcncl"; +const char *SENDPACKAGES="sendpackages"; +const char *ANCPKGINFO="ancpkginfo"; +const char *GETPKGTXNS="getpkgtxns"; +const char *PKGTXNS="pkgtxns"; } // namespace NetMsgType /** All known message types. Keep this in the same order as the list of @@ -86,6 +90,10 @@ const static std::string allNetMessageTypes[] = { NetMsgType::CFCHECKPT, NetMsgType::WTXIDRELAY, NetMsgType::SENDTXRCNCL, + NetMsgType::SENDPACKAGES, + NetMsgType::ANCPKGINFO, + NetMsgType::GETPKGTXNS, + NetMsgType::PKGTXNS, }; const static std::vector allNetMessageTypesVec(std::begin(allNetMessageTypes), std::end(allNetMessageTypes)); @@ -164,6 +172,7 @@ std::string CInv::GetCommand() const case MSG_BLOCK: return cmd.append(NetMsgType::BLOCK); case MSG_FILTERED_BLOCK: return cmd.append(NetMsgType::MERKLEBLOCK); case MSG_CMPCT_BLOCK: return cmd.append(NetMsgType::CMPCTBLOCK); + case MSG_ANCPKGINFO: return cmd.append(NetMsgType::ANCPKGINFO); default: throw std::out_of_range(strprintf("CInv::GetCommand(): type=%d unknown type", type)); } @@ -219,6 +228,6 @@ std::vector serviceFlagsToStr(uint64_t flags) GenTxid ToGenTxid(const CInv& inv) { - assert(inv.IsGenTxMsg()); + assert(inv.IsGenTxMsg() || inv.IsMsgAncPkgInfo()); return inv.IsMsgWtx() ? GenTxid::Wtxid(inv.hash) : GenTxid::Txid(inv.hash); } diff --git a/src/protocol.h b/src/protocol.h index cbcd400fef9dd..2133abea27091 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -264,6 +264,20 @@ extern const char* WTXIDRELAY; * txreconciliation, as described by BIP 330. */ extern const char* SENDTXRCNCL; +/** + * Indicates that a node wants to relay packages, described in BIP 331. + */ +extern const char* SENDPACKAGES; +/** List of wtxids corresponding to a transaction's ancestor package. */ +extern const char* ANCPKGINFO; +/** + * Requests all or none of a list of transactions, specified by wtxid. + */ +extern const char* GETPKGTXNS; +/** + * List of transactions. + */ +extern const char* PKGTXNS; }; // namespace NetMsgType /* Get a vector of all valid message types (see above) */ @@ -471,6 +485,8 @@ enum GetDataMsg : uint32_t { // MSG_FILTERED_WITNESS_BLOCK is defined in BIP144 as reserved for future // use and remains unused. 
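(Editorial aside, stepping back to the Packageifier class declared in policy/packages.h above: a short usage sketch of the interface. This is illustrative only; `parent`, `child`, and `other` are hypothetical transactions, and the mempool/invalidity scenarios in the comments are assumed use cases rather than taken from the patch.)

```cpp
#include <policy/packages.h>
#include <primitives/transaction.h>

// Sketch: topologically sort a package and query per-transaction ancestor subsets.
// `child` spends an output of `parent`; `other` is unrelated to both.
void PackageifierSketch(const CTransactionRef& parent, const CTransactionRef& child, const CTransactionRef& other)
{
    Packageifier packageifier({child, other, parent}); // input order does not matter
    const Package sorted{packageifier.Txns()};         // ancestors now appear before descendants
    const auto child_set{packageifier.GetAncestorSet(child)}; // {parent, child}, parent first
    packageifier.Exclude(parent); // e.g. parent already accepted: future ancestor sets omit it
    packageifier.Ban(other);      // e.g. other found invalid: sets containing it become std::nullopt
    const auto other_set{packageifier.GetAncestorSet(other)}; // std::nullopt after Ban()
    (void)sorted; (void)child_set; (void)other_set;
}
```

Sorting by the size of each transaction's in-package ancestor set, as the constructor shown earlier does, is sufficient for a topological order: a descendant's ancestor set contains each of its ancestors' sets plus itself, so it is always strictly larger.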
// MSG_FILTERED_WITNESS_BLOCK = MSG_FILTERED_BLOCK | MSG_WITNESS_FLAG, + MSG_ANCPKGINFO = 6, //!< Defined in BIP331 + MSG_PKGTXNS = 7, //!< Defined in BIP331 }; /** inv message data */ @@ -494,6 +510,8 @@ class CInv bool IsMsgFilteredBlk() const { return type == MSG_FILTERED_BLOCK; } bool IsMsgCmpctBlk() const { return type == MSG_CMPCT_BLOCK; } bool IsMsgWitnessBlk() const { return type == MSG_WITNESS_BLOCK; } + bool IsMsgAncPkgInfo() const { return type == MSG_ANCPKGINFO; } + bool IsMsgPkgTxns() const { return type == MSG_PKGTXNS; } // Combined-message helper methods bool IsGenTxMsg() const diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 927b4ce1fcf09..ffa8e1d3a2747 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -759,11 +759,10 @@ static RPCHelpMan savemempool() static RPCHelpMan submitpackage() { return RPCHelpMan{"submitpackage", - "Submit a package of raw transactions (serialized, hex-encoded) to local node (-regtest only).\n" + "Submit a package of raw transactions (serialized, hex-encoded) to local node.\n" "The package will be validated according to consensus and mempool policy rules. If all transactions pass, they will be accepted to mempool.\n" "This RPC is experimental and the interface may be unstable. Refer to doc/policy/packages.md for documentation on package policies.\n" - "Warning: until package relay is in use, successful submission does not mean the transaction will propagate to other nodes on the network.\n" - "Currently, each transaction is broadcasted individually after submission, which means they must meet other nodes' feerate requirements alone.\n" + "Warning: unless this node and others are using package relay (-packagerelay), successful submission does not mean the transactions will propagate throughout the network.\n" , { {"package", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of raw transactions.", @@ -802,9 +801,6 @@ static RPCHelpMan submitpackage() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - if (!Params().IsMockableChain()) { - throw std::runtime_error("submitpackage is for regression testing (-regtest mode) only"); - } const UniValue raw_transactions = request.params[0].get_array(); if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) { throw JSONRPCError(RPC_INVALID_PARAMETER, @@ -922,7 +918,7 @@ void RegisterMempoolRPCCommands(CRPCTable& t) {"blockchain", &getmempoolinfo}, {"blockchain", &getrawmempool}, {"blockchain", &savemempool}, - {"hidden", &submitpackage}, + {"rawtransactions", &submitpackage}, }; for (const auto& c : commands) { t.appendCommand(c.name, &c); diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 7ffa777ef42d9..c2f5467e2ac28 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -115,6 +115,7 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "SERVICE_NAME", "the service name if it is recognised"} }}, {RPCResult::Type::BOOL, "relaytxes", "Whether we relay transactions to this peer"}, + {RPCResult::Type::BOOL, "relaytxpackages", "Whether we relay packages with this peer"}, {RPCResult::Type::NUM_TIME, "lastsend", "The " + UNIX_EPOCH_TIME + " of the last send"}, {RPCResult::Type::NUM_TIME, "lastrecv", "The " + UNIX_EPOCH_TIME + " of the last receive"}, {RPCResult::Type::NUM_TIME, "last_transaction", "The " + UNIX_EPOCH_TIME + " of the last valid transaction received from this peer"}, @@ -243,6 +244,7 @@ static RPCHelpMan getpeerinfo() heights.push_back(height); } obj.pushKV("inflight", heights); + obj.pushKV("relaytxpackages", 
statestats.m_package_relay); obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled); obj.pushKV("addr_processed", statestats.m_addr_processed); obj.pushKV("addr_rate_limited", statestats.m_addr_rate_limited); diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index aca2b8eff079a..76f19f6b4ee19 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -132,7 +132,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management) NodeId id{0}; auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman); auto peerLogic = PeerManager::make(*connman, *m_node.addrman, nullptr, - *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, false, false); constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS; CConnman::Options options; @@ -209,7 +209,7 @@ BOOST_AUTO_TEST_CASE(block_relay_only_eviction) NodeId id{0}; auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman); auto peerLogic = PeerManager::make(*connman, *m_node.addrman, nullptr, - *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, false, false); constexpr int max_outbound_block_relay{MAX_BLOCK_RELAY_ONLY_CONNECTIONS}; constexpr int64_t MINIMUM_CONNECT_TIME{30}; @@ -273,7 +273,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) auto banman = std::make_unique(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman); auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(), - *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, false, false); CNetAddr tor_netaddr; BOOST_REQUIRE( @@ -376,7 +376,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) auto banman = std::make_unique(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman); auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(), - *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, false, false); banman->ClearBanned(); int64_t nStartTime = GetTime(); diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp index a2c477433847e..d0d532878e119 100644 --- a/src/test/orphanage_tests.cpp +++ b/src/test/orphanage_tests.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include +#include #include #include
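Finally, an editorial sketch tying together the TxPackageTracker negotiation API from node/txpackagetracker.h above. It shows the intended call order around the P2P version handshake; the peer id and flag values are hypothetical, and this is an illustration rather than code from the patch.

```cpp
#include <net.h>                    // NodeId
#include <node/txpackagetracker.h>

// Sketch of the registration flow: version -> sendpackages -> verack.
void HandshakeSketch(node::TxPackageTracker& tracker, NodeId nodeid)
{
    tracker.ReceivedVersion(nodeid);                                             // peer sent version
    tracker.ReceivedSendpackages(nodeid, node::RECEIVER_INIT_ANCESTOR_PACKAGES); // peer announced a supported version
    // Registration is finalized at verack; per RegistrationState::CanRelayPackages(), the peer
    // must also relay transactions and use wtxid relay for this to return true.
    const bool relay_packages{tracker.ReceivedVerack(nodeid, /*txrelay=*/true, /*wtxidrelay=*/true)};
    (void)relay_packages;
}
```

On success the peer is added to info_per_peer, which is presumably what the new relaytxpackages field in getpeerinfo (rpc/net.cpp above) surfaces via statestats.m_package_relay.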