Diffstat (limited to 'src')
-rw-r--r--  src/net_processing.cpp        259
-rw-r--r--  src/policy/packages.cpp        18
-rw-r--r--  src/policy/packages.h           5
-rw-r--r--  src/test/fuzz/txorphan.cpp     32
-rw-r--r--  src/test/orphanage_tests.cpp  147
-rw-r--r--  src/test/txpackage_tests.cpp   98
-rw-r--r--  src/txorphanage.cpp            74
-rw-r--r--  src/txorphanage.h               8
8 files changed, 626 insertions(+), 15 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 39ffff97d2..da4f99fb99 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -586,7 +586,7 @@ private:
* @param[in] maybe_add_extra_compact_tx Whether this tx should be added to vExtraTxnForCompact.
* Set to false if the tx has already been rejected before,
* e.g. is an orphan, to avoid adding duplicate entries.
- * Updates m_txrequest, m_recent_rejects, m_orphanage, and vExtraTxnForCompact. */
+ * Updates m_txrequest, m_recent_rejects, m_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. */
void ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result,
bool maybe_add_extra_compact_tx)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
@@ -596,6 +596,45 @@ private:
void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+ /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for
+ * individual transactions, and caches rejection for the package as a group.
+ * @param[in] senders Must contain the nodeids of the peers that provided each transaction
+ * in package, in the same order.
+ * */
+ void ProcessPackageResult(const Package& package, const PackageMempoolAcceptResult& package_result, const std::vector<NodeId>& senders)
+ EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+
+ /** A package to validate */
+ struct PackageToValidate {
+ const Package m_txns;
+ const std::vector<NodeId> m_senders;
+ /** Construct a 1-parent-1-child package. */
+ explicit PackageToValidate(const CTransactionRef& parent,
+ const CTransactionRef& child,
+ NodeId parent_sender,
+ NodeId child_sender) :
+ m_txns{parent, child},
+ m_senders {parent_sender, child_sender}
+ {}
+
+ std::string ToString() const {
+ Assume(m_txns.size() == 2);
+ return strprintf("parent %s (wtxid=%s, sender=%d) + child %s (wtxid=%s, sender=%d)",
+ m_txns.front()->GetHash().ToString(),
+ m_txns.front()->GetWitnessHash().ToString(),
+ m_senders.front(),
+ m_txns.back()->GetHash().ToString(),
+ m_txns.back()->GetWitnessHash().ToString(),
+ m_senders.back());
+ }
+ };
+
+ /** Look for a child of this transaction in the orphanage to form a 1-parent-1-child package,
+ * skipping any combinations that have already been tried. Return the resulting package along with
+ * the senders of its respective transactions, or std::nullopt if no package is found. */
+ std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid)
+ EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+
/**
* Reconsider orphan transactions after a parent has been accepted to the mempool.
*
@@ -806,7 +845,16 @@ private:
/** Stalling timeout for blocks in IBD */
std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
- bool AlreadyHaveTx(const GenTxid& gtxid)
+ /** Check whether we already have this gtxid in:
+ * - mempool
+ * - orphanage
+ * - m_recent_rejects
+ * - m_recent_rejects_reconsiderable (if include_reconsiderable = true)
+ * - m_recent_confirmed_transactions
+ * Also responsible for resetting m_recent_rejects and m_recent_rejects_reconsiderable if the
+ * chain tip has changed.
+ * */
+ bool AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);
/**
@@ -844,8 +892,32 @@ private:
* Memory used: 1.3 MB
*/
CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
+ /** Block hash of chain tip the last time we reset m_recent_rejects and
+ * m_recent_rejects_reconsiderable. */
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
+ /**
+ * Filter for:
+ * (1) wtxids of transactions that were recently rejected by the mempool but are
+ * eligible for reconsideration if submitted with other transactions.
+ * (2) packages (see GetPackageHash) we have already rejected before and should not retry.
+ *
+ * Similar to m_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers
+ * have larger mempools and thus lower minimum feerates than us.
+ *
+ * When a transaction's error is TxValidationResult::TX_RECONSIDERABLE (in a package or by
+ * itself), add its wtxid to this filter. When a package fails for any reason, add the combined
+ * hash to this filter.
+ *
+ * Upon receiving an announcement for a transaction, if it exists in this filter, do not
+ * download the txdata. When considering packages, if it exists in this filter, drop it.
+ *
+ * Reset this filter when the chain tip changes.
+ *
+ * Parameters are picked to be the same as m_recent_rejects, with the same rationale.
+ */
+ CRollingBloomFilter m_recent_rejects_reconsiderable GUARDED_BY(::cs_main){120'000, 0.000'001};
+
/*
* Filter for transactions that have been recently confirmed.
* We use this to avoid requesting transactions that have already been
@@ -2194,7 +2266,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta
//
-bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
+bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable)
{
if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
// If the chain tip has changed previously rejected transactions
@@ -2203,12 +2275,15 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
// txs a second chance.
hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
m_recent_rejects.reset();
+ m_recent_rejects_reconsiderable.reset();
}
const uint256& hash = gtxid.GetHash();
if (m_orphanage.HaveTx(gtxid)) return true;
+ if (include_reconsiderable && m_recent_rejects_reconsiderable.contains(hash)) return true;
+
{
LOCK(m_recent_confirmed_transactions_mutex);
if (m_recent_confirmed_transactions.contains(hash)) return true;
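Reviewer note: the new include_reconsiderable flag only toggles one membership test, and the same filter holds two kinds of keys: individual wtxids (cached on TX_RECONSIDERABLE) and whole-package hashes (cached when a package attempt fails). The stand-alone sketch below models that behaviour with std::set and string keys in place of CRollingBloomFilter and uint256; PackageKey, TxKnowledgeModel and the wtxid strings are illustrative stand-ins, not names introduced by this patch.

    // Simplified model of AlreadyHaveTx() and the reconsiderable-reject cache (illustration only).
    #include <algorithm>
    #include <cassert>
    #include <set>
    #include <string>
    #include <vector>

    // Stand-in for GetPackageHash(): an order-independent key over a package's wtxids.
    static std::string PackageKey(std::vector<std::string> wtxids)
    {
        std::sort(wtxids.begin(), wtxids.end());
        std::string key{"pkg:"};
        for (const auto& w : wtxids) key += w;
        return key; // the real function hashes the sorted wtxids with SHA256
    }

    struct TxKnowledgeModel {
        std::set<std::string> orphanage;
        std::set<std::string> recent_rejects;
        std::set<std::string> recent_rejects_reconsiderable;
        std::set<std::string> recent_confirmed;
        std::set<std::string> mempool;

        bool AlreadyHave(const std::string& hash, bool include_reconsiderable) const
        {
            if (orphanage.count(hash)) return true;
            // INV/tx handling passes true; the parent-request paths pass false so a low-feerate
            // parent can still be downloaded and retried as part of a 1p1c package.
            if (include_reconsiderable && recent_rejects_reconsiderable.count(hash)) return true;
            if (recent_confirmed.count(hash)) return true;
            return recent_rejects.count(hash) || mempool.count(hash);
        }
    };

    int main()
    {
        TxKnowledgeModel node;
        // A parent fails with TX_RECONSIDERABLE: its wtxid is cached.
        node.recent_rejects_reconsiderable.insert("wtxid_parent");
        // A 1p1c attempt with one particular child fails: the combined key is cached too, so that
        // exact pair is not retried until the filters are reset when the chain tip changes.
        node.recent_rejects_reconsiderable.insert(PackageKey({"wtxid_parent", "wtxid_child_a"}));

        // Announced on its own, the parent is not fetched again...
        assert(node.AlreadyHave("wtxid_parent", /*include_reconsiderable=*/true));
        // ...but as the missing parent of a new orphan it remains requestable,
        assert(!node.AlreadyHave("wtxid_parent", /*include_reconsiderable=*/false));
        // and a package with a different child has not been tried yet.
        assert(!node.recent_rejects_reconsiderable.count(PackageKey({"wtxid_parent", "wtxid_child_b"})));
    }
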
@@ -3097,7 +3172,14 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx
// See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
- m_recent_rejects.insert(ptx->GetWitnessHash().ToUint256());
+ if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) {
+ // If the result is TX_RECONSIDERABLE, add it to m_recent_rejects_reconsiderable
+ // because we should not download or submit this transaction by itself again, but may
+ // submit it as part of a package later.
+ m_recent_rejects_reconsiderable.insert(ptx->GetWitnessHash().ToUint256());
+ } else {
+ m_recent_rejects.insert(ptx->GetWitnessHash().ToUint256());
+ }
m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
@@ -3107,6 +3189,8 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx
// processing of this transaction in the event that child
// transactions are later received (resulting in
// parent-fetching by txid via the orphan-handling logic).
+ // We only add the txid if it differs from the wtxid, to avoid wasting entries in the
+ // rolling bloom filter.
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) {
m_recent_rejects.insert(ptx->GetHash().ToUint256());
m_txrequest.ForgetTxHash(ptx->GetHash());
@@ -3153,6 +3237,117 @@ void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, c
}
}
+void PeerManagerImpl::ProcessPackageResult(const Package& package, const PackageMempoolAcceptResult& package_result, const std::vector<NodeId>& senders)
+{
+ AssertLockNotHeld(m_peer_mutex);
+ AssertLockHeld(g_msgproc_mutex);
+ AssertLockHeld(cs_main);
+
+ if (package_result.m_state.IsInvalid()) {
+ m_recent_rejects_reconsiderable.insert(GetPackageHash(package));
+ }
+ // We currently only expect to process 1-parent-1-child packages. Remove if this changes.
+ if (!Assume(package.size() == 2)) return;
+
+ // No package results to look through for PCKG_POLICY or PCKG_MEMPOOL_ERROR
+ if (package_result.m_state.GetResult() == PackageValidationResult::PCKG_POLICY ||
+ package_result.m_state.GetResult() == PackageValidationResult::PCKG_MEMPOOL_ERROR) return;
+
+ // Iterate backwards to erase in-package descendants from the orphanage before they become
+ // relevant in AddChildrenToWorkSet.
+ auto package_iter = package.rbegin();
+ auto senders_iter = senders.rbegin();
+ while (package_iter != package.rend()) {
+ const auto& tx = *package_iter;
+ const NodeId nodeid = *senders_iter;
+ const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())};
+ if (Assume(it_result != package_result.m_tx_results.end())) {
+ const auto& tx_result = it_result->second;
+ switch (tx_result.m_result_type) {
+ case MempoolAcceptResult::ResultType::VALID:
+ {
+ Assume(tx_result.m_replaced_transactions.has_value());
+ std::list<CTransactionRef> empty_replacement_list;
+ ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions.value_or(empty_replacement_list));
+ break;
+ }
+ case MempoolAcceptResult::ResultType::INVALID:
+ case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS:
+ {
+ // Don't add to vExtraTxnForCompact, as these transactions should have already been
+ // added there when added to the orphanage or rejected for TX_RECONSIDERABLE.
+ // This should be updated if package submission is ever used for transactions
+ // that haven't already been validated before.
+ ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*maybe_add_extra_compact_tx=*/false);
+ break;
+ }
+ case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY:
+ {
+ // AlreadyHaveTx() should be catching transactions that are already in mempool.
+ Assume(false);
+ break;
+ }
+ }
+ }
+ package_iter++;
+ senders_iter++;
+ }
+}
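Reviewer note: ProcessPackageResult walks (package, senders) as parallel vectors in reverse so that the child is handled before the parent, and then dispatches on the per-wtxid entry in m_tx_results. A self-contained sketch of that pattern, with std containers and a small enum standing in for the real result types (all names here are placeholders):

    // Sketch of the per-transaction dispatch in ProcessPackageResult (illustration only).
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    enum class Result { VALID, INVALID, MEMPOOL_ENTRY };

    int main()
    {
        // Parallel vectors: senders[i] is the peer that provided package[i]; results are keyed by wtxid.
        std::vector<std::string> package{"wtxid_parent", "wtxid_child"};
        std::vector<int> senders{7, 9};
        std::map<std::string, Result> tx_results{
            {"wtxid_parent", Result::VALID},
            {"wtxid_child", Result::INVALID},
        };

        // Iterate backwards so the child is processed first: erasing it from the orphanage before
        // the parent's AddChildrenToWorkSet pass keeps it from being queued for reconsideration
        // right after it was just handled.
        auto package_it = package.rbegin();
        auto senders_it = senders.rbegin();
        for (; package_it != package.rend(); ++package_it, ++senders_it) {
            const auto it = tx_results.find(*package_it);
            if (it == tx_results.end()) continue; // the real code Assume()s the entry exists
            switch (it->second) {
            case Result::VALID:
                std::cout << *package_it << ": accepted; relay and erase from orphanage (peer " << *senders_it << ")\n";
                break;
            case Result::INVALID:
                std::cout << *package_it << ": rejected; cache the per-tx rejection (peer " << *senders_it << ")\n";
                break;
            case Result::MEMPOOL_ENTRY:
                std::cout << *package_it << ": already in mempool; AlreadyHaveTx should have filtered it\n";
                break;
            }
        }
    }
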
+
+std::optional<PeerManagerImpl::PackageToValidate> PeerManagerImpl::Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid)
+{
+ AssertLockNotHeld(m_peer_mutex);
+ AssertLockHeld(g_msgproc_mutex);
+ AssertLockHeld(cs_main);
+
+ const auto& parent_wtxid{ptx->GetWitnessHash()};
+
+ Assume(m_recent_rejects_reconsiderable.contains(parent_wtxid.ToUint256()));
+
+ // Prefer children from this peer. This helps prevent censorship attempts in which an attacker
+ // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake
+ // children instead of the real one provided by the honest peer.
+ const auto cpfp_candidates_same_peer{m_orphanage.GetChildrenFromSamePeer(ptx, nodeid)};
+
+ // These children should be sorted from newest to oldest. In the (probably uncommon) case
+ // of children that replace each other, this helps us accept the highest feerate (probably the
+ // most recent) one efficiently.
+ for (const auto& child : cpfp_candidates_same_peer) {
+ Package maybe_cpfp_package{ptx, child};
+ if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) {
+ return PeerManagerImpl::PackageToValidate{ptx, child, nodeid, nodeid};
+ }
+ }
+
+ // If no suitable candidate from the same peer is found, also try children that were provided by
+ // a different peer. This is useful because sometimes multiple peers announce both transactions
+ // to us, and we happen to download them from different peers (we wouldn't have known that these
+ // 2 transactions are related). We still want to find 1p1c packages then.
+ //
+ // If we start tracking all announcers of orphans, we can restrict this logic to parent + child
+ // pairs in which both were provided by the same peer, i.e. delete this step.
+ const auto cpfp_candidates_different_peer{m_orphanage.GetChildrenFromDifferentPeer(ptx, nodeid)};
+
+ // Find the first 1p1c that hasn't already been rejected. We randomize the order to not
+ // create a bias that attackers can use to delay package acceptance.
+ //
+ // Create a random permutation of the indices.
+ std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
+ std::iota(tx_indices.begin(), tx_indices.end(), 0);
+ Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
+
+ for (const auto index : tx_indices) {
+ // If we already tried a package and failed for any reason, the combined hash was
+ // cached in m_recent_rejects_reconsiderable.
+ const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index);
+ Package maybe_cpfp_package{ptx, child_tx};
+ if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) {
+ return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid, child_sender};
+ }
+ }
+ return std::nullopt;
+}
+
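Reviewer note: the selection policy above is (1) same-peer children, newest first, then (2) other peers' children in random order, always skipping pairs whose package hash is already in m_recent_rejects_reconsiderable. A stand-alone sketch of that policy, using strings for transactions and a sorted concatenation for the package key (all names hypothetical):

    // Sketch of Find1P1CPackage's candidate selection (illustration only).
    #include <algorithm>
    #include <numeric>
    #include <optional>
    #include <random>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    static std::string PackageKey(const std::string& a, const std::string& b)
    {
        return a < b ? a + b : b + a; // order-independent key, like GetPackageHash()
    }

    std::optional<std::pair<std::string, int>> PickChild(
        const std::string& parent,
        const std::vector<std::string>& same_peer_children,              // newest first
        const std::vector<std::pair<std::string, int>>& other_children,  // (child, sender)
        const std::set<std::string>& already_tried,
        std::mt19937& rng, int nodeid)
    {
        // 1) Prefer children announced by the same peer; the list is newest-first, so a replacement
        //    (likely the highest-feerate version) is tried before the transaction it replaced.
        for (const auto& child : same_peer_children) {
            if (!already_tried.count(PackageKey(parent, child))) return std::make_pair(child, nodeid);
        }
        // 2) Otherwise consider children from other peers in random order, so an attacker cannot
        //    bias which combination gets tried first.
        std::vector<size_t> indices(other_children.size());
        std::iota(indices.begin(), indices.end(), size_t{0});
        std::shuffle(indices.begin(), indices.end(), rng);
        for (const auto i : indices) {
            const auto& [child, sender] = other_children[i];
            if (!already_tried.count(PackageKey(parent, child))) return std::make_pair(child, sender);
        }
        return std::nullopt; // nothing left to try; the parent stays rejected until the tip changes
    }

    int main()
    {
        std::mt19937 rng{42};
        // The same-peer candidate was already tried (and cached) as a failed package,
        // so the child provided by peer 3 is selected instead.
        const auto picked = PickChild("parent_wtxid",
                                      /*same_peer_children=*/{"fake_child"},
                                      /*other_children=*/{{"real_child", 3}},
                                      /*already_tried=*/{PackageKey("parent_wtxid", "fake_child")},
                                      rng, /*nodeid=*/1);
        return picked && picked->first == "real_child" ? 0 : 1;
    }
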
bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
{
AssertLockHeld(g_msgproc_mutex);
@@ -4013,7 +4208,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
const GenTxid gtxid = ToGenTxid(inv);
- const bool fAlreadyHave = AlreadyHaveTx(gtxid);
+ const bool fAlreadyHave = AlreadyHaveTx(gtxid, /*include_reconsiderable=*/true);
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
AddKnownTx(*peer, inv.hash);
@@ -4318,7 +4513,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// already; and an adversary can already relay us old transactions
// (older than our recency filter) if trying to DoS us, without any need
// for witness malleation.
- if (AlreadyHaveTx(GenTxid::Wtxid(wtxid))) {
+ if (AlreadyHaveTx(GenTxid::Wtxid(wtxid), /*include_reconsiderable=*/true)) {
if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
// Always relay transactions received from peers with forcerelay
// permission, even if they were already in the mempool, allowing
@@ -4332,6 +4527,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
}
}
+
+ if (m_recent_rejects_reconsiderable.contains(wtxid)) {
+ // When a transaction is already in m_recent_rejects_reconsiderable, we shouldn't submit
+ // it by itself again. However, look for a matching child in the orphanage, as it is
+ // possible that they succeed as a package.
+ LogPrint(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n",
+ txid.ToString(), wtxid.ToString());
+ if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) {
+ const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
+ LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
+ package_result.m_state.IsValid() ? "package accepted" : "package rejected");
+ ProcessPackageResult(package_to_validate->m_txns, package_result, package_to_validate->m_senders);
+ }
+ }
// If a tx is detected by m_recent_rejects it is ignored. Because we haven't
// submitted the tx to our mempool, we won't have computed a DoS
// score for it or determined exactly why we consider it invalid.
@@ -4354,7 +4563,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
const TxValidationState& state = result.m_state;
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
- ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions.value());
+ Assume(result.m_replaced_transactions.has_value());
+ std::list<CTransactionRef> empty_replacement_list;
+ ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions.value_or(empty_replacement_list));
pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
}
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
@@ -4371,10 +4582,23 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
std::sort(unique_parents.begin(), unique_parents.end());
unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
+
+ // Distinguish between parents in m_recent_rejects and m_recent_rejects_reconsiderable.
+ // We can tolerate having up to 1 parent in m_recent_rejects_reconsiderable since we
+ // submit 1p1c packages. However, fail immediately if any are in m_recent_rejects.
+ std::optional<uint256> rejected_parent_reconsiderable;
for (const uint256& parent_txid : unique_parents) {
if (m_recent_rejects.contains(parent_txid)) {
fRejectedParents = true;
break;
+ } else if (m_recent_rejects_reconsiderable.contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) {
+ // More than 1 parent in m_recent_rejects_reconsiderable: 1p1c will not be
+ // sufficient to accept this package, so just give up here.
+ if (rejected_parent_reconsiderable.has_value()) {
+ fRejectedParents = true;
+ break;
+ }
+ rejected_parent_reconsiderable = parent_txid;
}
}
if (!fRejectedParents) {
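Reviewer note: the loop above enforces "any parent in m_recent_rejects is fatal, and at most one missing parent may be reconsiderable", since a 1-parent-1-child package can bump only a single low-feerate parent. A minimal stand-alone version of that classification, with std::set stand-ins and hypothetical names:

    // Sketch of the parent classification for a new orphan (illustration only).
    #include <optional>
    #include <set>
    #include <string>
    #include <vector>

    struct ParentCheck {
        bool rejected{false};
        std::optional<std::string> reconsiderable_parent; // the one parent a 1p1c could rescue
    };

    ParentCheck ClassifyParents(const std::vector<std::string>& unique_parents,
                                const std::set<std::string>& recent_rejects,
                                const std::set<std::string>& recent_rejects_reconsiderable,
                                const std::set<std::string>& mempool)
    {
        ParentCheck out;
        for (const auto& parent : unique_parents) {
            if (recent_rejects.count(parent)) {
                out.rejected = true; // known-bad parent: give up on this orphan
                break;
            }
            if (recent_rejects_reconsiderable.count(parent) && !mempool.count(parent)) {
                if (out.reconsiderable_parent) {
                    out.rejected = true; // two low-feerate parents: a 1p1c package cannot help
                    break;
                }
                out.reconsiderable_parent = parent;
            }
        }
        return out;
    }

    int main()
    {
        const auto res = ClassifyParents({"p1", "p2"}, /*recent_rejects=*/{},
                                         /*recent_rejects_reconsiderable=*/{"p1"}, /*mempool=*/{});
        return (!res.rejected && res.reconsiderable_parent == "p1") ? 0 : 1;
    }
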
@@ -4388,7 +4612,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// protocol for getting all unconfirmed parents.
const auto gtxid{GenTxid::Txid(parent_txid)};
AddKnownTx(*peer, parent_txid);
- if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
+ // Exclude m_recent_rejects_reconsiderable: the missing parent may have been
+ // previously rejected for being too low feerate. This orphan might CPFP it.
+ if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) AddTxAnnouncement(pfrom, gtxid, current_time);
}
if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
@@ -4420,6 +4646,19 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (state.IsInvalid()) {
ProcessInvalidTx(pfrom.GetId(), ptx, state, /*maybe_add_extra_compact_tx=*/true);
}
+ // When a transaction fails for TX_RECONSIDERABLE, look for a matching child in the
+ // orphanage, as it is possible that they succeed as a package.
+ if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) {
+ LogPrint(BCLog::TXPACKAGES, "tx %s (wtxid=%s) failed but reconsiderable, looking for child in orphanage\n",
+ txid.ToString(), wtxid.ToString());
+ if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) {
+ const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
+ LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
+ package_result.m_state.IsValid() ? "package accepted" : "package rejected");
+ ProcessPackageResult(package_to_validate->m_txns, package_result, package_to_validate->m_senders);
+ }
+ }
+
return;
}
@@ -6029,7 +6268,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
entry.second.GetHash().ToString(), entry.first);
}
for (const GenTxid& gtxid : requestable) {
- if (!AlreadyHaveTx(gtxid)) {
+ // Exclude m_recent_rejects_reconsiderable: we may be requesting a missing parent
+ // that was previously rejected for being too low feerate.
+ if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) {
LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
gtxid.GetHash().ToString(), pto->GetId());
vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp
index 3a63a9fe46..99d2a6d514 100644
--- a/src/policy/packages.cpp
+++ b/src/policy/packages.cpp
@@ -147,3 +147,21 @@ bool IsChildWithParentsTree(const Package& package)
return true;
});
}
+
+uint256 GetPackageHash(const std::vector<CTransactionRef>& transactions)
+{
+ // Create a vector of the wtxids.
+ std::vector<Wtxid> wtxids_copy;
+ std::transform(transactions.cbegin(), transactions.cend(), std::back_inserter(wtxids_copy),
+ [](const auto& tx){ return tx->GetWitnessHash(); });
+
+ // Sort in ascending order
+ std::sort(wtxids_copy.begin(), wtxids_copy.end(), [](const auto& lhs, const auto& rhs) { return lhs.GetHex() < rhs.GetHex(); });
+
+ // Get sha256 hash of the wtxids concatenated in this order
+ HashWriter hashwriter;
+ for (const auto& wtxid : wtxids_copy) {
+ hashwriter << wtxid;
+ }
+ return hashwriter.GetSHA256();
+}
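Reviewer note: sorting before hashing makes the result a function of the set of wtxids rather than of the order the caller passed them in, which is what lets this hash key m_recent_rejects_reconsiderable for packages. A tiny stand-alone illustration of that property, with std::hash over strings standing in for SHA256 over wtxids (names are placeholders):

    // Order-independence of a sort-then-hash package key (illustration only).
    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <string>
    #include <vector>

    static std::size_t ToyPackageHash(std::vector<std::string> wtxids)
    {
        std::sort(wtxids.begin(), wtxids.end()); // ascending, as GetPackageHash() sorts by hex
        std::string concat;
        for (const auto& w : wtxids) concat += w;
        return std::hash<std::string>{}(concat); // the real code feeds a HashWriter and takes SHA256
    }

    int main()
    {
        // Same set of wtxids in either order: same key.
        assert(ToyPackageHash({"wtxid_parent", "wtxid_child"}) == ToyPackageHash({"wtxid_child", "wtxid_parent"}));
        // Different child: different key (up to hash collisions, which SHA256 makes negligible).
        assert(ToyPackageHash({"wtxid_parent", "wtxid_child"}) != ToyPackageHash({"wtxid_parent", "wtxid_other"}));
        // Duplicates change the key too, so {child} and {child, child} are distinct.
        assert(ToyPackageHash({"wtxid_child"}) != ToyPackageHash({"wtxid_child", "wtxid_child"}));
    }
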
diff --git a/src/policy/packages.h b/src/policy/packages.h
index 537d8476e2..3050320122 100644
--- a/src/policy/packages.h
+++ b/src/policy/packages.h
@@ -88,4 +88,9 @@ bool IsChildWithParents(const Package& package);
* other (the package is a "tree").
*/
bool IsChildWithParentsTree(const Package& package);
+
+/** Get the hash of these transactions' wtxids, concatenated in lexicographical order (treating the
+ * wtxids as little endian encoded uint256, smallest to largest). */
+uint256 GetPackageHash(const std::vector<CTransactionRef>& transactions);
+
#endif // BITCOIN_POLICY_PACKAGES_H
diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp
index 5423ba8920..a44f47b00d 100644
--- a/src/test/fuzz/txorphan.cpp
+++ b/src/test/fuzz/txorphan.cpp
@@ -45,6 +45,8 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
// if true, allow duplicate input when constructing tx
const bool duplicate_input = fuzzed_data_provider.ConsumeBool();
+ CTransactionRef ptx_potential_parent = nullptr;
+
LIMITED_WHILE(outpoints.size() < 200'000 && fuzzed_data_provider.ConsumeBool(), 10 * DEFAULT_MAX_ORPHAN_TRANSACTIONS)
{
// construct transaction
@@ -78,6 +80,27 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
return new_tx;
}();
+ // Trigger orphanage functions that are called using parents. ptx_potential_parent is a tx we constructed in a
+ // previous loop and potentially the parent of this tx.
+ if (ptx_potential_parent) {
+ // Set up future GetTxToReconsider call.
+ orphanage.AddChildrenToWorkSet(*ptx_potential_parent);
+
+ // Check that all txns returned from GetChildrenFrom* are indeed a direct child of this tx.
+ NodeId peer_id = fuzzed_data_provider.ConsumeIntegral<NodeId>();
+ for (const auto& child : orphanage.GetChildrenFromSamePeer(ptx_potential_parent, peer_id)) {
+ assert(std::any_of(child->vin.cbegin(), child->vin.cend(), [&](const auto& input) {
+ return input.prevout.hash == ptx_potential_parent->GetHash();
+ }));
+ }
+ for (const auto& [child, peer] : orphanage.GetChildrenFromDifferentPeer(ptx_potential_parent, peer_id)) {
+ assert(std::any_of(child->vin.cbegin(), child->vin.cend(), [&](const auto& input) {
+ return input.prevout.hash == ptx_potential_parent->GetHash();
+ }));
+ assert(peer != peer_id);
+ }
+ }
+
// trigger orphanage functions
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10 * DEFAULT_MAX_ORPHAN_TRANSACTIONS)
{
@@ -86,9 +109,6 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
CallOneOf(
fuzzed_data_provider,
[&] {
- orphanage.AddChildrenToWorkSet(*tx);
- },
- [&] {
{
CTransactionRef ref = orphanage.GetTxToReconsider(peer_id);
if (ref) {
@@ -136,6 +156,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
orphanage.LimitOrphans(limit, limit_orphans_rng);
Assert(orphanage.Size() <= limit);
});
+
+ }
+ // Set tx as potential parent to be used for future GetChildren() calls.
+ if (!ptx_potential_parent || fuzzed_data_provider.ConsumeBool()) {
+ ptx_potential_parent = tx;
}
+
}
}
diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp
index 4231fcc909..b2643cf678 100644
--- a/src/test/orphanage_tests.cpp
+++ b/src/test/orphanage_tests.cpp
@@ -38,14 +38,56 @@ public:
}
};
-static void MakeNewKeyWithFastRandomContext(CKey& key)
+static void MakeNewKeyWithFastRandomContext(CKey& key, FastRandomContext& rand_ctx = g_insecure_rand_ctx)
{
std::vector<unsigned char> keydata;
- keydata = g_insecure_rand_ctx.randbytes(32);
+ keydata = rand_ctx.randbytes(32);
key.Set(keydata.data(), keydata.data() + keydata.size(), /*fCompressedIn=*/true);
assert(key.IsValid());
}
+// Creates a transaction with 2 outputs. Spends all outpoints. If outpoints is empty, spends a random one.
+static CTransactionRef MakeTransactionSpending(const std::vector<COutPoint>& outpoints, FastRandomContext& det_rand)
+{
+ CKey key;
+ MakeNewKeyWithFastRandomContext(key, det_rand);
+ CMutableTransaction tx;
+ // If no outpoints are given, create a random one.
+ if (outpoints.empty()) {
+ tx.vin.emplace_back(Txid::FromUint256(det_rand.rand256()), 0);
+ } else {
+ for (const auto& outpoint : outpoints) {
+ tx.vin.emplace_back(outpoint);
+ }
+ }
+ // Ensure txid != wtxid
+ tx.vin[0].scriptWitness.stack.push_back({1});
+ tx.vout.resize(2);
+ tx.vout[0].nValue = CENT;
+ tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));
+ tx.vout[1].nValue = 3 * CENT;
+ tx.vout[1].scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(key.GetPubKey()));
+ return MakeTransactionRef(tx);
+}
+
+static bool EqualTxns(const std::set<CTransactionRef>& set_txns, const std::vector<CTransactionRef>& vec_txns)
+{
+ if (vec_txns.size() != set_txns.size()) return false;
+ for (const auto& tx : vec_txns) {
+ if (!set_txns.contains(tx)) return false;
+ }
+ return true;
+}
+static bool EqualTxns(const std::set<CTransactionRef>& set_txns,
+ const std::vector<std::pair<CTransactionRef, NodeId>>& vec_txns)
+{
+ if (vec_txns.size() != set_txns.size()) return false;
+ for (const auto& [tx, nodeid] : vec_txns) {
+ if (!set_txns.contains(tx)) return false;
+ }
+ return true;
+}
+
BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
{
// This test had non-deterministic coverage due to
@@ -138,4 +180,105 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
BOOST_CHECK(orphanage.CountOrphans() == 0);
}
+BOOST_AUTO_TEST_CASE(get_children)
+{
+ FastRandomContext det_rand{true};
+ std::vector<COutPoint> empty_outpoints;
+
+ auto parent1 = MakeTransactionSpending(empty_outpoints, det_rand);
+ auto parent2 = MakeTransactionSpending(empty_outpoints, det_rand);
+
+ // Make sure these parents have different txids otherwise this test won't make sense.
+ while (parent1->GetHash() == parent2->GetHash()) {
+ parent2 = MakeTransactionSpending(empty_outpoints, det_rand);
+ }
+
+ // Create children to go into orphanage.
+ auto child_p1n0 = MakeTransactionSpending({{parent1->GetHash(), 0}}, det_rand);
+ auto child_p2n1 = MakeTransactionSpending({{parent2->GetHash(), 1}}, det_rand);
+ // Spends the same tx twice. Should not cause duplicates.
+ auto child_p1n0_p1n1 = MakeTransactionSpending({{parent1->GetHash(), 0}, {parent1->GetHash(), 1}}, det_rand);
+ // Spends the same outpoint as previous tx. Should still be returned; don't assume outpoints are unique.
+ auto child_p1n0_p2n0 = MakeTransactionSpending({{parent1->GetHash(), 0}, {parent2->GetHash(), 0}}, det_rand);
+
+ const NodeId node1{1};
+ const NodeId node2{2};
+
+ // All orphans provided by node1
+ {
+ TxOrphanage orphanage;
+ BOOST_CHECK(orphanage.AddTx(child_p1n0, node1));
+ BOOST_CHECK(orphanage.AddTx(child_p2n1, node1));
+ BOOST_CHECK(orphanage.AddTx(child_p1n0_p1n1, node1));
+ BOOST_CHECK(orphanage.AddTx(child_p1n0_p2n0, node1));
+
+ std::set<CTransactionRef> expected_parent1_children{child_p1n0, child_p1n0_p2n0, child_p1n0_p1n1};
+ std::set<CTransactionRef> expected_parent2_children{child_p2n1, child_p1n0_p2n0};
+
+ BOOST_CHECK(EqualTxns(expected_parent1_children, orphanage.GetChildrenFromSamePeer(parent1, node1)));
+ BOOST_CHECK(EqualTxns(expected_parent2_children, orphanage.GetChildrenFromSamePeer(parent2, node1)));
+
+ BOOST_CHECK(EqualTxns(expected_parent1_children, orphanage.GetChildrenFromDifferentPeer(parent1, node2)));
+ BOOST_CHECK(EqualTxns(expected_parent2_children, orphanage.GetChildrenFromDifferentPeer(parent2, node2)));
+
+ // The peer must match
+ BOOST_CHECK(orphanage.GetChildrenFromSamePeer(parent1, node2).empty());
+ BOOST_CHECK(orphanage.GetChildrenFromSamePeer(parent2, node2).empty());
+
+ // There shouldn't be any children of this tx in the orphanage
+ BOOST_CHECK(orphanage.GetChildrenFromSamePeer(child_p1n0_p2n0, node1).empty());
+ BOOST_CHECK(orphanage.GetChildrenFromSamePeer(child_p1n0_p2n0, node2).empty());
+ BOOST_CHECK(orphanage.GetChildrenFromDifferentPeer(child_p1n0_p2n0, node1).empty());
+ BOOST_CHECK(orphanage.GetChildrenFromDifferentPeer(child_p1n0_p2n0, node2).empty());
+ }
+
+ // Orphans provided by node1 and node2
+ {
+ TxOrphanage orphanage;
+ BOOST_CHECK(orphanage.AddTx(child_p1n0, node1));
+ BOOST_CHECK(orphanage.AddTx(child_p2n1, node1));
+ BOOST_CHECK(orphanage.AddTx(child_p1n0_p1n1, node2));
+ BOOST_CHECK(orphanage.AddTx(child_p1n0_p2n0, node2));
+
+ // +----------------+---------------+----------------------------------+
+ // | | sender=node1 | sender=node2 |
+ // +----------------+---------------+----------------------------------+
+ // | spends parent1 | child_p1n0 | child_p1n0_p1n1, child_p1n0_p2n0 |
+ // | spends parent2 | child_p2n1 | child_p1n0_p2n0 |
+ // +----------------+---------------+----------------------------------+
+
+ // Children of parent1 from node1:
+ {
+ std::set<CTransactionRef> expected_parent1_node1{child_p1n0};
+
+ BOOST_CHECK(EqualTxns(expected_parent1_node1, orphanage.GetChildrenFromSamePeer(parent1, node1)));
+ BOOST_CHECK(EqualTxns(expected_parent1_node1, orphanage.GetChildrenFromDifferentPeer(parent1, node2)));
+ }
+
+ // Children of parent2 from node1:
+ {
+ std::set<CTransactionRef> expected_parent2_node1{child_p2n1};
+
+ BOOST_CHECK(EqualTxns(expected_parent2_node1, orphanage.GetChildrenFromSamePeer(parent2, node1)));
+ BOOST_CHECK(EqualTxns(expected_parent2_node1, orphanage.GetChildrenFromDifferentPeer(parent2, node2)));
+ }
+
+ // Children of parent1 from node2:
+ {
+ std::set<CTransactionRef> expected_parent1_node2{child_p1n0_p1n1, child_p1n0_p2n0};
+
+ BOOST_CHECK(EqualTxns(expected_parent1_node2, orphanage.GetChildrenFromSamePeer(parent1, node2)));
+ BOOST_CHECK(EqualTxns(expected_parent1_node2, orphanage.GetChildrenFromDifferentPeer(parent1, node1)));
+ }
+
+ // Children of parent2 from node2:
+ {
+ std::set<CTransactionRef> expected_parent2_node2{child_p1n0_p2n0};
+
+ BOOST_CHECK(EqualTxns(expected_parent2_node2, orphanage.GetChildrenFromSamePeer(parent2, node2)));
+ BOOST_CHECK(EqualTxns(expected_parent2_node2, orphanage.GetChildrenFromDifferentPeer(parent2, node1)));
+ }
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp
index b948ea8acb..8112f5f685 100644
--- a/src/test/txpackage_tests.cpp
+++ b/src/test/txpackage_tests.cpp
@@ -8,9 +8,12 @@
#include <policy/policy.h>
#include <primitives/transaction.h>
#include <script/script.h>
+#include <serialize.h>
+#include <streams.h>
#include <test/util/random.h>
#include <test/util/script.h>
#include <test/util/setup_common.h>
+#include <util/strencodings.h>
#include <test/util/txmempool.h>
#include <validation.h>
@@ -40,6 +43,93 @@ inline CTransactionRef create_placeholder_tx(size_t num_inputs, size_t num_outpu
return MakeTransactionRef(mtx);
}
+// Create a Wtxid from a hex string
+inline Wtxid WtxidFromString(std::string_view str)
+{
+ return Wtxid::FromUint256(uint256S(str.data()));
+}
+
+BOOST_FIXTURE_TEST_CASE(package_hash_tests, TestChain100Setup)
+{
+ // Random real segwit transaction
+ DataStream stream_1{
+ ParseHex("02000000000101964b8aa63509579ca6086e6012eeaa4c2f4dd1e283da29b67c8eea38b3c6fd220000000000fdffffff0294c618000000000017a9145afbbb42f4e83312666d0697f9e66259912ecde38768fa2c0000000000160014897388a0889390fd0e153a22bb2cf9d8f019faf50247304402200547406380719f84d68cf4e96cc3e4a1688309ef475b150be2b471c70ea562aa02206d255f5acc40fd95981874d77201d2eb07883657ce1c796513f32b6079545cdf0121023ae77335cefcb5ab4c1dc1fb0d2acfece184e593727d7d5906c78e564c7c11d125cf0c00"),
+ };
+ CTransaction tx_1(deserialize, TX_WITH_WITNESS, stream_1);
+ CTransactionRef ptx_1{MakeTransactionRef(tx_1)};
+
+ // Random real nonsegwit transaction
+ DataStream stream_2{
+ ParseHex("01000000010b26e9b7735eb6aabdf358bab62f9816a21ba9ebdb719d5299e88607d722c190000000008b4830450220070aca44506c5cef3a16ed519d7c3c39f8aab192c4e1c90d065f37b8a4af6141022100a8e160b856c2d43d27d8fba71e5aef6405b8643ac4cb7cb3c462aced7f14711a0141046d11fee51b0e60666d5049a9101a72741df480b96ee26488a4d3466b95c9a40ac5eeef87e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe76036c339ffffffff021bff3d11000000001976a91404943fdd508053c75000106d3bc6e2754dbcff1988ac2f15de00000000001976a914a266436d2965547608b9e15d9032a7b9d64fa43188ac00000000"),
+ };
+ CTransaction tx_2(deserialize, TX_WITH_WITNESS, stream_2);
+ CTransactionRef ptx_2{MakeTransactionRef(tx_2)};
+
+ // Random real segwit transaction
+ DataStream stream_3{
+ ParseHex("0200000000010177862801f77c2c068a70372b4c435ef8dd621291c36a64eb4dd491f02218f5324600000000fdffffff014a0100000000000022512035ea312034cfac01e956a269f3bf147f569c2fbb00180677421262da042290d803402be713325ff285e66b0380f53f2fae0d0fb4e16f378a440fed51ce835061437566729d4883bc917632f3cff474d6384bc8b989961a1d730d4a87ed38ad28bd337b20f1d658c6c138b1c312e072b4446f50f01ae0da03a42e6274f8788aae53416a7fac0063036f7264010118746578742f706c61696e3b636861727365743d7574662d3800357b2270223a226272632d3230222c226f70223a226d696e74222c227469636b223a224342414c222c22616d74223a2236393639227d6821c1f1d658c6c138b1c312e072b4446f50f01ae0da03a42e6274f8788aae53416a7f00000000"),
+ };
+ CTransaction tx_3(deserialize, TX_WITH_WITNESS, stream_3);
+ CTransactionRef ptx_3{MakeTransactionRef(tx_3)};
+
+ // It's easy to see that wtxids are sorted in lexicographical order:
+ Wtxid wtxid_1{WtxidFromString("0x85cd1a31eb38f74ed5742ec9cb546712ab5aaf747de28a9168b53e846cbda17f")};
+ Wtxid wtxid_2{WtxidFromString("0xb4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b")};
+ Wtxid wtxid_3{WtxidFromString("0xe065bac15f62bb4e761d761db928ddee65a47296b2b776785abb912cdec474e3")};
+ BOOST_CHECK_EQUAL(tx_1.GetWitnessHash(), wtxid_1);
+ BOOST_CHECK_EQUAL(tx_2.GetWitnessHash(), wtxid_2);
+ BOOST_CHECK_EQUAL(tx_3.GetWitnessHash(), wtxid_3);
+
+ BOOST_CHECK(wtxid_1.GetHex() < wtxid_2.GetHex());
+ BOOST_CHECK(wtxid_2.GetHex() < wtxid_3.GetHex());
+
+ // The txids are not (we want to test that sorting and hashing use wtxid, not txid):
+ Txid txid_1{TxidFromString("0xbd0f71c1d5e50589063e134fad22053cdae5ab2320db5bf5e540198b0b5a4e69")};
+ Txid txid_2{TxidFromString("0xb4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b")};
+ Txid txid_3{TxidFromString("0xee707be5201160e32c4fc715bec227d1aeea5940fb4295605e7373edce3b1a93")};
+ BOOST_CHECK_EQUAL(tx_1.GetHash(), txid_1);
+ BOOST_CHECK_EQUAL(tx_2.GetHash(), txid_2);
+ BOOST_CHECK_EQUAL(tx_3.GetHash(), txid_3);
+
+ BOOST_CHECK(txid_2.GetHex() < txid_1.GetHex());
+
+ BOOST_CHECK(txid_1.ToUint256() != wtxid_1.ToUint256());
+ BOOST_CHECK(txid_2.ToUint256() == wtxid_2.ToUint256());
+ BOOST_CHECK(txid_3.ToUint256() != wtxid_3.ToUint256());
+
+ // We are testing that both functions compare using GetHex() and not uint256.
+ // (in this pair of wtxids, hex string order != uint256 order)
+ BOOST_CHECK(wtxid_2 < wtxid_1);
+ // (in this pair of wtxids, hex string order == uint256 order)
+ BOOST_CHECK(wtxid_2 < wtxid_3);
+
+ // All permutations of the package containing ptx_1, ptx_2, ptx_3 have the same package hash
+ std::vector<CTransactionRef> package_123{ptx_1, ptx_2, ptx_3};
+ std::vector<CTransactionRef> package_132{ptx_1, ptx_3, ptx_2};
+ std::vector<CTransactionRef> package_231{ptx_2, ptx_3, ptx_1};
+ std::vector<CTransactionRef> package_213{ptx_2, ptx_1, ptx_3};
+ std::vector<CTransactionRef> package_312{ptx_3, ptx_1, ptx_2};
+ std::vector<CTransactionRef> package_321{ptx_3, ptx_2, ptx_1};
+
+ uint256 calculated_hash_123 = (HashWriter() << wtxid_1 << wtxid_2 << wtxid_3).GetSHA256();
+
+ uint256 hash_if_by_txid = (HashWriter() << wtxid_2 << wtxid_1 << wtxid_3).GetSHA256();
+ BOOST_CHECK(hash_if_by_txid != calculated_hash_123);
+
+ uint256 hash_if_use_txid = (HashWriter() << txid_2 << txid_1 << txid_3).GetSHA256();
+ BOOST_CHECK(hash_if_use_txid != calculated_hash_123);
+
+ uint256 hash_if_use_int_order = (HashWriter() << wtxid_2 << wtxid_1 << wtxid_3).GetSHA256();
+ BOOST_CHECK(hash_if_use_int_order != calculated_hash_123);
+
+ BOOST_CHECK_EQUAL(calculated_hash_123, GetPackageHash(package_123));
+ BOOST_CHECK_EQUAL(calculated_hash_123, GetPackageHash(package_132));
+ BOOST_CHECK_EQUAL(calculated_hash_123, GetPackageHash(package_231));
+ BOOST_CHECK_EQUAL(calculated_hash_123, GetPackageHash(package_213));
+ BOOST_CHECK_EQUAL(calculated_hash_123, GetPackageHash(package_312));
+ BOOST_CHECK_EQUAL(calculated_hash_123, GetPackageHash(package_321));
+}
+
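Reviewer note: the "hex string order != uint256 order" checks above rely on GetHex() printing the 32 bytes back-to-front while uint256's operator< compares the stored byte array directly (memcmp order). Below is a small stand-alone model of that mismatch using 4-byte ids for brevity; ToyId is an illustrative type, not one from the codebase.

    // Why lexicographic order of the displayed hex can disagree with the type's operator<.
    #include <array>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <string>

    struct ToyId {
        std::array<uint8_t, 4> bytes; // stored order, like base_blob::m_data

        std::string GetHex() const
        {
            std::string out;
            for (int i = 3; i >= 0; --i) { // printed in reverse, like uint256::GetHex()
                char buf[3];
                std::snprintf(buf, sizeof(buf), "%02x", static_cast<unsigned>(bytes[i]));
                out += buf;
            }
            return out;
        }
        bool operator<(const ToyId& other) const
        {
            return std::memcmp(bytes.data(), other.bytes.data(), bytes.size()) < 0; // stored order
        }
    };

    int main()
    {
        const ToyId a{{0x7f, 0x00, 0x00, 0x85}}; // GetHex() == "8500007f"
        const ToyId b{{0x6b, 0x00, 0x00, 0xb4}}; // GetHex() == "b400006b"
        assert(a.GetHex() < b.GetHex()); // hex-string order puts a first...
        assert(b < a);                   // ...but byte-array order puts b first (0x6b < 0x7f at index 0)
    }
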
BOOST_FIXTURE_TEST_CASE(package_sanitization_tests, TestChain100Setup)
{
// Packages can't have more than 25 transactions.
@@ -190,6 +280,9 @@ BOOST_FIXTURE_TEST_CASE(noncontextual_package_tests, TestChain100Setup)
BOOST_CHECK_EQUAL(state.GetRejectReason(), "package-not-sorted");
BOOST_CHECK(IsChildWithParents({tx_parent, tx_child}));
BOOST_CHECK(IsChildWithParentsTree({tx_parent, tx_child}));
+ BOOST_CHECK(GetPackageHash({tx_parent}) != GetPackageHash({tx_child}));
+ BOOST_CHECK(GetPackageHash({tx_child, tx_child}) != GetPackageHash({tx_child}));
+ BOOST_CHECK(GetPackageHash({tx_child, tx_parent}) != GetPackageHash({tx_child, tx_child}));
}
// 24 Parents and 1 Child
@@ -450,6 +543,8 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
BOOST_CHECK_EQUAL(ptx_child1->GetHash(), ptx_child2->GetHash());
// child1 and child2 have different wtxids
BOOST_CHECK(ptx_child1->GetWitnessHash() != ptx_child2->GetWitnessHash());
+ // Check that they have different package hashes
+ BOOST_CHECK(GetPackageHash({ptx_parent, ptx_child1}) != GetPackageHash({ptx_parent, ptx_child2}));
// Try submitting Package1{parent, child1} and Package2{parent, child2} where the children are
// same-txid-different-witness.
@@ -503,7 +598,8 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
/*output_destination=*/grandchild_locking_script,
/*output_amount=*/CAmount(47 * COIN), /*submit=*/false);
CTransactionRef ptx_grandchild = MakeTransactionRef(mtx_grandchild);
-
+ // Check that they have different package hashes
+ BOOST_CHECK(GetPackageHash({ptx_child1, ptx_grandchild}) != GetPackageHash({ptx_child2, ptx_grandchild}));
// We already submitted child1 above.
{
Package package_child2_grandchild{ptx_child2, ptx_grandchild};
diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp
index e907fffd4f..82206e56c3 100644
--- a/src/txorphanage.cpp
+++ b/src/txorphanage.cpp
@@ -241,3 +241,77 @@ void TxOrphanage::EraseForBlock(const CBlock& block)
LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx included or conflicted by block\n", nErased);
}
}
+
+std::vector<CTransactionRef> TxOrphanage::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const
+{
+ LOCK(m_mutex);
+
+ // First construct a vector of iterators to ensure we do not return duplicates of the same tx
+ // and so we can sort by nTimeExpire.
+ std::vector<OrphanMap::iterator> iters;
+
+ // For each output, get all entries spending this prevout, filtering for ones from the specified peer.
+ for (unsigned int i = 0; i < parent->vout.size(); i++) {
+ const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(parent->GetHash(), i));
+ if (it_by_prev != m_outpoint_to_orphan_it.end()) {
+ for (const auto& elem : it_by_prev->second) {
+ if (elem->second.fromPeer == nodeid) {
+ iters.emplace_back(elem);
+ }
+ }
+ }
+ }
+
+ // Sort so that more recent orphans (which expire later) come first, breaking ties by address.
+ // Any strict ordering makes duplicates adjacent so they can be erased below; ties on
+ // nTimeExpire are possible because it is quantified in seconds.
+ std::sort(iters.begin(), iters.end(), [](const auto& lhs, const auto& rhs) {
+ if (lhs->second.nTimeExpire == rhs->second.nTimeExpire) {
+ return &(*lhs) < &(*rhs);
+ } else {
+ return lhs->second.nTimeExpire > rhs->second.nTimeExpire;
+ }
+ });
+ // Erase duplicates
+ iters.erase(std::unique(iters.begin(), iters.end()), iters.end());
+
+ // Convert to a vector of CTransactionRef
+ std::vector<CTransactionRef> children_found;
+ children_found.reserve(iters.size());
+ for (const auto child_iter : iters) {
+ children_found.emplace_back(child_iter->second.tx);
+ }
+ return children_found;
+}
+
+std::vector<std::pair<CTransactionRef, NodeId>> TxOrphanage::GetChildrenFromDifferentPeer(const CTransactionRef& parent, NodeId nodeid) const
+{
+ LOCK(m_mutex);
+
+ // First construct vector of iterators to ensure we do not return duplicates of the same tx.
+ std::vector<OrphanMap::iterator> iters;
+
+ // For each output, get all entries spending this prevout, filtering for ones not from the specified peer.
+ for (unsigned int i = 0; i < parent->vout.size(); i++) {
+ const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(parent->GetHash(), i));
+ if (it_by_prev != m_outpoint_to_orphan_it.end()) {
+ for (const auto& elem : it_by_prev->second) {
+ if (elem->second.fromPeer != nodeid) {
+ iters.emplace_back(elem);
+ }
+ }
+ }
+ }
+
+ // Erase duplicates
+ std::sort(iters.begin(), iters.end(), IteratorComparator());
+ iters.erase(std::unique(iters.begin(), iters.end()), iters.end());
+
+ // Convert iterators to pair<CTransactionRef, NodeId>
+ std::vector<std::pair<CTransactionRef, NodeId>> children_found;
+ children_found.reserve(iters.size());
+ for (const auto child_iter : iters) {
+ children_found.emplace_back(child_iter->second.tx, child_iter->second.fromPeer);
+ }
+ return children_found;
+}
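Reviewer note: both getters collect one iterator per matching (outpoint, orphan) pair, so the same child can appear once for every output of the parent it spends; sorting gives a strict order (for GetChildrenFromSamePeer: newest expiry first, address as tie-break) and std::unique then drops the duplicates. The same idiom on a toy struct, for illustration only (Candidate and its fields are hypothetical):

    // Sort-then-unique deduplication with a newest-first ordering (illustration only).
    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    struct Candidate {
        std::string wtxid;
        int64_t expiry; // seconds, like nTimeExpire; ties are possible
    };

    int main()
    {
        Candidate a{"child_a", 100};
        Candidate b{"child_b", 160};
        // "child_b" is found twice because it spends two different outputs of the same parent.
        std::vector<Candidate*> found{&a, &b, &b};

        std::sort(found.begin(), found.end(), [](const Candidate* lhs, const Candidate* rhs) {
            if (lhs->expiry == rhs->expiry) return lhs < rhs; // tie-break by address, purely for determinism
            return lhs->expiry > rhs->expiry;                 // newer orphans expire later: put them first
        });
        found.erase(std::unique(found.begin(), found.end()), found.end());

        assert(found.size() == 2);
        assert(found.front()->wtxid == "child_b"); // most recent first, and no duplicates
    }
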
diff --git a/src/txorphanage.h b/src/txorphanage.h
index 2fd14e6fd2..a3c8edaa2a 100644
--- a/src/txorphanage.h
+++ b/src/txorphanage.h
@@ -51,6 +51,14 @@ public:
/** Does this peer have any work to do? */
bool HaveTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+ /** Get all children that spend from this tx and were received from nodeid. Sorted from most
+ * recent to least recent. */
+ std::vector<CTransactionRef> GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+
+ /** Get all children that spend from this tx but were not received from nodeid. Also return
+ * which peer provided each tx. */
+ std::vector<std::pair<CTransactionRef, NodeId>> GetChildrenFromDifferentPeer(const CTransactionRef& parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+
/** Return how many entries exist in the orphanage */
size_t Size() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{