Diffstat (limited to 'src/net_processing.cpp')
 src/net_processing.cpp | 35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index ca701a7e5b..ef7f486e0b 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -187,7 +187,7 @@ namespace {
* million to make it highly unlikely for users to have issues with this
* filter.
*
- * We only need to add wtxids to this filter. For non-segwit
+ * We typically only add wtxids to this filter. For non-segwit
* transactions, the txid == wtxid, so this only prevents us from
* re-downloading non-segwit transactions when communicating with
* non-wtxidrelay peers -- which is important for avoiding malleation
@@ -196,6 +196,12 @@ namespace {
* the reject filter store wtxids is exactly what we want to avoid
* redownload of a rejected transaction.
*
+ * In cases where we can tell that a segwit transaction will fail
+ * validation no matter the witness, we may add the txid of such a
+ * transaction to the filter as well. This can be helpful when
+ * communicating with txid-relay peers or if we were to otherwise fetch a
+ * transaction via txid (e.g. in our orphan handling).
+ *
* Memory used: 1.3 MB
*/
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
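
The comment above describes the policy this change extends: the filter normally holds only wtxids, and a txid is added only when the rejection provably did not depend on the witness. Below is a minimal standalone sketch of that insertion rule, with std::unordered_set standing in for CRollingBloomFilter and plain strings standing in for uint256 hashes; RejectFilter and RecordRejection are hypothetical names, not the real API.

    #include <string>
    #include <unordered_set>

    // Stand-in for the rolling bloom filter: the real CRollingBloomFilter is
    // probabilistic and bounded in memory, this set is exact and unbounded.
    struct RejectFilter {
        std::unordered_set<std::string> entries;
        void Insert(const std::string& hash) { entries.insert(hash); }
        bool Contains(const std::string& hash) const { return entries.count(hash) > 0; }
    };

    // Record a rejection. The wtxid always goes in; the txid is added only
    // when the failure is known to be witness-independent (such as
    // non-standard inputs) and the txid actually differs from the wtxid.
    void RecordRejection(RejectFilter& filter, const std::string& wtxid,
                         const std::string& txid, bool witness_independent_failure)
    {
        filter.Insert(wtxid);
        if (witness_independent_failure && txid != wtxid) {
            filter.Insert(txid);
        }
    }

    int main()
    {
        RejectFilter filter;
        RecordRejection(filter, "wtxid-1", "txid-1", /*witness_independent_failure=*/true);
        return filter.Contains("txid-1") ? 0 : 1; // txid filtered alongside the wtxid
    }
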
@@ -1161,6 +1167,7 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state,
}
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
+ case TxValidationResult::TX_INPUTS_NOT_STANDARD:
case TxValidationResult::TX_NOT_STANDARD:
case TxValidationResult::TX_MISSING_INPUTS:
case TxValidationResult::TX_PREMATURE_SPEND:
@@ -2053,6 +2060,19 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(orphanTx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (orphan_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && orphanTx.GetWitnessHash() != orphanTx.GetHash()) {
+ // We only add the txid if it differs from the wtxid, to
+ // avoid wasting entries in the rolling bloom filter.
+ recentRejects->insert(orphanTx.GetHash());
+ }
}
EraseOrphanTx(orphanHash);
done = true;
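
The branch added above rests on two facts: TX_INPUTS_NOT_STANDARD is decided entirely by data the txid commits to (the spent scriptPubKeys plus the non-witness serialization), and txid == wtxid exactly when a transaction carries no witness data. A small sketch of the second fact, using a deliberately simplified transaction model; SimpleTx is hypothetical and std::hash stands in for the double-SHA256 serialization hashes.

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <string>
    #include <vector>

    // Simplified transaction: just enough fields to show what each hash
    // commits to. Real txids/wtxids are double-SHA256 of the serialized
    // transaction; hashing concatenated strings is only a stand-in.
    struct SimpleTx {
        std::string non_witness_data;            // version, inputs, outputs, locktime
        std::vector<std::string> witness_stacks; // one witness stack per input

        // The txid commits only to the non-witness serialization.
        std::size_t Txid() const { return std::hash<std::string>{}(non_witness_data); }

        // The wtxid additionally commits to the witness data.
        std::size_t Wtxid() const {
            std::string ser = non_witness_data;
            for (const auto& w : witness_stacks) ser += w;
            return std::hash<std::string>{}(ser);
        }
    };

    int main()
    {
        SimpleTx legacy{"spends-utxo-A", {}};                // no witness: txid == wtxid
        SimpleTx segwit{"spends-utxo-B", {"sig", "pubkey"}}; // witness present: hashes differ
        assert(legacy.Txid() == legacy.Wtxid());
        assert(segwit.Txid() != segwit.Wtxid());
        return 0;
    }

Because the policy check behind TX_INPUTS_NOT_STANDARD never reads the witness, no alternative witness can rescue the transaction, which is why caching its txid is safe.
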
@@ -2940,7 +2960,7 @@ void ProcessMessage(
// We do the AlreadyHave() check using wtxid, rather than txid - in the
// absence of witness malleation, this is strictly better, because the
- // recent rejects filter may contain the wtxid but will never contain
+ // recent rejects filter may contain the wtxid but rarely contains
// the txid of a segwit transaction that has been rejected.
// In the presence of witness malleation, it's possible that by only
// doing the check with wtxid, we could overlook a transaction which
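
The comment above (continued in the source) explains which hash the de-duplication check consults. The lookup side is symmetric to the insertion sketch earlier: a wtxid-relay peer's announcement is checked by wtxid, which is always inserted on rejection, while a legacy peer's announcement is checked by txid, which after this change may also be present when the failure was witness-independent. A minimal sketch under the same stand-in assumptions; AlreadyRejected is a hypothetical helper, not the real AlreadyHave().

    #include <string>
    #include <unordered_set>

    // Stand-in for the recently-rejected filter (the real one is a rolling
    // bloom filter keyed by uint256 hashes).
    using RejectFilter = std::unordered_set<std::string>;

    // The announced hash is a wtxid for wtxid-relay peers and a txid for
    // legacy peers; either way it is looked up directly in the filter.
    bool AlreadyRejected(const RejectFilter& recent_rejects, const std::string& announced_hash)
    {
        return recent_rejects.count(announced_hash) > 0;
    }

    int main()
    {
        RejectFilter recent_rejects{"rejected-wtxid", "rejected-txid"};
        return AlreadyRejected(recent_rejects, "rejected-txid") ? 0 : 1;
    }
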
@@ -3034,6 +3054,17 @@ void ProcessMessage(
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(tx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
+ recentRejects->insert(tx.GetHash());
+ }
if (RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
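
Taken together, the two insertion sites in this patch produce the orphan-handling benefit the comments describe: once a rejected parent's txid is in the filter, a later child that would trigger parent-fetching by txid is short-circuited. An end-to-end sketch under the same stand-in assumptions (std::unordered_set for the rolling bloom filter, string literals for hashes, all names hypothetical):

    #include <iostream>
    #include <string>
    #include <unordered_set>

    int main()
    {
        std::unordered_set<std::string> recent_rejects;

        // 1. A segwit parent fails with TX_INPUTS_NOT_STANDARD. The failure
        //    does not depend on the witness, so both hashes are filtered.
        const std::string parent_txid = "parent-txid";
        const std::string parent_wtxid = "parent-wtxid";
        recent_rejects.insert(parent_wtxid);
        recent_rejects.insert(parent_txid); // new with this change

        // 2. A child spending that parent arrives as an orphan; orphan
        //    handling would normally request the missing parent by txid.
        if (recent_rejects.count(parent_txid)) {
            std::cout << "parent already rejected, not re-requesting\n";
        } else {
            std::cout << "requesting parent by txid\n";
        }
        return 0;
    }
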