-rw-r--r--   src/Makefile.am         1
-rw-r--r--   src/txmempool.cpp      23
-rw-r--r--   src/txmempool.h        55
-rw-r--r--   src/util/epochguard.h  91
4 files changed, 107 insertions(+), 63 deletions(-)
diff --git a/src/Makefile.am b/src/Makefile.am
index 8a9ef49a34..eae226b1d4 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -231,6 +231,7 @@ BITCOIN_CORE_H = \
util/bip32.h \
util/bytevectorhash.h \
util/check.h \
+ util/epochguard.h \
util/error.h \
util/fees.h \
util/getuniquepath.h \
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 899835019a..48424a75d0 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -23,7 +23,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFe
int64_t _nTime, unsigned int _entryHeight,
bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp)
: tx(_tx), nFee(_nFee), nTxWeight(GetTransactionWeight(*tx)), nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime), entryHeight(_entryHeight),
- spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp), m_epoch(0)
+ spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp)
{
nCountWithDescendants = 1;
nSizeWithDescendants = GetTxSize();
@@ -132,7 +132,7 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
// include them, and update their CTxMemPoolEntry::m_parents to include this tx.
// we cache the in-mempool children to avoid duplicate updates
{
- const auto epoch = GetFreshEpoch();
+ WITH_FRESH_EPOCH(m_epoch);
for (; iter != mapNextTx.end() && iter->first->hash == hash; ++iter) {
const uint256 &childHash = iter->second->GetHash();
txiter childIter = mapTx.find(childHash);
@@ -1119,22 +1119,3 @@ void CTxMemPool::SetIsLoaded(bool loaded)
LOCK(cs);
m_is_loaded = loaded;
}
-
-
-CTxMemPool::EpochGuard CTxMemPool::GetFreshEpoch() const
-{
- return EpochGuard(*this);
-}
-CTxMemPool::EpochGuard::EpochGuard(const CTxMemPool& in) : pool(in)
-{
- assert(!pool.m_has_epoch_guard);
- ++pool.m_epoch;
- pool.m_has_epoch_guard = true;
-}
-
-CTxMemPool::EpochGuard::~EpochGuard()
-{
- // prevents stale results being used
- ++pool.m_epoch;
- pool.m_has_epoch_guard = false;
-}
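
For context on the call-site change above: WITH_FRESH_EPOCH(m_epoch) replaces the old GetFreshEpoch() helper with a macro that declares an RAII guard in place. A rough sketch of what it expands to, assuming PASTE2 (from util/macros.h) token-pastes its arguments and __COUNTER__ happens to expand to 0 at this site:

    // WITH_FRESH_EPOCH(m_epoch); becomes approximately:
    const Epoch::Guard epoch_guard_0(m_epoch);
    // The guard increments the epoch on construction and again on destruction,
    // mirroring the removed EpochGuard constructor/destructor above.
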
diff --git a/src/txmempool.h b/src/txmempool.h
index b8de326737..143048b205 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -21,6 +21,7 @@
#include <primitives/transaction.h>
#include <random.h>
#include <sync.h>
+#include <util/epochguard.h>
#include <util/hasher.h>
#include <boost/multi_index_container.hpp>
@@ -64,6 +65,7 @@ struct CompareIteratorByHash {
return a->GetTx().GetHash() < b->GetTx().GetHash();
}
};
+
/** \class CTxMemPoolEntry
*
* CTxMemPoolEntry stores data about the corresponding transaction, as well
@@ -156,7 +158,7 @@ public:
Children& GetMemPoolChildren() const { return m_children; }
mutable size_t vTxHashesIdx; //!< Index in mempool's vTxHashes
- mutable uint64_t m_epoch; //!< epoch when last touched, useful for graph algorithms
+ mutable Epoch::Marker m_epoch_marker; //!< epoch when last touched, useful for graph algorithms
};
// Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index.
@@ -486,8 +488,7 @@ private:
mutable int64_t lastRollingFeeUpdate;
mutable bool blockSinceLastRollingFeeBump;
mutable double rollingMinimumFeeRate; //!< minimum fee to get into the pool, decreases exponentially
- mutable uint64_t m_epoch{0};
- mutable bool m_has_epoch_guard{false};
+ mutable Epoch m_epoch GUARDED_BY(cs);
// In-memory counter for external mempool tracking purposes.
// This number is incremented once every time a transaction
@@ -666,7 +667,7 @@ public:
* for). Note: vHashesToUpdate should be the set of transactions from the
* disconnected block that have been accepted back into the mempool.
*/
- void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
+ void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
/** Try to calculate all in-mempool ancestors of entry.
* (these are all calculated including the tx itself)
@@ -827,52 +828,22 @@ private:
*/
void removeUnchecked(txiter entry, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
public:
- /** EpochGuard: RAII-style guard for using epoch-based graph traversal algorithms.
- * When walking ancestors or descendants, we generally want to avoid
- * visiting the same transactions twice. Some traversal algorithms use
- * std::set (or setEntries) to deduplicate the transaction we visit.
- * However, use of std::set is algorithmically undesirable because it both
- * adds an asymptotic factor of O(log n) to traverals cost and triggers O(n)
- * more dynamic memory allocations.
- * In many algorithms we can replace std::set with an internal mempool
- * counter to track the time (or, "epoch") that we began a traversal, and
- * check + update a per-transaction epoch for each transaction we look at to
- * determine if that transaction has not yet been visited during the current
- * traversal's epoch.
- * Algorithms using std::set can be replaced on a one by one basis.
- * Both techniques are not fundamentally incompatible across the codebase.
- * Generally speaking, however, the remaining use of std::set for mempool
- * traversal should be viewed as a TODO for replacement with an epoch based
- * traversal, rather than a preference for std::set over epochs in that
- * algorithm.
- */
- class EpochGuard {
- const CTxMemPool& pool;
- public:
- explicit EpochGuard(const CTxMemPool& in);
- ~EpochGuard();
- };
- // N.B. GetFreshEpoch modifies mutable state via the EpochGuard construction
- // (and later destruction)
- EpochGuard GetFreshEpoch() const EXCLUSIVE_LOCKS_REQUIRED(cs);
-
/** visited marks a CTxMemPoolEntry as having been traversed
- * during the lifetime of the most recently created EpochGuard
+ * during the lifetime of the most recently created Epoch::Guard
* and returns false if we are the first visitor, true otherwise.
*
- * An EpochGuard must be held when visited is called or an assert will be
+ * An Epoch::Guard must be held when visited is called or an assert will be
* triggered.
*
*/
- bool visited(txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
- assert(m_has_epoch_guard);
- bool ret = it->m_epoch >= m_epoch;
- it->m_epoch = std::max(it->m_epoch, m_epoch);
- return ret;
+ bool visited(const txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)
+ {
+ return m_epoch.visited(it->m_epoch_marker);
}
- bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
- assert(m_has_epoch_guard);
+ bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)
+ {
+ assert(m_epoch.guarded()); // verify guard even when it==nullopt
return !it || visited(*it);
}
};
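
A note on the annotations in the hunk above: visited() now lists m_epoch in EXCLUSIVE_LOCKS_REQUIRED alongside cs. This works because Epoch is declared LOCKABLE and Epoch::Guard is SCOPED_LOCKABLE (see the new header below), so clang's -Wthread-safety analysis can check at compile time that a guard is live wherever visited() is called, complementing the runtime assert. A minimal sketch of what that buys, using hypothetical free functions outside the mempool:

    #include <util/epochguard.h>

    // clang -Wthread-safety warns here: calling visited() requires holding 'epoch'.
    void MissingGuard(const Epoch& epoch, Epoch::Marker& marker)
    {
        epoch.visited(marker); // would also trip the runtime assert(m_guarded)
    }

    // No warning: the SCOPED_LOCKABLE guard "acquires" the epoch for this scope.
    void WithGuard(Epoch& epoch, Epoch::Marker& marker)
    {
        const Epoch::Guard guard(epoch);
        epoch.visited(marker);
    }
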
diff --git a/src/util/epochguard.h b/src/util/epochguard.h
new file mode 100644
index 0000000000..1570ec4eb4
--- /dev/null
+++ b/src/util/epochguard.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_EPOCHGUARD_H
+#define BITCOIN_UTIL_EPOCHGUARD_H
+
+#include <threadsafety.h>
+
+#include <cassert>
+
+/** Epoch: RAII-style guard (Epoch::Guard) for epoch-based graph traversal algorithms.
+ * When walking ancestors or descendants, we generally want to avoid
+ * visiting the same transactions twice. Some traversal algorithms use
+ * std::set (or setEntries) to deduplicate the transactions we visit.
+ * However, use of std::set is algorithmically undesirable because it both
+ * adds an asymptotic factor of O(log n) to traversal costs and triggers O(n)
+ * more dynamic memory allocations.
+ * In many algorithms we can replace std::set with an internal mempool
+ * counter to track the time (or, "epoch") that we began a traversal, and
+ * check + update a per-transaction epoch for each transaction we look at to
+ * determine if that transaction has not yet been visited during the current
+ * traversal's epoch.
+ * Algorithms using std::set can be converted on a one-by-one basis; the two
+ * techniques are not fundamentally incompatible and can coexist in the
+ * codebase. Generally speaking, however, the remaining use of std::set for
+ * mempool traversal should be viewed as a TODO for replacement with an
+ * epoch-based traversal, rather than as a preference for std::set over
+ * epochs in that algorithm.
+ */
+
+class LOCKABLE Epoch
+{
+private:
+ uint64_t m_raw_epoch = 0;
+ bool m_guarded = false;
+
+public:
+ Epoch() = default;
+ Epoch(const Epoch&) = delete;
+ Epoch& operator=(const Epoch&) = delete;
+
+ bool guarded() const { return m_guarded; }
+
+ class Marker
+ {
+ private:
+ uint64_t m_marker = 0;
+
+ // only allow modification via Epoch member functions
+ friend class Epoch;
+ Marker& operator=(const Marker&) = delete;
+ };
+
+ class SCOPED_LOCKABLE Guard
+ {
+ private:
+ Epoch& m_epoch;
+
+ public:
+ explicit Guard(Epoch& epoch) EXCLUSIVE_LOCK_FUNCTION(epoch) : m_epoch(epoch)
+ {
+ assert(!m_epoch.m_guarded);
+ ++m_epoch.m_raw_epoch;
+ m_epoch.m_guarded = true;
+ }
+ ~Guard() UNLOCK_FUNCTION()
+ {
+ assert(m_epoch.m_guarded);
+ ++m_epoch.m_raw_epoch; // ensure clear separation between epochs
+ m_epoch.m_guarded = false;
+ }
+ };
+
+ bool visited(Marker& marker) const EXCLUSIVE_LOCKS_REQUIRED(*this)
+ {
+ assert(m_guarded);
+ if (marker.m_marker < m_raw_epoch) {
+ // marker is from a previous epoch, so this is its first visit
+ marker.m_marker = m_raw_epoch;
+ return false;
+ } else {
+ return true;
+ }
+ }
+};
+
+#define WITH_FRESH_EPOCH(epoch) const Epoch::Guard PASTE2(epoch_guard_, __COUNTER__)(epoch)
+
+#endif // BITCOIN_UTIL_EPOCHGUARD_H
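
Finally, a minimal self-contained sketch (not part of this commit) of the visited() semantics described in the header comment: within one Epoch::Guard each marker flips exactly once, giving O(1) deduplication with no std::set and no allocations, and opening a new guard invalidates all earlier markers. It assumes a Bitcoin Core source tree so that <util/epochguard.h> resolves; Node is a hypothetical stand-in for CTxMemPoolEntry.

    #include <util/epochguard.h>

    #include <cassert>

    struct Node {
        Epoch::Marker marker; // per-node marker, like CTxMemPoolEntry::m_epoch_marker above
    };

    int main()
    {
        Epoch epoch;
        Node a, b;
        {
            const Epoch::Guard guard(epoch);  // bumps the epoch; all markers are now stale
            assert(!epoch.visited(a.marker)); // first visit: marker updated, returns false
            assert(epoch.visited(a.marker));  // repeat visit detected in O(1)
            assert(!epoch.visited(b.marker)); // independent node, first visit
        } // ~Guard() bumps the epoch again to separate traversals
        {
            const Epoch::Guard guard(epoch);
            assert(!epoch.visited(a.marker)); // markers from earlier epochs read as unvisited
        }
        return 0;
    }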