author    Jeremy Rubin <j@rubin.io>  2020-01-14 11:31:36 -0800
committer Jeremy Rubin <j@rubin.io>  2020-01-14 19:30:13 -0800
commit    2ccb7cca4ac67198ac89bd58f5b4ae41a5163ceb (patch)
tree      13cdb90b884765f807c580eba27a342499c7374f
parent    ceb789cf3a9075729efa07f5114ce0369d8606c3 (diff)
Add Epoch Guards to CTxMemPoolEntry and CTxMemPool
-rw-r--r--  src/txmempool.cpp  23
-rw-r--r--  src/txmempool.h    52
2 files changed, 73 insertions(+), 2 deletions(-)
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 441255182e..64b91d6296 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -23,7 +23,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFee,
int64_t _nTime, unsigned int _entryHeight,
bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp)
: tx(_tx), nFee(_nFee), nTxWeight(GetTransactionWeight(*tx)), nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime), entryHeight(_entryHeight),
- spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp)
+ spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp), m_epoch(0)
{
nCountWithDescendants = 1;
nSizeWithDescendants = GetTxSize();
@@ -325,7 +325,7 @@ void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee,
}
CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator)
- : nTransactionsUpdated(0), minerPolicyEstimator(estimator)
+ : nTransactionsUpdated(0), minerPolicyEstimator(estimator), m_epoch(0), m_has_epoch_guard(false)
{
_clear(); //lock free clear
@@ -1105,4 +1105,23 @@ void CTxMemPool::SetIsLoaded(bool loaded)
m_is_loaded = loaded;
}
+
+CTxMemPool::EpochGuard CTxMemPool::GetFreshEpoch() const
+{
+ return EpochGuard(*this);
+}
+CTxMemPool::EpochGuard::EpochGuard(const CTxMemPool& in) : pool(in)
+{
+ assert(!pool.m_has_epoch_guard);
+ ++pool.m_epoch;
+ pool.m_has_epoch_guard = true;
+}
+
+CTxMemPool::EpochGuard::~EpochGuard()
+{
+ // prevents stale results from being used
+ ++pool.m_epoch;
+ pool.m_has_epoch_guard = false;
+}
+
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
diff --git a/src/txmempool.h b/src/txmempool.h
index e9f1f1c8a1..a9551706ed 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -129,6 +129,7 @@ public:
int64_t GetSigOpCostWithAncestors() const { return nSigOpCostWithAncestors; }
mutable size_t vTxHashesIdx; //!< Index in mempool's vTxHashes
+ mutable uint64_t m_epoch; //!< epoch when last touched, useful for graph algorithms
};
// Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index.
@@ -453,6 +454,8 @@ private:
mutable int64_t lastRollingFeeUpdate;
mutable bool blockSinceLastRollingFeeBump;
mutable double rollingMinimumFeeRate; //!< minimum fee to get into the pool, decreases exponentially
+ mutable uint64_t m_epoch;
+ mutable bool m_has_epoch_guard;
void trackPackageRemoved(const CFeeRate& rate) EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -736,6 +739,55 @@ private:
* removal.
*/
void removeUnchecked(txiter entry, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
+public:
+ /** EpochGuard: RAII-style guard for using epoch-based graph traversal algorithms.
+ * When walking ancestors or descendants, we generally want to avoid
+ * visiting the same transactions twice. Some traversal algorithms use
+ * std::set (or setEntries) to deduplicate the transactions we visit.
+ * However, use of std::set is algorithmically undesirable because it both
+ * adds an asymptotic factor of O(log n) to traversal cost and triggers O(n)
+ * more dynamic memory allocations.
+ * In many algorithms we can replace std::set with an internal mempool
+ * counter to track the time (or, "epoch") that we began a traversal, and
+ * check + update a per-transaction epoch for each transaction we look at to
+ * determine if that transaction has not yet been visited during the current
+ * traversal's epoch.
+ * Algorithms using std::set can be replaced on a one-by-one basis.
+ * The two techniques are not fundamentally incompatible across the codebase.
+ * Generally speaking, however, the remaining use of std::set for mempool
+ * traversal should be viewed as a TODO for replacement with an epoch based
+ * traversal, rather than a preference for std::set over epochs in that
+ * algorithm.
+ */
+ class EpochGuard {
+ const CTxMemPool& pool;
+ public:
+ EpochGuard(const CTxMemPool& in);
+ ~EpochGuard();
+ };
+ // N.B. GetFreshEpoch modifies mutable state via the EpochGuard construction
+ // (and later destruction)
+ EpochGuard GetFreshEpoch() const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ /** visited marks a CTxMemPoolEntry as having been traversed
+ * during the lifetime of the most recently created EpochGuard
+ * and returns false if we are the first visitor, true otherwise.
+ *
+ * An EpochGuard must be held when visited is called or an assert will be
+ * triggered.
+ *
+ */
+ bool visited(txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
+ assert(m_has_epoch_guard);
+ bool ret = it->m_epoch >= m_epoch;
+ it->m_epoch = std::max(it->m_epoch, m_epoch);
+ return ret;
+ }
+
+ bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
+ assert(m_has_epoch_guard);
+ return !it || visited(*it);
+ }
};
/**
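
To make the epoch technique described in the header comment concrete, here is a minimal, self-contained C++ sketch of the same pattern on a generic DAG. All names below (Node, Graph, ForEachReachable) are invented for illustration and are not part of this commit; only the guard and visited() logic mirror the patch above.

// Standalone sketch of the epoch idea; names are invented for illustration.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Node {
    std::vector<Node*> children;
    mutable uint64_t m_epoch{0}; // epoch when last touched, as in CTxMemPoolEntry
};

class Graph {
    mutable uint64_t m_epoch{0};
    mutable bool m_has_epoch_guard{false};
public:
    class EpochGuard {
        const Graph& g;
    public:
        explicit EpochGuard(const Graph& in) : g(in) {
            assert(!g.m_has_epoch_guard); // only one traversal at a time
            ++g.m_epoch;
            g.m_has_epoch_guard = true;
        }
        ~EpochGuard() {
            ++g.m_epoch; // invalidate marks so stale results are never reused
            g.m_has_epoch_guard = false;
        }
    };
    EpochGuard GetFreshEpoch() const { return EpochGuard(*this); }

    // Marks a node as seen in the current epoch and returns true if it had
    // already been seen. No std::set, no per-node dynamic allocation.
    bool visited(const Node* n) const {
        assert(m_has_epoch_guard);
        const bool ret = n->m_epoch >= m_epoch;
        n->m_epoch = std::max(n->m_epoch, m_epoch);
        return ret;
    }
};

// Depth-first walk that applies fn to every node reachable from root
// exactly once, using the epoch marks instead of a std::set<Node*>.
template <typename Fn>
void ForEachReachable(const Graph& g, Node* root, Fn fn)
{
    const auto guard = g.GetFreshEpoch();
    std::vector<Node*> stack{root};
    g.visited(root);
    while (!stack.empty()) {
        Node* n = stack.back();
        stack.pop_back();
        fn(*n);
        for (Node* child : n->children)
            if (!g.visited(child)) stack.push_back(child);
    }
}

int main()
{
    Node a, b, c;
    a.children = {&b, &c};
    b.children = {&c}; // c is reachable along two paths but visited once
    Graph g;
    int count = 0;
    ForEachReachable(g, &a, [&](Node&) { ++count; });
    assert(count == 3);
}

The destructor's second increment is what makes the guard cheap to reuse: marks left behind by one walk can never satisfy m_epoch >= m_epoch in a later walk, so no clearing pass over the entries is ever needed.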
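
As a usage sketch of the API as it lands in CTxMemPool: the function below is hypothetical (WalkDescendants is not part of this commit) and only illustrates the intended GetFreshEpoch()/visited() calling convention while holding cs. GetMemPoolChildren is the mempool's existing accessor for a transaction's in-mempool children.

// Hypothetical traversal over in-mempool descendants; not part of this
// commit, just a sketch of the intended calling convention.
void CTxMemPool::WalkDescendants(txiter root) const
{
    AssertLockHeld(cs);
    const auto epoch = GetFreshEpoch(); // asserts no other guard is live
    std::vector<txiter> work{root};
    visited(root);
    while (!work.empty()) {
        txiter it = work.back();
        work.pop_back();
        // ... process *it ...
        for (txiter child : GetMemPoolChildren(it)) {
            if (!visited(child)) work.push_back(child); // first visit only
        }
    }
}

The Optional<txiter> overload of visited serves the same call sites when a lookup may have failed: a missing entry is treated as already visited, so callers can chain mapTx lookups without a separate null check.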