Diffstat (limited to 'src/node')
-rw-r--r--  src/node/blockstorage.cpp  245
-rw-r--r--  src/node/blockstorage.h  90
-rw-r--r--  src/node/chainstate.cpp  29
-rw-r--r--  src/node/chainstate.h  7
-rw-r--r--  src/node/coinstats.cpp  185
-rw-r--r--  src/node/coinstats.h  84
-rw-r--r--  src/node/connection_types.cpp  26
-rw-r--r--  src/node/connection_types.h  82
-rw-r--r--  src/node/context.cpp  6
-rw-r--r--  src/node/context.h  6
-rw-r--r--  src/node/eviction.cpp  240
-rw-r--r--  src/node/eviction.h  69
-rw-r--r--  src/node/interface_ui.cpp (renamed from src/node/ui_interface.cpp)  2
-rw-r--r--  src/node/interface_ui.h (renamed from src/node/ui_interface.h)  9
-rw-r--r--  src/node/interfaces.cpp  118
-rw-r--r--  src/node/mempool_persist_args.cpp  23
-rw-r--r--  src/node/mempool_persist_args.h  25
-rw-r--r--  src/node/miner.cpp  132
-rw-r--r--  src/node/miner.h  22
-rw-r--r--  src/node/transaction.h  1
20 files changed, 794 insertions, 607 deletions
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index cbfdcb6f11..103f4f0d7f 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -21,54 +21,95 @@
#include <util/system.h>
#include <validation.h>
+#include <unordered_map>
+
namespace node {
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
-bool fHavePruned = false;
bool fPruneMode = false;
uint64_t nPruneTarget = 0;
+bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
+{
+ // First sort by most total work, ...
+ if (pa->nChainWork > pb->nChainWork) return false;
+ if (pa->nChainWork < pb->nChainWork) return true;
+
+ // ... then by earliest time received, ...
+ if (pa->nSequenceId < pb->nSequenceId) return false;
+ if (pa->nSequenceId > pb->nSequenceId) return true;
+
+ // Use pointer address as tie breaker (should only happen with blocks
+ // loaded from disk, as those all have id 0).
+ if (pa < pb) return false;
+ if (pa > pb) return true;
+
+ // Identical blocks.
+ return false;
+}
+
+bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
+{
+ return pa->nHeight < pb->nHeight;
+}
+
static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();
-CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
+std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
+{
+ AssertLockHeld(cs_main);
+ std::vector<CBlockIndex*> rv;
+ rv.reserve(m_block_index.size());
+ for (auto& [_, block_index] : m_block_index) {
+ rv.push_back(&block_index);
+ }
+ return rv;
+}
+
+CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
+{
+ AssertLockHeld(cs_main);
+ BlockMap::iterator it = m_block_index.find(hash);
+ return it == m_block_index.end() ? nullptr : &it->second;
+}
+
+const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
{
AssertLockHeld(cs_main);
BlockMap::const_iterator it = m_block_index.find(hash);
- return it == m_block_index.end() ? nullptr : it->second;
+ return it == m_block_index.end() ? nullptr : &it->second;
}
-CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
+CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
{
AssertLockHeld(cs_main);
- // Check for duplicate
- uint256 hash = block.GetHash();
- BlockMap::iterator it = m_block_index.find(hash);
- if (it != m_block_index.end()) {
- return it->second;
+ auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
+ if (!inserted) {
+ return &mi->second;
}
+ CBlockIndex* pindexNew = &(*mi).second;
- // Construct new block index object
- CBlockIndex* pindexNew = new CBlockIndex(block);
// We assign the sequence id to blocks only when the full data is available,
// to avoid miners withholding blocks but broadcasting headers, to get a
// competitive advantage.
pindexNew->nSequenceId = 0;
- BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+
pindexNew->phashBlock = &((*mi).first);
BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
if (miPrev != m_block_index.end()) {
- pindexNew->pprev = (*miPrev).second;
+ pindexNew->pprev = &(*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
pindexNew->BuildSkip();
}
pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
pindexNew->RaiseValidity(BLOCK_VALID_TREE);
- if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
- pindexBestHeader = pindexNew;
+ if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
+ best_header = pindexNew;
+ }
m_dirty_blockindex.insert(pindexNew);
@@ -80,8 +121,8 @@ void BlockManager::PruneOneBlockFile(const int fileNumber)
AssertLockHeld(cs_main);
LOCK(cs_LastBlockFile);
- for (const auto& entry : m_block_index) {
- CBlockIndex* pindex = entry.second;
+ for (auto& entry : m_block_index) {
+ CBlockIndex* pindex = &entry.second;
if (pindex->nFile == fileNumber) {
pindex->nStatus &= ~BLOCK_HAVE_DATA;
pindex->nStatus &= ~BLOCK_HAVE_UNDO;
@@ -185,12 +226,17 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr
}
}
- LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
+ LogPrint(BCLog::PRUNE, "target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
nLastBlockWeCanPrune, count);
}
+void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
+ AssertLockHeld(::cs_main);
+ m_prune_locks[name] = lock_info;
+}
+
CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
{
AssertLockHeld(cs_main);
@@ -199,60 +245,27 @@ CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
return nullptr;
}
- // Return existing
- BlockMap::iterator mi = m_block_index.find(hash);
- if (mi != m_block_index.end()) {
- return (*mi).second;
+ const auto [mi, inserted]{m_block_index.try_emplace(hash)};
+ CBlockIndex* pindex = &(*mi).second;
+ if (inserted) {
+ pindex->phashBlock = &((*mi).first);
}
-
- // Create new
- CBlockIndex* pindexNew = new CBlockIndex();
- mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
- pindexNew->phashBlock = &((*mi).first);
-
- return pindexNew;
+ return pindex;
}
-bool BlockManager::LoadBlockIndex(
- const Consensus::Params& consensus_params,
- ChainstateManager& chainman)
+bool BlockManager::LoadBlockIndex(const Consensus::Params& consensus_params)
{
if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
return false;
}
// Calculate nChainWork
- std::vector<std::pair<int, CBlockIndex*>> vSortedByHeight;
- vSortedByHeight.reserve(m_block_index.size());
- for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
- CBlockIndex* pindex = item.second;
- vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
- }
- sort(vSortedByHeight.begin(), vSortedByHeight.end());
-
- // Find start of assumed-valid region.
- int first_assumed_valid_height = std::numeric_limits<int>::max();
-
- for (const auto& [height, block] : vSortedByHeight) {
- if (block->IsAssumedValid()) {
- auto chainstates = chainman.GetAll();
-
- // If we encounter an assumed-valid block index entry, ensure that we have
- // one chainstate that tolerates assumed-valid entries and another that does
- // not (i.e. the background validation chainstate), since assumed-valid
- // entries should always be pending validation by a fully-validated chainstate.
- auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
- assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
- assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
-
- first_assumed_valid_height = height;
- break;
- }
- }
+ std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
+ std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
+ CBlockIndexHeightOnlyComparator());
- for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight) {
+ for (CBlockIndex* pindex : vSortedByHeight) {
if (ShutdownRequested()) return false;
- CBlockIndex* pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
@@ -276,69 +289,14 @@ bool BlockManager::LoadBlockIndex(
pindex->nStatus |= BLOCK_FAILED_CHILD;
m_dirty_blockindex.insert(pindex);
}
- if (pindex->IsAssumedValid() ||
- (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
- (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
-
- // Fill each chainstate's block candidate set. Only add assumed-valid
- // blocks to the tip candidate set if the chainstate is allowed to rely on
- // assumed-valid blocks.
- //
- // If all setBlockIndexCandidates contained the assumed-valid blocks, the
- // background chainstate's ActivateBestChain() call would add assumed-valid
- // blocks to the chain (based on how FindMostWorkChain() works). Obviously
- // we don't want this since the purpose of the background validation chain
- // is to validate assued-valid blocks.
- //
- // Note: This is considering all blocks whose height is greater or equal to
- // the first assumed-valid block to be assumed-valid blocks, and excluding
- // them from the background chainstate's setBlockIndexCandidates set. This
- // does mean that some blocks which are not technically assumed-valid
- // (later blocks on a fork beginning before the first assumed-valid block)
- // might not get added to the the background chainstate, but this is ok,
- // because they will still be attached to the active chainstate if they
- // actually contain more work.
- //
- // Instead of this height-based approach, an earlier attempt was made at
- // detecting "holistically" whether the block index under consideration
- // relied on an assumed-valid ancestor, but this proved to be too slow to
- // be practical.
- for (CChainState* chainstate : chainman.GetAll()) {
- if (chainstate->reliesOnAssumedValid() ||
- pindex->nHeight < first_assumed_valid_height) {
- chainstate->setBlockIndexCandidates.insert(pindex);
- }
- }
- }
- if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
- chainman.m_best_invalid = pindex;
- }
if (pindex->pprev) {
pindex->BuildSkip();
}
- if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
- pindexBestHeader = pindex;
}
return true;
}
-void BlockManager::Unload()
-{
- m_blocks_unlinked.clear();
-
- for (const BlockMap::value_type& entry : m_block_index) {
- delete entry.second;
- }
-
- m_block_index.clear();
-
- m_blockfile_info.clear();
- m_last_blockfile = 0;
- m_dirty_blockindex.clear();
- m_dirty_fileinfo.clear();
-}
-
bool BlockManager::WriteBlockIndexDB()
{
AssertLockHeld(::cs_main);
@@ -360,9 +318,9 @@ bool BlockManager::WriteBlockIndexDB()
return true;
}
-bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
+bool BlockManager::LoadBlockIndexDB(const Consensus::Params& consensus_params)
{
- if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
+ if (!LoadBlockIndex(consensus_params)) {
return false;
}
@@ -386,10 +344,9 @@ bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
std::set<int> setBlkDataFiles;
- for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
- CBlockIndex* pindex = item.second;
- if (pindex->nStatus & BLOCK_HAVE_DATA) {
- setBlkDataFiles.insert(pindex->nFile);
+ for (const auto& [_, block_index] : m_block_index) {
+ if (block_index.nStatus & BLOCK_HAVE_DATA) {
+ setBlkDataFiles.insert(block_index.nFile);
}
}
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
@@ -400,8 +357,8 @@ bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
}
// Check whether we have ever pruned block & undo files
- m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
- if (fHavePruned) {
+ m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
+ if (m_have_pruned) {
LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
}
@@ -413,13 +370,13 @@ bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
return true;
}
-CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
+const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
{
const MapCheckpoints& checkpoints = data.mapCheckpoints;
for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
const uint256& hash = i.second;
- CBlockIndex* pindex = LookupBlockIndex(hash);
+ const CBlockIndex* pindex = LookupBlockIndex(hash);
if (pindex) {
return pindex;
}
@@ -427,9 +384,20 @@ CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
return nullptr;
}
-bool IsBlockPruned(const CBlockIndex* pblockindex)
+bool BlockManager::IsBlockPruned(const CBlockIndex* pblockindex)
{
- return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
+ AssertLockHeld(::cs_main);
+ return (m_have_pruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
+}
+
+const CBlockIndex* BlockManager::GetFirstStoredBlock(const CBlockIndex& start_block)
+{
+ AssertLockHeld(::cs_main);
+ const CBlockIndex* last_block = &start_block;
+ while (last_block->pprev && (last_block->pprev->nStatus & BLOCK_HAVE_DATA)) {
+ last_block = last_block->pprev;
+ }
+ return last_block;
}
// If we're using -prune with -reindex, then delete block files that will be ignored by the
@@ -513,7 +481,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
{
- FlatFilePos pos = pindex->GetUndoPos();
+ const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())};
+
if (pos.IsNull()) {
return error("%s: no undo data available", __func__);
}
@@ -712,6 +681,7 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
+ AssertLockHeld(::cs_main);
// Write undo information to disk
if (pindex->GetUndoPos().IsNull()) {
FlatFilePos _pos;
@@ -810,7 +780,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
}
block.resize(blk_size); // Zeroing of memory is intentional here
- filein.read((char*)block.data(), blk_size);
+ filein.read(MakeWritableByteSpan(block));
} catch (const std::exception& e) {
return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
}
@@ -818,17 +788,6 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
return true;
}
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
-{
- FlatFilePos block_pos;
- {
- LOCK(cs_main);
- block_pos = pindex->GetBlockPos();
- }
-
- return ReadRawBlockFromDisk(block, block_pos, message_start);
-}
-
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp)
{
@@ -864,7 +823,7 @@ struct CImportingNow {
}
};
-void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles, const ArgsManager& args)
+void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles, const ArgsManager& args, const fs::path& mempool_path)
{
SetSyscallSandboxPolicy(SyscallSandboxPolicy::INITIALIZATION_LOAD_BLOCKS);
ScheduleBatchPriority();
@@ -934,6 +893,6 @@ void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFile
return;
}
} // End scope of CImportingNow
- chainman.ActiveChainstate().LoadMempool(args);
+ chainman.ActiveChainstate().LoadMempool(mempool_path);
}
} // namespace node
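
A recurring pattern in the rewritten blockstorage.cpp above is try_emplace() on the by-value m_block_index: insert a fresh CBlockIndex when the hash is new, otherwise return a pointer to the existing entry. The following self-contained sketch (stand-in types, not the real uint256/CBlockIndex, and not part of the diff) shows that insert-or-lookup idiom:

#include <iostream>
#include <string>
#include <unordered_map>

struct Index { int height{-1}; };
using Map = std::unordered_map<std::string, Index>;

// Insert-or-return-existing, mirroring the try_emplace usage in AddToBlockIndex/InsertBlockIndex.
Index* InsertOrGet(Map& m, const std::string& hash)
{
    const auto [it, inserted] = m.try_emplace(hash); // no-op if the key is already present
    if (inserted) it->second.height = 0;             // initialize only on first insertion
    return &it->second;                              // address of the mapped value
}

int main()
{
    Map m;
    Index* a = InsertOrGet(m, "deadbeef");
    Index* b = InsertOrGet(m, "deadbeef");
    std::cout << (a == b) << '\n'; // prints 1: the second call returned the existing entry
}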
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 78c9210892..9b76371aae 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -5,19 +5,24 @@
#ifndef BITCOIN_NODE_BLOCKSTORAGE_H
#define BITCOIN_NODE_BLOCKSTORAGE_H
+#include <attributes.h>
+#include <chain.h>
#include <fs.h>
-#include <protocol.h> // For CMessageHeader::MessageStartChars
+#include <protocol.h>
+#include <sync.h>
#include <txdb.h>
#include <atomic>
#include <cstdint>
+#include <unordered_map>
#include <vector>
+extern RecursiveMutex cs_main;
+
class ArgsManager;
class BlockValidationState;
class CBlock;
class CBlockFileInfo;
-class CBlockIndex;
class CBlockUndo;
class CChain;
class CChainParams;
@@ -42,19 +47,30 @@ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
extern std::atomic_bool fImporting;
extern std::atomic_bool fReindex;
/** Pruning-related variables and constants */
-/** True if any block files have ever been pruned. */
-extern bool fHavePruned;
/** True if we're running in -prune mode. */
extern bool fPruneMode;
-/** Number of MiB of block files that we're trying to stay below. */
+/** Number of bytes of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
-typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
+// Because validation code takes pointers to the map's CBlockIndex objects, if
+// we ever switch to another associative container, we need to either use a
+// container that has stable addressing (true of all std associative
+// containers), or make the key a `std::unique_ptr<CBlockIndex>`
+using BlockMap = std::unordered_map<uint256, CBlockIndex, BlockHasher>;
struct CBlockIndexWorkComparator {
bool operator()(const CBlockIndex* pa, const CBlockIndex* pb) const;
};
+struct CBlockIndexHeightOnlyComparator {
+ /* Only compares the height of two block indices, doesn't try to tie-break */
+ bool operator()(const CBlockIndex* pa, const CBlockIndex* pb) const;
+};
+
+struct PruneLockInfo {
+ int height_first{std::numeric_limits<int>::max()}; //! Height of earliest block that should be kept and not pruned
+};
+
/**
* Maintains a tree of blocks (stored in `m_block_index`) which is consulted
* to determine where the most-work tip is.
@@ -68,6 +84,13 @@ class BlockManager
friend ChainstateManager;
private:
+ /**
+ * Load the blocktree off disk and into memory. Populate certain metadata
+ * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
+ * collections like m_dirty_blockindex.
+ */
+ bool LoadBlockIndex(const Consensus::Params& consensus_params)
+ EXCLUSIVE_LOCKS_REQUIRED(cs_main);
void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
void FlushUndoFile(int block_file, bool finalize = false);
bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown);
@@ -108,9 +131,19 @@ private:
/** Dirty block file entries. */
std::set<int> m_dirty_fileinfo;
+ /**
+ * Map from external index name to oldest block that must not be pruned.
+ *
+ * @note Internally, only blocks at height (height_first - PRUNE_LOCK_BUFFER - 1) and
+ * below will be pruned, but callers should avoid assuming any particular buffer size.
+ */
+ std::unordered_map<std::string, PruneLockInfo> m_prune_locks GUARDED_BY(::cs_main);
+
public:
BlockMap m_block_index GUARDED_BY(cs_main);
+ std::vector<CBlockIndex*> GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
/**
* All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
* Pruned nodes may have entries where B is missing data.
@@ -120,33 +153,23 @@ public:
std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
- bool LoadBlockIndexDB(ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+ bool LoadBlockIndexDB(const Consensus::Params& consensus_params) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
- /**
- * Load the blocktree off disk and into memory. Populate certain metadata
- * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
- * collections like m_dirty_blockindex.
- */
- bool LoadBlockIndex(
- const Consensus::Params& consensus_params,
- ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- /** Clear all data members. */
- void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ CBlockIndex* AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Create a new block index entry for a given block hash */
CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
//! Mark one block file as pruned (modify associated database entries)
void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- CBlockIndex* LookupBlockIndex(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ CBlockIndex* LookupBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ const CBlockIndex* LookupBlockIndex(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Get block file info entry for one block file */
CBlockFileInfo* GetBlockFileInfo(size_t n);
- bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams);
+ bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp);
@@ -154,16 +177,20 @@ public:
uint64_t CalculateCurrentUsage();
//! Returns last CBlockIndex* that is a checkpoint
- CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ const CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- ~BlockManager()
- {
- Unload();
- }
-};
+ //! Find the first block that is not pruned
+ const CBlockIndex* GetFirstStoredBlock(const CBlockIndex& start_block LIFETIMEBOUND) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
-//! Check whether the block associated with this index entry is pruned or not.
-bool IsBlockPruned(const CBlockIndex* pblockindex);
+ /** True if any block files have ever been pruned. */
+ bool m_have_pruned = false;
+
+ //! Check whether the block associated with this index entry is pruned or not.
+ bool IsBlockPruned(const CBlockIndex* pblockindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
+ //! Create or update a prune lock identified by its name
+ void UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+};
void CleanupBlockRevFiles();
@@ -181,11 +208,10 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams);
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start);
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start);
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex);
-void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles, const ArgsManager& args);
+void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles, const ArgsManager& args, const fs::path& mempool_path);
} // namespace node
#endif // BITCOIN_NODE_BLOCKSTORAGE_H
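
The comment introduced above the new BlockMap alias states the invariant this refactor relies on: validation code keeps raw CBlockIndex* pointers into the map, which is only safe because std::unordered_map never invalidates pointers or references to mapped values (iterators may be invalidated on rehash, references are not). A minimal, self-contained check of that property with stand-in types:

#include <cassert>
#include <string>
#include <unordered_map>

struct Index { int height{0}; };

int main()
{
    std::unordered_map<std::string, Index> block_index;
    Index* genesis = &block_index.try_emplace("genesis").first->second;
    for (int i = 0; i < 100000; ++i) {
        // Force plenty of rehashing by inserting many entries.
        block_index.try_emplace(std::to_string(i)).first->second.height = i;
    }
    assert(genesis == &block_index.at("genesis")); // the pointer is still valid
}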
diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp
index e21116dd7e..54ba5b7966 100644
--- a/src/node/chainstate.cpp
+++ b/src/node/chainstate.cpp
@@ -13,7 +13,6 @@ std::optional<ChainstateLoadingError> LoadChainstate(bool fReset,
ChainstateManager& chainman,
CTxMemPool* mempool,
bool fPruneMode,
- const Consensus::Params& consensus_params,
bool fReindexChainState,
int64_t nBlockTreeDBCache,
int64_t nCoinDBCache,
@@ -32,8 +31,6 @@ std::optional<ChainstateLoadingError> LoadChainstate(bool fReset,
chainman.m_total_coinstip_cache = nCoinCacheUsage;
chainman.m_total_coinsdb_cache = nCoinDBCache;
- UnloadBlockIndex(mempool, chainman);
-
auto& pblocktree{chainman.m_blockman.m_block_tree_db};
// new CBlockTreeDB tries to delete the existing file, which
// fails if it's still open from the previous loop. Close it first:
@@ -49,7 +46,7 @@ std::optional<ChainstateLoadingError> LoadChainstate(bool fReset,
if (shutdown_requested && shutdown_requested()) return ChainstateLoadingError::SHUTDOWN_PROBED;
- // LoadBlockIndex will load fHavePruned if we've ever removed a
+ // LoadBlockIndex will load m_have_pruned if we've ever removed a
// block file from disk.
// Note that it also sets fReindex based on the disk flag!
// From here on out fReindex and fReset mean something different!
@@ -59,13 +56,13 @@ std::optional<ChainstateLoadingError> LoadChainstate(bool fReset,
}
if (!chainman.BlockIndex().empty() &&
- !chainman.m_blockman.LookupBlockIndex(consensus_params.hashGenesisBlock)) {
+ !chainman.m_blockman.LookupBlockIndex(chainman.GetConsensus().hashGenesisBlock)) {
return ChainstateLoadingError::ERROR_BAD_GENESIS_BLOCK;
}
// Check for changed -prune state. What we are concerned about is a user who has pruned blocks
// in the past, but is now trying to run unpruned.
- if (fHavePruned && !fPruneMode) {
+ if (chainman.m_blockman.m_have_pruned && !fPruneMode) {
return ChainstateLoadingError::ERROR_PRUNED_NEEDS_REINDEX;
}
@@ -82,17 +79,17 @@ std::optional<ChainstateLoadingError> LoadChainstate(bool fReset,
for (CChainState* chainstate : chainman.GetAll()) {
chainstate->InitCoinsDB(
- /* cache_size_bytes */ nCoinDBCache,
- /* in_memory */ coins_db_in_memory,
- /* should_wipe */ fReset || fReindexChainState);
+ /*cache_size_bytes=*/nCoinDBCache,
+ /*in_memory=*/coins_db_in_memory,
+ /*should_wipe=*/fReset || fReindexChainState);
if (coins_error_cb) {
chainstate->CoinsErrorCatcher().AddReadErrCallback(coins_error_cb);
}
- // If necessary, upgrade from older database format.
+ // Refuse to load unsupported database format.
// This is a no-op if we cleared the coinsviewdb with -reindex or -reindex-chainstate
- if (!chainstate->CoinsDB().Upgrade()) {
+ if (chainstate->CoinsDB().NeedsUpgrade()) {
return ChainstateLoadingError::ERROR_CHAINSTATE_UPGRADE_FAILED;
}
@@ -128,10 +125,8 @@ std::optional<ChainstateLoadingError> LoadChainstate(bool fReset,
std::optional<ChainstateLoadVerifyError> VerifyLoadedChainstate(ChainstateManager& chainman,
bool fReset,
bool fReindexChainState,
- const Consensus::Params& consensus_params,
- unsigned int check_blocks,
- unsigned int check_level,
- std::function<int64_t()> get_unix_time_seconds)
+ int check_blocks,
+ int check_level)
{
auto is_coinsview_empty = [&](CChainState* chainstate) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
return fReset || fReindexChainState || chainstate->CoinsTip().GetBestBlock().IsNull();
@@ -142,12 +137,12 @@ std::optional<ChainstateLoadVerifyError> VerifyLoadedChainstate(ChainstateManage
for (CChainState* chainstate : chainman.GetAll()) {
if (!is_coinsview_empty(chainstate)) {
const CBlockIndex* tip = chainstate->m_chain.Tip();
- if (tip && tip->nTime > get_unix_time_seconds() + MAX_FUTURE_BLOCK_TIME) {
+ if (tip && tip->nTime > GetTime() + MAX_FUTURE_BLOCK_TIME) {
return ChainstateLoadVerifyError::ERROR_BLOCK_FROM_FUTURE;
}
if (!CVerifyDB().VerifyDB(
- *chainstate, consensus_params, chainstate->CoinsDB(),
+ *chainstate, chainman.GetConsensus(), chainstate->CoinsDB(),
check_level,
check_blocks)) {
return ChainstateLoadVerifyError::ERROR_CORRUPTED_BLOCK_DB;
diff --git a/src/node/chainstate.h b/src/node/chainstate.h
index 279f187642..ff7935e8e0 100644
--- a/src/node/chainstate.h
+++ b/src/node/chainstate.h
@@ -59,7 +59,6 @@ std::optional<ChainstateLoadingError> LoadChainstate(bool fReset,
ChainstateManager& chainman,
CTxMemPool* mempool,
bool fPruneMode,
- const Consensus::Params& consensus_params,
bool fReindexChainState,
int64_t nBlockTreeDBCache,
int64_t nCoinDBCache,
@@ -78,10 +77,8 @@ enum class ChainstateLoadVerifyError {
std::optional<ChainstateLoadVerifyError> VerifyLoadedChainstate(ChainstateManager& chainman,
bool fReset,
bool fReindexChainState,
- const Consensus::Params& consensus_params,
- unsigned int check_blocks,
- unsigned int check_level,
- std::function<int64_t()> get_unix_time_seconds);
+ int check_blocks,
+ int check_level);
} // namespace node
#endif // BITCOIN_NODE_CHAINSTATE_H
diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp
deleted file mode 100644
index eed43a1bc7..0000000000
--- a/src/node/coinstats.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2021 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <node/coinstats.h>
-
-#include <coins.h>
-#include <crypto/muhash.h>
-#include <hash.h>
-#include <index/coinstatsindex.h>
-#include <serialize.h>
-#include <uint256.h>
-#include <util/overflow.h>
-#include <util/system.h>
-#include <validation.h>
-
-#include <map>
-
-namespace node {
-// Database-independent metric indicating the UTXO set size
-uint64_t GetBogoSize(const CScript& script_pub_key)
-{
- return 32 /* txid */ +
- 4 /* vout index */ +
- 4 /* height + coinbase */ +
- 8 /* amount */ +
- 2 /* scriptPubKey len */ +
- script_pub_key.size() /* scriptPubKey */;
-}
-
-CDataStream TxOutSer(const COutPoint& outpoint, const Coin& coin) {
- CDataStream ss(SER_DISK, PROTOCOL_VERSION);
- ss << outpoint;
- ss << static_cast<uint32_t>(coin.nHeight * 2 + coin.fCoinBase);
- ss << coin.out;
- return ss;
-}
-
-//! Warning: be very careful when changing this! assumeutxo and UTXO snapshot
-//! validation commitments are reliant on the hash constructed by this
-//! function.
-//!
-//! If the construction of this hash is changed, it will invalidate
-//! existing UTXO snapshots. This will not result in any kind of consensus
-//! failure, but it will force clients that were expecting to make use of
-//! assumeutxo to do traditional IBD instead.
-//!
-//! It is also possible, though very unlikely, that a change in this
-//! construction could cause a previously invalid (and potentially malicious)
-//! UTXO snapshot to be considered valid.
-static void ApplyHash(CHashWriter& ss, const uint256& hash, const std::map<uint32_t, Coin>& outputs)
-{
- for (auto it = outputs.begin(); it != outputs.end(); ++it) {
- if (it == outputs.begin()) {
- ss << hash;
- ss << VARINT(it->second.nHeight * 2 + it->second.fCoinBase ? 1u : 0u);
- }
-
- ss << VARINT(it->first + 1);
- ss << it->second.out.scriptPubKey;
- ss << VARINT_MODE(it->second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED);
-
- if (it == std::prev(outputs.end())) {
- ss << VARINT(0u);
- }
- }
-}
-
-static void ApplyHash(std::nullptr_t, const uint256& hash, const std::map<uint32_t, Coin>& outputs) {}
-
-static void ApplyHash(MuHash3072& muhash, const uint256& hash, const std::map<uint32_t, Coin>& outputs)
-{
- for (auto it = outputs.begin(); it != outputs.end(); ++it) {
- COutPoint outpoint = COutPoint(hash, it->first);
- Coin coin = it->second;
- muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin)));
- }
-}
-
-static void ApplyStats(CCoinsStats& stats, const uint256& hash, const std::map<uint32_t, Coin>& outputs)
-{
- assert(!outputs.empty());
- stats.nTransactions++;
- for (auto it = outputs.begin(); it != outputs.end(); ++it) {
- stats.nTransactionOutputs++;
- if (stats.total_amount.has_value()) {
- stats.total_amount = CheckedAdd(*stats.total_amount, it->second.out.nValue);
- }
- stats.nBogoSize += GetBogoSize(it->second.out.scriptPubKey);
- }
-}
-
-//! Calculate statistics about the unspent transaction output set
-template <typename T>
-static bool GetUTXOStats(CCoinsView* view, BlockManager& blockman, CCoinsStats& stats, T hash_obj, const std::function<void()>& interruption_point, const CBlockIndex* pindex)
-{
- std::unique_ptr<CCoinsViewCursor> pcursor(view->Cursor());
- assert(pcursor);
-
- if (!pindex) {
- LOCK(cs_main);
- pindex = blockman.LookupBlockIndex(view->GetBestBlock());
- }
- stats.nHeight = Assert(pindex)->nHeight;
- stats.hashBlock = pindex->GetBlockHash();
-
- // Use CoinStatsIndex if it is requested and available and a hash_type of Muhash or None was requested
- if ((stats.m_hash_type == CoinStatsHashType::MUHASH || stats.m_hash_type == CoinStatsHashType::NONE) && g_coin_stats_index && stats.index_requested) {
- stats.index_used = true;
- return g_coin_stats_index->LookUpStats(pindex, stats);
- }
-
- PrepareHash(hash_obj, stats);
-
- uint256 prevkey;
- std::map<uint32_t, Coin> outputs;
- while (pcursor->Valid()) {
- interruption_point();
- COutPoint key;
- Coin coin;
- if (pcursor->GetKey(key) && pcursor->GetValue(coin)) {
- if (!outputs.empty() && key.hash != prevkey) {
- ApplyStats(stats, prevkey, outputs);
- ApplyHash(hash_obj, prevkey, outputs);
- outputs.clear();
- }
- prevkey = key.hash;
- outputs[key.n] = std::move(coin);
- stats.coins_count++;
- } else {
- return error("%s: unable to read value", __func__);
- }
- pcursor->Next();
- }
- if (!outputs.empty()) {
- ApplyStats(stats, prevkey, outputs);
- ApplyHash(hash_obj, prevkey, outputs);
- }
-
- FinalizeHash(hash_obj, stats);
-
- stats.nDiskSize = view->EstimateSize();
- return true;
-}
-
-bool GetUTXOStats(CCoinsView* view, BlockManager& blockman, CCoinsStats& stats, const std::function<void()>& interruption_point, const CBlockIndex* pindex)
-{
- switch (stats.m_hash_type) {
- case(CoinStatsHashType::HASH_SERIALIZED): {
- CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
- return GetUTXOStats(view, blockman, stats, ss, interruption_point, pindex);
- }
- case(CoinStatsHashType::MUHASH): {
- MuHash3072 muhash;
- return GetUTXOStats(view, blockman, stats, muhash, interruption_point, pindex);
- }
- case(CoinStatsHashType::NONE): {
- return GetUTXOStats(view, blockman, stats, nullptr, interruption_point, pindex);
- }
- } // no default case, so the compiler can warn about missing cases
- assert(false);
-}
-
-// The legacy hash serializes the hashBlock
-static void PrepareHash(CHashWriter& ss, const CCoinsStats& stats)
-{
- ss << stats.hashBlock;
-}
-// MuHash does not need the prepare step
-static void PrepareHash(MuHash3072& muhash, CCoinsStats& stats) {}
-static void PrepareHash(std::nullptr_t, CCoinsStats& stats) {}
-
-static void FinalizeHash(CHashWriter& ss, CCoinsStats& stats)
-{
- stats.hashSerialized = ss.GetHash();
-}
-static void FinalizeHash(MuHash3072& muhash, CCoinsStats& stats)
-{
- uint256 out;
- muhash.Finalize(out);
- stats.hashSerialized = out;
-}
-static void FinalizeHash(std::nullptr_t, CCoinsStats& stats) {}
-} // namespace node
diff --git a/src/node/coinstats.h b/src/node/coinstats.h
deleted file mode 100644
index aa771b18b0..0000000000
--- a/src/node/coinstats.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2021 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_NODE_COINSTATS_H
-#define BITCOIN_NODE_COINSTATS_H
-
-#include <chain.h>
-#include <coins.h>
-#include <consensus/amount.h>
-#include <streams.h>
-#include <uint256.h>
-
-#include <cstdint>
-#include <functional>
-
-class CCoinsView;
-namespace node {
-class BlockManager;
-} // namespace node
-
-namespace node {
-enum class CoinStatsHashType {
- HASH_SERIALIZED,
- MUHASH,
- NONE,
-};
-
-struct CCoinsStats {
- //! Which hash type to use
- const CoinStatsHashType m_hash_type;
-
- int nHeight{0};
- uint256 hashBlock{};
- uint64_t nTransactions{0};
- uint64_t nTransactionOutputs{0};
- uint64_t nBogoSize{0};
- uint256 hashSerialized{};
- uint64_t nDiskSize{0};
- //! The total amount, or nullopt if an overflow occurred calculating it
- std::optional<CAmount> total_amount{0};
-
- //! The number of coins contained.
- uint64_t coins_count{0};
-
- //! Signals if the coinstatsindex should be used (when available).
- bool index_requested{true};
- //! Signals if the coinstatsindex was used to retrieve the statistics.
- bool index_used{false};
-
- // Following values are only available from coinstats index
-
- //! Total cumulative amount of block subsidies up to and including this block
- CAmount total_subsidy{0};
- //! Total cumulative amount of unspendable coins up to and including this block
- CAmount total_unspendable_amount{0};
- //! Total cumulative amount of prevouts spent up to and including this block
- CAmount total_prevout_spent_amount{0};
- //! Total cumulative amount of outputs created up to and including this block
- CAmount total_new_outputs_ex_coinbase_amount{0};
- //! Total cumulative amount of coinbase outputs up to and including this block
- CAmount total_coinbase_amount{0};
- //! The unspendable coinbase amount from the genesis block
- CAmount total_unspendables_genesis_block{0};
- //! The two unspendable coinbase outputs total amount caused by BIP30
- CAmount total_unspendables_bip30{0};
- //! Total cumulative amount of outputs sent to unspendable scripts (OP_RETURN for example) up to and including this block
- CAmount total_unspendables_scripts{0};
- //! Total cumulative amount of coins lost due to unclaimed miner rewards up to and including this block
- CAmount total_unspendables_unclaimed_rewards{0};
-
- CCoinsStats(CoinStatsHashType hash_type) : m_hash_type(hash_type) {}
-};
-
-//! Calculate statistics about the unspent transaction output set
-bool GetUTXOStats(CCoinsView* view, node::BlockManager& blockman, CCoinsStats& stats, const std::function<void()>& interruption_point = {}, const CBlockIndex* pindex = nullptr);
-
-uint64_t GetBogoSize(const CScript& script_pub_key);
-
-CDataStream TxOutSer(const COutPoint& outpoint, const Coin& coin);
-} // namespace node
-
-#endif // BITCOIN_NODE_COINSTATS_H
diff --git a/src/node/connection_types.cpp b/src/node/connection_types.cpp
new file mode 100644
index 0000000000..904f4371aa
--- /dev/null
+++ b/src/node/connection_types.cpp
@@ -0,0 +1,26 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <node/connection_types.h>
+#include <cassert>
+
+std::string ConnectionTypeAsString(ConnectionType conn_type)
+{
+ switch (conn_type) {
+ case ConnectionType::INBOUND:
+ return "inbound";
+ case ConnectionType::MANUAL:
+ return "manual";
+ case ConnectionType::FEELER:
+ return "feeler";
+ case ConnectionType::OUTBOUND_FULL_RELAY:
+ return "outbound-full-relay";
+ case ConnectionType::BLOCK_RELAY:
+ return "block-relay-only";
+ case ConnectionType::ADDR_FETCH:
+ return "addr-fetch";
+ } // no default case, so the compiler can warn about missing cases
+
+ assert(false);
+}
diff --git a/src/node/connection_types.h b/src/node/connection_types.h
new file mode 100644
index 0000000000..5e1abcace6
--- /dev/null
+++ b/src/node/connection_types.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_CONNECTION_TYPES_H
+#define BITCOIN_NODE_CONNECTION_TYPES_H
+
+#include <string>
+
+/** Different types of connections to a peer. This enum encapsulates the
+ * information we have available at the time of opening or accepting the
+ * connection. Aside from INBOUND, all types are initiated by us.
+ *
+ * If adding or removing types, please update CONNECTION_TYPE_DOC in
+ * src/rpc/net.cpp and src/qt/rpcconsole.cpp, as well as the descriptions in
+ * src/qt/guiutil.cpp and src/bitcoin-cli.cpp::NetinfoRequestHandler. */
+enum class ConnectionType {
+ /**
+ * Inbound connections are those initiated by a peer. This is the only
+ * property we know at the time of connection, until P2P messages are
+ * exchanged.
+ */
+ INBOUND,
+
+ /**
+ * These are the default connections that we use to connect with the
+ * network. There is no restriction on what is relayed; by default we relay
+ * blocks, addresses & transactions. We automatically attempt to open
+ * MAX_OUTBOUND_FULL_RELAY_CONNECTIONS using addresses from our AddrMan.
+ */
+ OUTBOUND_FULL_RELAY,
+
+
+ /**
+ * We open manual connections to addresses that users explicitly requested
+ * via the addnode RPC or the -addnode/-connect configuration options. Even if a
+ * manual connection is misbehaving, we do not automatically disconnect or
+ * add it to our discouragement filter.
+ */
+ MANUAL,
+
+ /**
+ * Feeler connections are short-lived connections made to check that a node
+ * is alive. They can be useful for:
+ * - test-before-evict: if one of the peers is considered for eviction from
+ * our AddrMan because another peer is mapped to the same slot in the tried table,
+ * evict only if this longer-known peer is offline.
+ * - move node addresses from New to Tried table, so that we have more
+ * connectable addresses in our AddrMan.
+ * Note that in the literature ("Eclipse Attacks on Bitcoin’s Peer-to-Peer Network")
+ * only the latter feature is referred to as "feeler connections",
+ * although in our codebase feeler connections encompass test-before-evict as well.
+ * We make these connections approximately every FEELER_INTERVAL:
+ * first we resolve previously found collisions if they exist (test-before-evict),
+ * otherwise we connect to a node from the new table.
+ */
+ FEELER,
+
+ /**
+ * We use block-relay-only connections to help prevent against partition
+ * attacks. By not relaying transactions or addresses, these connections
+ * are harder to detect by a third party, thus helping obfuscate the
+ * network topology. We automatically attempt to open
+ * MAX_BLOCK_RELAY_ONLY_ANCHORS using addresses from our anchors.dat. Then
+ * addresses from our AddrMan if MAX_BLOCK_RELAY_ONLY_CONNECTIONS
+ * isn't reached yet.
+ */
+ BLOCK_RELAY,
+
+ /**
+ * AddrFetch connections are short lived connections used to solicit
+ * addresses from peers. These are initiated to addresses submitted via the
+ * -seednode command line argument, or under certain conditions when the
+ * AddrMan is empty.
+ */
+ ADDR_FETCH,
+};
+
+/** Convert ConnectionType enum to a string value */
+std::string ConnectionTypeAsString(ConnectionType conn_type);
+
+#endif // BITCOIN_NODE_CONNECTION_TYPES_H
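
The two new files above introduce the ConnectionType enum and its string helper as a stand-alone unit. A minimal usage sketch, assuming the include path added by this diff:

#include <node/connection_types.h>
#include <iostream>

int main()
{
    std::cout << ConnectionTypeAsString(ConnectionType::BLOCK_RELAY) << '\n'; // prints "block-relay-only"
}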
diff --git a/src/node/context.cpp b/src/node/context.cpp
index 893c32f1bc..d80b8ca7a7 100644
--- a/src/node/context.cpp
+++ b/src/node/context.cpp
@@ -7,14 +7,16 @@
#include <addrman.h>
#include <banman.h>
#include <interfaces/chain.h>
+#include <kernel/context.h>
#include <net.h>
#include <net_processing.h>
+#include <netgroup.h>
#include <policy/fees.h>
#include <scheduler.h>
#include <txmempool.h>
#include <validation.h>
namespace node {
-NodeContext::NodeContext() {}
-NodeContext::~NodeContext() {}
+NodeContext::NodeContext() = default;
+NodeContext::~NodeContext() = default;
} // namespace node
diff --git a/src/node/context.h b/src/node/context.h
index 644c997531..31be308787 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -5,6 +5,8 @@
#ifndef BITCOIN_NODE_CONTEXT_H
#define BITCOIN_NODE_CONTEXT_H
+#include <kernel/context.h>
+
#include <cassert>
#include <functional>
#include <memory>
@@ -18,6 +20,7 @@ class CConnman;
class CScheduler;
class CTxMemPool;
class ChainstateManager;
+class NetGroupManager;
class PeerManager;
namespace interfaces {
class Chain;
@@ -38,11 +41,14 @@ namespace node {
//! any member functions. It should just be a collection of references that can
//! be used without pulling in unwanted dependencies or functionality.
struct NodeContext {
+ //! libbitcoin_kernel context
+ std::unique_ptr<kernel::Context> kernel;
//! Init interface for initializing current process and connecting to other processes.
interfaces::Init* init{nullptr};
std::unique_ptr<AddrMan> addrman;
std::unique_ptr<CConnman> connman;
std::unique_ptr<CTxMemPool> mempool;
+ std::unique_ptr<const NetGroupManager> netgroupman;
std::unique_ptr<CBlockPolicyEstimator> fee_estimator;
std::unique_ptr<PeerManager> peerman;
std::unique_ptr<ChainstateManager> chainman;
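
NodeContext still defines its defaulted constructor and destructor out of line in context.cpp after the `= default` change; a plausible reason (an assumption, not stated in the diff) is the usual unique_ptr-to-incomplete-type idiom, since the struct holds std::unique_ptr members to forward-declared types. The idiom in isolation, with hypothetical Widget/Holder names:

#include <memory>

class Widget; // forward declaration only; Widget is incomplete here

struct Holder {
    Holder();   // declared here, defined where Widget is complete,
    ~Holder();  // so that ~unique_ptr<Widget> can be instantiated there
    std::unique_ptr<Widget> w;
};

// Normally in the matching .cpp file, where the full definition is visible:
class Widget {};
Holder::Holder() = default;
Holder::~Holder() = default;

int main() { Holder h; }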
diff --git a/src/node/eviction.cpp b/src/node/eviction.cpp
new file mode 100644
index 0000000000..33406931d4
--- /dev/null
+++ b/src/node/eviction.cpp
@@ -0,0 +1,240 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <node/eviction.h>
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <vector>
+
+
+static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ return a.m_min_ping_time > b.m_min_ping_time;
+}
+
+static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ return a.m_connected > b.m_connected;
+}
+
+static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) {
+ return a.nKeyedNetGroup < b.nKeyedNetGroup;
+}
+
+static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have many peers which have not yet relayed a block.
+ if (a.m_last_block_time != b.m_last_block_time) return a.m_last_block_time < b.m_last_block_time;
+ if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
+ return a.m_connected > b.m_connected;
+}
+
+static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have more than a few peers that have not yet relayed txn.
+ if (a.m_last_tx_time != b.m_last_tx_time) return a.m_last_tx_time < b.m_last_tx_time;
+ if (a.m_relay_txs != b.m_relay_txs) return b.m_relay_txs;
+ if (a.fBloomFilter != b.fBloomFilter) return a.fBloomFilter;
+ return a.m_connected > b.m_connected;
+}
+
+// Pick out the potential block-relay only peers, and sort them by last block time.
+static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ if (a.m_relay_txs != b.m_relay_txs) return a.m_relay_txs;
+ if (a.m_last_block_time != b.m_last_block_time) return a.m_last_block_time < b.m_last_block_time;
+ if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
+ return a.m_connected > b.m_connected;
+}
+
+/**
+ * Sort eviction candidates by network/localhost and connection uptime.
+ * Candidates near the beginning are more likely to be evicted, and those
+ * near the end are more likely to be protected, e.g. less likely to be evicted.
+ * - First, nodes that are not `is_local` and that do not belong to `network`,
+ * sorted by increasing uptime (from most recently connected to connected longer).
+ * - Then, nodes that are `is_local` or belong to `network`, sorted by increasing uptime.
+ */
+struct CompareNodeNetworkTime {
+ const bool m_is_local;
+ const Network m_network;
+ CompareNodeNetworkTime(bool is_local, Network network) : m_is_local(is_local), m_network(network) {}
+ bool operator()(const NodeEvictionCandidate& a, const NodeEvictionCandidate& b) const
+ {
+ if (m_is_local && a.m_is_local != b.m_is_local) return b.m_is_local;
+ if ((a.m_network == m_network) != (b.m_network == m_network)) return b.m_network == m_network;
+ return a.m_connected > b.m_connected;
+ };
+};
+
+//! Sort an array by the specified comparator, then erase the last K elements where predicate is true.
+template <typename T, typename Comparator>
+static void EraseLastKElements(
+ std::vector<T>& elements, Comparator comparator, size_t k,
+ std::function<bool(const NodeEvictionCandidate&)> predicate = [](const NodeEvictionCandidate& n) { return true; })
+{
+ std::sort(elements.begin(), elements.end(), comparator);
+ size_t eraseSize = std::min(k, elements.size());
+ elements.erase(std::remove_if(elements.end() - eraseSize, elements.end(), predicate), elements.end());
+}
+
+void ProtectNoBanConnections(std::vector<NodeEvictionCandidate>& eviction_candidates)
+{
+ eviction_candidates.erase(std::remove_if(eviction_candidates.begin(), eviction_candidates.end(),
+ [](NodeEvictionCandidate const& n) {
+ return n.m_noban;
+ }),
+ eviction_candidates.end());
+}
+
+void ProtectOutboundConnections(std::vector<NodeEvictionCandidate>& eviction_candidates)
+{
+ eviction_candidates.erase(std::remove_if(eviction_candidates.begin(), eviction_candidates.end(),
+ [](NodeEvictionCandidate const& n) {
+ return n.m_conn_type != ConnectionType::INBOUND;
+ }),
+ eviction_candidates.end());
+}
+
+void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& eviction_candidates)
+{
+ // Protect the half of the remaining nodes which have been connected the longest.
+ // This replicates the non-eviction implicit behavior, and precludes attacks that start later.
+ // To favorise the diversity of our peer connections, reserve up to half of these protected
+ // spots for Tor/onion, localhost, I2P, and CJDNS peers, even if they're not longest uptime
+ // overall. This helps protect these higher-latency peers that tend to be otherwise
+ // disadvantaged under our eviction criteria.
+ const size_t initial_size = eviction_candidates.size();
+ const size_t total_protect_size{initial_size / 2};
+
+ // Disadvantaged networks to protect. In the case of equal counts, earlier array members
+ // have the first opportunity to recover unused slots from the previous iteration.
+ struct Net { bool is_local; Network id; size_t count; };
+ std::array<Net, 4> networks{
+ {{false, NET_CJDNS, 0}, {false, NET_I2P, 0}, {/*localhost=*/true, NET_MAX, 0}, {false, NET_ONION, 0}}};
+
+ // Count and store the number of eviction candidates per network.
+ for (Net& n : networks) {
+ n.count = std::count_if(eviction_candidates.cbegin(), eviction_candidates.cend(),
+ [&n](const NodeEvictionCandidate& c) {
+ return n.is_local ? c.m_is_local : c.m_network == n.id;
+ });
+ }
+ // Sort `networks` by ascending candidate count, to give networks having fewer candidates
+ // the first opportunity to recover unused protected slots from the previous iteration.
+ std::stable_sort(networks.begin(), networks.end(), [](Net a, Net b) { return a.count < b.count; });
+
+ // Protect up to 25% of the eviction candidates by disadvantaged network.
+ const size_t max_protect_by_network{total_protect_size / 2};
+ size_t num_protected{0};
+
+ while (num_protected < max_protect_by_network) {
+ // Count the number of disadvantaged networks from which we have peers to protect.
+ auto num_networks = std::count_if(networks.begin(), networks.end(), [](const Net& n) { return n.count; });
+ if (num_networks == 0) {
+ break;
+ }
+ const size_t disadvantaged_to_protect{max_protect_by_network - num_protected};
+ const size_t protect_per_network{std::max(disadvantaged_to_protect / num_networks, static_cast<size_t>(1))};
+ // Early exit flag if there are no remaining candidates by disadvantaged network.
+ bool protected_at_least_one{false};
+
+ for (Net& n : networks) {
+ if (n.count == 0) continue;
+ const size_t before = eviction_candidates.size();
+ EraseLastKElements(eviction_candidates, CompareNodeNetworkTime(n.is_local, n.id),
+ protect_per_network, [&n](const NodeEvictionCandidate& c) {
+ return n.is_local ? c.m_is_local : c.m_network == n.id;
+ });
+ const size_t after = eviction_candidates.size();
+ if (before > after) {
+ protected_at_least_one = true;
+ const size_t delta{before - after};
+ num_protected += delta;
+ if (num_protected >= max_protect_by_network) {
+ break;
+ }
+ n.count -= delta;
+ }
+ }
+ if (!protected_at_least_one) {
+ break;
+ }
+ }
+
+ // Calculate how many we removed, and update our total number of peers that
+ // we want to protect based on uptime accordingly.
+ assert(num_protected == initial_size - eviction_candidates.size());
+ const size_t remaining_to_protect{total_protect_size - num_protected};
+ EraseLastKElements(eviction_candidates, ReverseCompareNodeTimeConnected, remaining_to_protect);
+}
+
+[[nodiscard]] std::optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates)
+{
+ // Protect connections with certain characteristics
+
+ ProtectNoBanConnections(vEvictionCandidates);
+
+ ProtectOutboundConnections(vEvictionCandidates);
+
+ // Deterministically select 4 peers to protect by netgroup.
+ // An attacker cannot predict which netgroups will be protected
+ EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4);
+ // Protect the 8 nodes with the lowest minimum ping time.
+ // An attacker cannot manipulate this metric without physically moving nodes closer to the target.
+ EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8);
+ // Protect 4 nodes that most recently sent us novel transactions accepted into our mempool.
+ // An attacker cannot manipulate this metric without performing useful work.
+ EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4);
+ // Protect up to 8 non-tx-relay peers that have sent us novel blocks.
+ EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, 8,
+ [](const NodeEvictionCandidate& n) { return !n.m_relay_txs && n.fRelevantServices; });
+
+ // Protect 4 nodes that most recently sent us novel blocks.
+ // An attacker cannot manipulate this metric without performing useful work.
+ EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4);
+
+ // Protect some of the remaining eviction candidates by ratios of desirable
+ // or disadvantaged characteristics.
+ ProtectEvictionCandidatesByRatio(vEvictionCandidates);
+
+ if (vEvictionCandidates.empty()) return std::nullopt;
+
+ // If any remaining peers are preferred for eviction consider only them.
+ // This happens after the other preferences since if a peer is really the best by other criteria (esp relaying blocks)
+ // then we probably don't want to evict it no matter what.
+ if (std::any_of(vEvictionCandidates.begin(),vEvictionCandidates.end(),[](NodeEvictionCandidate const &n){return n.prefer_evict;})) {
+ vEvictionCandidates.erase(std::remove_if(vEvictionCandidates.begin(),vEvictionCandidates.end(),
+ [](NodeEvictionCandidate const &n){return !n.prefer_evict;}),vEvictionCandidates.end());
+ }
+
+ // Identify the network group with the most connections and youngest member.
+ // (vEvictionCandidates is already sorted by reverse connect time)
+ uint64_t naMostConnections;
+ unsigned int nMostConnections = 0;
+ std::chrono::seconds nMostConnectionsTime{0};
+ std::map<uint64_t, std::vector<NodeEvictionCandidate> > mapNetGroupNodes;
+ for (const NodeEvictionCandidate &node : vEvictionCandidates) {
+ std::vector<NodeEvictionCandidate> &group = mapNetGroupNodes[node.nKeyedNetGroup];
+ group.push_back(node);
+ const auto grouptime{group[0].m_connected};
+
+ if (group.size() > nMostConnections || (group.size() == nMostConnections && grouptime > nMostConnectionsTime)) {
+ nMostConnections = group.size();
+ nMostConnectionsTime = grouptime;
+ naMostConnections = node.nKeyedNetGroup;
+ }
+ }
+
+ // Reduce to the network group with the most connections
+ vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]);
+
+ // Disconnect from the network group with the most connections
+ return vEvictionCandidates.front().id;
+}
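
EraseLastKElements() above does the heavy lifting for each protection step: sort so the most-protectable candidates land at the back, then erase (i.e. protect from further eviction consideration) only those of the last k elements that match the predicate. The same mechanics on plain integers, purely for illustration:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> v{5, 1, 4, 2, 3};
    std::sort(v.begin(), v.end());                     // 1 2 3 4 5
    const std::size_t k = std::min<std::size_t>(2, v.size());
    v.erase(std::remove_if(v.end() - k, v.end(),
                           [](int n) { return n % 2 == 0; }), // predicate: only even values
            v.end());
    assert((v == std::vector<int>{1, 2, 3, 5}));       // 4 was erased; 5 survived the predicate
}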
diff --git a/src/node/eviction.h b/src/node/eviction.h
new file mode 100644
index 0000000000..1bb32e5327
--- /dev/null
+++ b/src/node/eviction.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_EVICTION_H
+#define BITCOIN_NODE_EVICTION_H
+
+#include <node/connection_types.h>
+#include <net_permissions.h>
+
+#include <chrono>
+#include <cstdint>
+#include <optional>
+#include <vector>
+
+typedef int64_t NodeId;
+
+struct NodeEvictionCandidate {
+ NodeId id;
+ std::chrono::seconds m_connected;
+ std::chrono::microseconds m_min_ping_time;
+ std::chrono::seconds m_last_block_time;
+ std::chrono::seconds m_last_tx_time;
+ bool fRelevantServices;
+ bool m_relay_txs;
+ bool fBloomFilter;
+ uint64_t nKeyedNetGroup;
+ bool prefer_evict;
+ bool m_is_local;
+ Network m_network;
+ bool m_noban;
+ ConnectionType m_conn_type;
+};
+
+/**
+ * Select an inbound peer to evict after filtering out (protecting) peers having
+ * distinct, difficult-to-forge characteristics. The protection logic picks out
+ * fixed numbers of desirable peers per various criteria, followed by (mostly)
+ * ratios of desirable or disadvantaged peers. If any eviction candidates
+ * remain, the selection logic chooses a peer to evict.
+ */
+[[nodiscard]] std::optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates);
+
+/** Protect desirable or disadvantaged inbound peers from eviction by ratio.
+ *
+ * This function protects half of the peers that have been connected the
+ * longest, to replicate the implicit behavior of never evicting long-standing
+ * connections and to preclude attacks that start later.
+ *
+ * Half of these protected spots (1/4 of the total) are reserved for the
+ * following categories of peers, sorted by longest uptime, even if they do
+ * not have the longest uptime overall:
+ *
+ * - onion peers connected via our tor control service
+ *
+ * - localhost peers, as manually configured hidden services not using
+ * `-bind=addr[:port]=onion` will not be detected as inbound onion connections
+ *
+ * - I2P peers
+ *
+ * - CJDNS peers
+ *
+ * This helps protect these privacy network peers, which tend to be otherwise
+ * disadvantaged under our eviction criteria for their higher min ping times
+ * relative to IPv4/IPv6 peers, and improves the diversity of peer connections.
+ */
+void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& vEvictionCandidates);
+
+#endif // BITCOIN_NODE_EVICTION_H
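To make the ratios above concrete, a small worked illustration (the candidate count is assumed, not taken from the code):

    // Illustration only, using an assumed count of 32 remaining candidates.
    const size_t remaining_candidates{32};
    const size_t total_protected{remaining_candidates / 2};  // 16 longest-connected peers protected
    const size_t reserved_privacy{total_protected / 2};      // 8 slots reserved for onion/localhost/I2P/CJDNS peers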
diff --git a/src/node/ui_interface.cpp b/src/node/interface_ui.cpp
index a3a6ede39a..370cde84f8 100644
--- a/src/node/ui_interface.cpp
+++ b/src/node/interface_ui.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <util/translation.h>
diff --git a/src/node/ui_interface.h b/src/node/interface_ui.h
index 5c7c3e7074..37c0f6392b 100644
--- a/src/node/ui_interface.h
+++ b/src/node/interface_ui.h
@@ -3,8 +3,8 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#ifndef BITCOIN_NODE_UI_INTERFACE_H
-#define BITCOIN_NODE_UI_INTERFACE_H
+#ifndef BITCOIN_NODE_INTERFACE_UI_H
+#define BITCOIN_NODE_INTERFACE_UI_H
#include <functional>
#include <memory>
@@ -25,8 +25,7 @@ class CClientUIInterface
{
public:
/** Flags for CClientUIInterface::ThreadSafeMessageBox */
- enum MessageBoxFlags
- {
+ enum MessageBoxFlags : uint32_t {
ICON_INFORMATION = 0,
ICON_WARNING = (1U << 0),
ICON_ERROR = (1U << 1),
@@ -121,4 +120,4 @@ constexpr auto AbortError = InitError;
extern CClientUIInterface uiInterface;
-#endif // BITCOIN_NODE_UI_INTERFACE_H
+#endif // BITCOIN_NODE_INTERFACE_UI_H
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 855db7b0ec..46e0efb9b6 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -22,7 +22,7 @@
#include <node/coin.h>
#include <node/context.h>
#include <node/transaction.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <policy/feerate.h>
#include <policy/fees.h>
#include <policy/policy.h>
@@ -90,8 +90,16 @@ public:
uint32_t getLogCategories() override { return LogInstance().GetCategoryMask(); }
bool baseInitialize() override
{
- return AppInitBasicSetup(gArgs) && AppInitParameterInteraction(gArgs) && AppInitSanityChecks() &&
- AppInitLockDataDirectory() && AppInitInterfaces(*m_context);
+ if (!AppInitBasicSetup(gArgs)) return false;
+ if (!AppInitParameterInteraction(gArgs, /*use_syscall_sandbox=*/false)) return false;
+
+ m_context->kernel = std::make_unique<kernel::Context>();
+ if (!AppInitSanityChecks(*m_context->kernel)) return false;
+
+ if (!AppInitLockDataDirectory()) return false;
+ if (!AppInitInterfaces(*m_context)) return false;
+
+ return true;
}
bool appInitMain(interfaces::BlockAndHeaderTipInfo* tip_info) override
{
@@ -112,8 +120,48 @@ public:
}
}
bool shutdownRequested() override { return ShutdownRequested(); }
+ bool isSettingIgnored(const std::string& name) override
+ {
+ bool ignored = false;
+ gArgs.LockSettings([&](util::Settings& settings) {
+ if (auto* options = util::FindKey(settings.command_line_options, name)) {
+ ignored = !options->empty();
+ }
+ });
+ return ignored;
+ }
+ util::SettingsValue getPersistentSetting(const std::string& name) override { return gArgs.GetPersistentSetting(name); }
+ void updateRwSetting(const std::string& name, const util::SettingsValue& value) override
+ {
+ gArgs.LockSettings([&](util::Settings& settings) {
+ if (value.isNull()) {
+ settings.rw_settings.erase(name);
+ } else {
+ settings.rw_settings[name] = value;
+ }
+ });
+ gArgs.WriteSettingsFile();
+ }
+ void forceSetting(const std::string& name, const util::SettingsValue& value) override
+ {
+ gArgs.LockSettings([&](util::Settings& settings) {
+ if (value.isNull()) {
+ settings.forced_settings.erase(name);
+ } else {
+ settings.forced_settings[name] = value;
+ }
+ });
+ }
+ void resetSettings() override
+ {
+ gArgs.WriteSettingsFile(/*errors=*/nullptr, /*backup=*/true);
+ gArgs.LockSettings([&](util::Settings& settings) {
+ settings.rw_settings.clear();
+ });
+ gArgs.WriteSettingsFile();
+ }
void mapPort(bool use_upnp, bool use_natpmp) override { StartMapPort(use_upnp, use_natpmp); }
- bool getProxy(Network net, proxyType& proxy_info) override { return GetProxy(net, proxy_info); }
+ bool getProxy(Network net, Proxy& proxy_info) override { return GetProxy(net, proxy_info); }
size_t getNodeCount(ConnectionDirection flags) override
{
return m_context->connman ? m_context->connman->GetNodeCount(flags) : 0;
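The new settings methods above expose the layering of forced, command-line, and read-write (settings.json) values to interface clients. A hedged sketch of how a GUI options dialog might drive them through an interfaces::Node handle, assuming these methods are declared on interfaces::Node as the overrides imply; the function name and the "prune" setting are illustrative only:

    #include <interfaces/node.h>
    #include <util/settings.h>

    void UpdatePruneSetting(interfaces::Node& node, int prune_gb)
    {
        // A command-line or bitcoin.conf override would shadow any
        // settings.json value, so skip the write in that case.
        if (node.isSettingIgnored("prune")) return;

        // Persist the new value to <datadir>/settings.json; passing a null
        // value instead would erase the stored setting.
        node.updateRwSetting("prune", util::SettingsValue{prune_gb});

        // Read back what is now stored on disk.
        const util::SettingsValue stored{node.getPersistentSetting("prune")};
        (void)stored;
    }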
@@ -212,9 +260,10 @@ public:
bool getHeaderTip(int& height, int64_t& block_time) override
{
LOCK(::cs_main);
- if (::pindexBestHeader) {
- height = ::pindexBestHeader->nHeight;
- block_time = ::pindexBestHeader->GetBlockTime();
+ auto best_header = chainman().m_best_header;
+ if (best_header) {
+ height = best_header->nHeight;
+ block_time = best_header->GetBlockTime();
return true;
}
return false;
@@ -227,7 +276,7 @@ public:
uint256 getBestBlockHash() override
{
const CBlockIndex* tip = WITH_LOCK(::cs_main, return chainman().ActiveChain().Tip());
- return tip ? tip->GetBlockHash() : Params().GenesisBlock().GetHash();
+ return tip ? tip->GetBlockHash() : chainman().GetParams().GenesisBlock().GetHash();
}
int64_t getLastBlockTime() override
{
@@ -235,7 +284,7 @@ public:
if (chainman().ActiveChain().Tip()) {
return chainman().ActiveChain().Tip()->GetBlockTime();
}
- return Params().GenesisBlock().GetBlockTime(); // Genesis block's time of current network
+ return chainman().GetParams().GenesisBlock().GetBlockTime(); // Genesis block's time of current network
}
double getVerificationProgress() override
{
@@ -244,7 +293,7 @@ public:
LOCK(::cs_main);
tip = chainman().ActiveChain().Tip();
}
- return GuessVerificationProgress(Params().TxData(), tip);
+ return GuessVerificationProgress(chainman().GetParams().TxData(), tip);
}
bool isInitialBlockDownload() override {
return chainman().ActiveChainstate().IsInitialBlockDownload();
@@ -425,7 +474,7 @@ public:
// try to handle the request. Otherwise, reraise the exception.
if (!last_handler) {
const UniValue& code = e["code"];
- if (code.isNum() && code.get_int() == RPC_WALLET_NOT_FOUND) {
+ if (code.isNum() && code.getInt<int>() == RPC_WALLET_NOT_FOUND) {
return false;
}
}
@@ -475,27 +524,29 @@ public:
}
bool haveBlockOnDisk(int height) override
{
- LOCK(cs_main);
+ LOCK(::cs_main);
const CChain& active = Assert(m_node.chainman)->ActiveChain();
CBlockIndex* block = active[height];
return block && ((block->nStatus & BLOCK_HAVE_DATA) != 0) && block->nTx > 0;
}
CBlockLocator getTipLocator() override
{
- LOCK(cs_main);
+ LOCK(::cs_main);
const CChain& active = Assert(m_node.chainman)->ActiveChain();
return active.GetLocator();
}
- bool checkFinalTx(const CTransaction& tx) override
+ CBlockLocator getActiveChainLocator(const uint256& block_hash) override
{
- LOCK(cs_main);
- return CheckFinalTx(chainman().ActiveChain().Tip(), tx);
+ LOCK(::cs_main);
+ const CBlockIndex* index = chainman().m_blockman.LookupBlockIndex(block_hash);
+ if (!index) return {};
+ return chainman().ActiveChain().GetLocator(index);
}
std::optional<int> findLocatorFork(const CBlockLocator& locator) override
{
- LOCK(cs_main);
+ LOCK(::cs_main);
const CChainState& active = Assert(m_node.chainman)->ActiveChainstate();
- if (CBlockIndex* fork = active.FindForkInGlobalIndex(locator)) {
+ if (const CBlockIndex* fork = active.FindForkInGlobalIndex(locator)) {
return fork->nHeight;
}
return std::nullopt;
@@ -549,8 +600,8 @@ public:
void findCoins(std::map<COutPoint, Coin>& coins) override { return FindCoins(m_node, coins); }
double guessVerificationProgress(const uint256& block_hash) override
{
- LOCK(cs_main);
- return GuessVerificationProgress(Params().TxData(), chainman().m_blockman.LookupBlockIndex(block_hash));
+ LOCK(::cs_main);
+ return GuessVerificationProgress(chainman().GetParams().TxData(), chainman().m_blockman.LookupBlockIndex(block_hash));
}
bool hasBlocks(const uint256& block_hash, int min_height, std::optional<int> max_height) override
{
@@ -562,7 +613,7 @@ public:
// used to limit the range, and passing min_height that's too low or
// max_height that's too high will not crash or change the result.
LOCK(::cs_main);
- if (CBlockIndex* block = chainman().m_blockman.LookupBlockIndex(block_hash)) {
+ if (const CBlockIndex* block = chainman().m_blockman.LookupBlockIndex(block_hash)) {
if (max_height && block->nHeight >= *max_height) block = block->GetAncestor(*max_height);
for (; block->nStatus & BLOCK_HAVE_DATA; block = block->pprev) {
// Check pprev to not segfault if min_height is too low
@@ -595,7 +646,7 @@ public:
bool relay,
std::string& err_string) override
{
- const TransactionError err = BroadcastTransaction(m_node, tx, err_string, max_tx_fee, relay, /*wait_callback*/ false);
+ const TransactionError err = BroadcastTransaction(m_node, tx, err_string, max_tx_fee, relay, /*wait_callback=*/false);
// Chain clients only care about failures to accept the tx to the mempool. Disregard non-mempool related failures.
// Note: this will need to be updated if BroadcastTransactions() is updated to return other non-mempool failures
// that Chain clients do not need to know about.
@@ -609,8 +660,12 @@ public:
}
void getPackageLimits(unsigned int& limit_ancestor_count, unsigned int& limit_descendant_count) override
{
- limit_ancestor_count = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- limit_descendant_count = gArgs.GetIntArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
+ const CTxMemPool::Limits default_limits{};
+
+ const CTxMemPool::Limits& limits{m_node.mempool ? m_node.mempool->m_limits : default_limits};
+
+ limit_ancestor_count = limits.ancestor_count;
+ limit_descendant_count = limits.descendant_count;
}
bool checkChainLimits(const CTransactionRef& tx) override
{
@@ -618,15 +673,12 @@ public:
LockPoints lp;
CTxMemPoolEntry entry(tx, 0, 0, 0, false, 0, lp);
CTxMemPool::setEntries ancestors;
- auto limit_ancestor_count = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- auto limit_ancestor_size = gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
- auto limit_descendant_count = gArgs.GetIntArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
- auto limit_descendant_size = gArgs.GetIntArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000;
+ const CTxMemPool::Limits& limits{m_node.mempool->m_limits};
std::string unused_error_string;
LOCK(m_node.mempool->cs);
return m_node.mempool->CalculateMemPoolAncestors(
- entry, ancestors, limit_ancestor_count, limit_ancestor_size,
- limit_descendant_count, limit_descendant_size, unused_error_string);
+ entry, ancestors, limits.ancestor_count, limits.ancestor_size_vbytes,
+ limits.descendant_count, limits.descendant_size_vbytes, unused_error_string);
}
CFeeRate estimateSmartFee(int num_blocks, bool conservative, FeeCalculation* calc) override
{
@@ -641,15 +693,15 @@ public:
CFeeRate mempoolMinFee() override
{
if (!m_node.mempool) return {};
- return m_node.mempool->GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
+ return m_node.mempool->GetMinFee();
}
CFeeRate relayMinFee() override { return ::minRelayTxFee; }
CFeeRate relayIncrementalFee() override { return ::incrementalRelayFee; }
CFeeRate relayDustFee() override { return ::dustRelayFee; }
bool havePruned() override
{
- LOCK(cs_main);
- return node::fHavePruned;
+ LOCK(::cs_main);
+ return m_node.chainman->m_blockman.m_have_pruned;
}
bool isReadyToBroadcast() override { return !node::fImporting && !node::fReindex && !isInitialBlockDownload(); }
bool isInitialBlockDownload() override {
diff --git a/src/node/mempool_persist_args.cpp b/src/node/mempool_persist_args.cpp
new file mode 100644
index 0000000000..4e775869c6
--- /dev/null
+++ b/src/node/mempool_persist_args.cpp
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <node/mempool_persist_args.h>
+
+#include <fs.h>
+#include <util/system.h>
+#include <validation.h>
+
+namespace node {
+
+bool ShouldPersistMempool(const ArgsManager& argsman)
+{
+ return argsman.GetBoolArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL);
+}
+
+fs::path MempoolPath(const ArgsManager& argsman)
+{
+ return argsman.GetDataDirNet() / "mempool.dat";
+}
+
+} // namespace node
diff --git a/src/node/mempool_persist_args.h b/src/node/mempool_persist_args.h
new file mode 100644
index 0000000000..f719ec62ab
--- /dev/null
+++ b/src/node/mempool_persist_args.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_MEMPOOL_PERSIST_ARGS_H
+#define BITCOIN_NODE_MEMPOOL_PERSIST_ARGS_H
+
+#include <fs.h>
+
+class ArgsManager;
+
+namespace node {
+
+/**
+ * Default for -persistmempool, indicating whether the node should attempt to
+ * automatically load the mempool on start and save it to disk on shutdown.
+ */
+static constexpr bool DEFAULT_PERSIST_MEMPOOL{true};
+
+bool ShouldPersistMempool(const ArgsManager& argsman);
+fs::path MempoolPath(const ArgsManager& argsman);
+
+} // namespace node
+
+#endif // BITCOIN_NODE_MEMPOOL_PERSIST_ARGS_H
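A hedged usage sketch for the two helpers; the wrapper function is illustrative and not part of the change:

    #include <fs.h>
    #include <logging.h>
    #include <node/mempool_persist_args.h>
    #include <util/system.h>

    void AnnounceMempoolFile(const ArgsManager& args)
    {
        // Only relevant when -persistmempool (default: true) is enabled.
        if (!node::ShouldPersistMempool(args)) return;
        LogPrintf("Mempool will be persisted to %s\n",
                  fs::PathToString(node::MempoolPath(args)));
    }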
diff --git a/src/node/miner.cpp b/src/node/miner.cpp
index 7fe10ecabc..9db10feae4 100644
--- a/src/node/miner.cpp
+++ b/src/node/miner.cpp
@@ -50,8 +50,8 @@ void RegenerateCommitments(CBlock& block, ChainstateManager& chainman)
tx.vout.erase(tx.vout.begin() + GetWitnessCommitmentIndex(block));
block.vtx.at(0) = MakeTransactionRef(tx);
- CBlockIndex* prev_block = WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock));
- GenerateCoinbaseCommitment(block, prev_block, Params().GetConsensus());
+ const CBlockIndex* prev_block = WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock));
+ chainman.GenerateCoinbaseCommitment(block, prev_block);
block.hashMerkleRoot = BlockMerkleRoot(block);
}
@@ -62,8 +62,8 @@ BlockAssembler::Options::Options()
nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
}
-BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const CChainParams& params, const Options& options)
- : chainparams(params),
+BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool, const Options& options)
+ : chainparams{chainstate.m_chainman.GetParams()},
m_mempool(mempool),
m_chainstate(chainstate)
{
@@ -87,8 +87,8 @@ static BlockAssembler::Options DefaultOptions()
return options;
}
-BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const CChainParams& params)
- : BlockAssembler(chainstate, mempool, params, DefaultOptions()) {}
+BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool)
+ : BlockAssembler(chainstate, mempool, DefaultOptions()) {}
void BlockAssembler::resetBlock()
{
@@ -97,7 +97,6 @@ void BlockAssembler::resetBlock()
// Reserve space for coinbase tx
nBlockWeight = 4000;
nBlockSigOpsCost = 400;
- fIncludeWitness = false;
// These counters do not include coinbase tx
nBlockTx = 0;
@@ -122,12 +121,12 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblocktemplate->vTxFees.push_back(-1); // updated at end
pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end
- LOCK2(cs_main, m_mempool.cs);
+ LOCK(::cs_main);
CBlockIndex* pindexPrev = m_chainstate.m_chain.Tip();
assert(pindexPrev != nullptr);
nHeight = pindexPrev->nHeight + 1;
- pblock->nVersion = g_versionbitscache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
+ pblock->nVersion = m_chainstate.m_chainman.m_versionbitscache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
// -regtest only: allow overriding block.nVersion with
// -blockversion=N to test forking scenarios
if (chainparams.MineBlocksOnDemand()) {
@@ -137,20 +136,12 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblock->nTime = GetAdjustedTime();
m_lock_time_cutoff = pindexPrev->GetMedianTimePast();
- // Decide whether to include witness transactions
- // This is only needed in case the witness softfork activation is reverted
- // (which would require a very deep reorganization).
- // Note that the mempool would accept transactions with witness data before
- // the deployment is active, but we would only ever mine blocks after activation
- // unless there is a massive block reorganization with the witness softfork
- // not activated.
- // TODO: replace this with a call to main to assess validity of a mempool
- // transaction (which in most cases can be a no-op).
- fIncludeWitness = DeploymentActiveAfter(pindexPrev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT);
-
int nPackagesSelected = 0;
int nDescendantsUpdated = 0;
- addPackageTxs(nPackagesSelected, nDescendantsUpdated);
+ if (m_mempool) {
+ LOCK(m_mempool->cs);
+ addPackageTxs(*m_mempool, nPackagesSelected, nDescendantsUpdated);
+ }
int64_t nTime1 = GetTimeMicros();
@@ -166,7 +157,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx));
- pblocktemplate->vchCoinbaseCommitment = GenerateCoinbaseCommitment(*pblock, pindexPrev, chainparams.GetConsensus());
+ pblocktemplate->vchCoinbaseCommitment = m_chainstate.m_chainman.GenerateCoinbaseCommitment(*pblock, pindexPrev);
pblocktemplate->vTxFees[0] = -nFees;
LogPrintf("CreateNewBlock(): block weight: %u txs: %u fees: %ld sigops %d\n", GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost);
@@ -179,7 +170,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]);
BlockValidationState state;
- if (!TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev, false, false)) {
+ if (!TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev, GetAdjustedTime, false, false)) {
throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
}
int64_t nTime2 = GetTimeMicros();
@@ -215,17 +206,12 @@ bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost
// Perform transaction-level checks before adding to block:
// - transaction finality (locktime)
-// - premature witness (in case segwit transactions are added to mempool before
-// segwit activation)
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package) const
{
for (CTxMemPool::txiter it : package) {
if (!IsFinalTx(it->GetTx(), nHeight, m_lock_time_cutoff)) {
return false;
}
- if (!fIncludeWitness && it->GetTx().HasWitness()) {
- return false;
- }
}
return true;
}
@@ -249,15 +235,19 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
}
}
-int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
- indexed_modified_transaction_set &mapModifiedTx)
+/** Add descendants of given transactions to mapModifiedTx with ancestor
+ * state updated assuming given transactions are inBlock. Returns number
+ * of updated descendants. */
+static int UpdatePackagesForAdded(const CTxMemPool& mempool,
+ const CTxMemPool::setEntries& alreadyAdded,
+ indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs)
{
- AssertLockHeld(m_mempool.cs);
+ AssertLockHeld(mempool.cs);
int nDescendantsUpdated = 0;
for (CTxMemPool::txiter it : alreadyAdded) {
CTxMemPool::setEntries descendants;
- m_mempool.CalculateDescendants(it, descendants);
+ mempool.CalculateDescendants(it, descendants);
// Insert all descendants (not yet in block) into the modified set
for (CTxMemPool::txiter desc : descendants) {
if (alreadyAdded.count(desc)) {
@@ -279,23 +269,6 @@ int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& already
return nDescendantsUpdated;
}
-// Skip entries in mapTx that are already in a block or are present
-// in mapModifiedTx (which implies that the mapTx ancestor state is
-// stale due to ancestor inclusion in the block)
-// Also skip transactions that we've already failed to add. This can happen if
-// we consider a transaction in mapModifiedTx and it fails: we can then
-// potentially consider it again while walking mapTx. It's currently
-// guaranteed to fail again, but as a belt-and-suspenders check we put it in
-// failedTx and avoid re-evaluation, since the re-evaluation would be using
-// cached size/sigops/fee values that are not actually correct.
-bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set& mapModifiedTx, CTxMemPool::setEntries& failedTx)
-{
- AssertLockHeld(m_mempool.cs);
-
- assert(it != m_mempool.mapTx.end());
- return mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it);
-}
-
void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries)
{
// Sort package by ancestor count
@@ -317,9 +290,9 @@ void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::ve
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
-void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpdated)
+void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated)
{
- AssertLockHeld(m_mempool.cs);
+ AssertLockHeld(mempool.cs);
// mapModifiedTx will store sorted packages after they are modified
// because some of their txs are already in the block
@@ -327,11 +300,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
// Keep track of entries that failed inclusion, to avoid duplicate work
CTxMemPool::setEntries failedTx;
- // Start by adding all descendants of previously added txs to mapModifiedTx
- // and modifying them for their already included ancestors
- UpdatePackagesForAdded(inBlock, mapModifiedTx);
-
- CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = m_mempool.mapTx.get<ancestor_score>().begin();
+ CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
CTxMemPool::txiter iter;
// Limit the number of attempts to add transactions to the block when it is
@@ -340,12 +309,27 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
int64_t nConsecutiveFailed = 0;
- while (mi != m_mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
+ while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
// First try to find a new transaction in mapTx to evaluate.
- if (mi != m_mempool.mapTx.get<ancestor_score>().end() &&
- SkipMapTxEntry(m_mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) {
- ++mi;
- continue;
+ //
+ // Skip entries in mapTx that are already in a block or are present
+ // in mapModifiedTx (which implies that the mapTx ancestor state is
+ // stale due to ancestor inclusion in the block)
+ // Also skip transactions that we've already failed to add. This can happen if
+ // we consider a transaction in mapModifiedTx and it fails: we can then
+ // potentially consider it again while walking mapTx. It's currently
+ // guaranteed to fail again, but as a belt-and-suspenders check we put it in
+ // failedTx and avoid re-evaluation, since the re-evaluation would be using
+ // cached size/sigops/fee values that are not actually correct.
+ // (This check replaces the former SkipMapTxEntry() helper.)
+ if (mi != mempool.mapTx.get<ancestor_score>().end()) {
+ auto it = mempool.mapTx.project<0>(mi);
+ assert(it != mempool.mapTx.end());
+ if (mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it)) {
+ ++mi;
+ continue;
+ }
}
// Now that mi is not stale, determine which transaction to evaluate:
@@ -353,13 +337,13 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
bool fUsingModified = false;
modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
- if (mi == m_mempool.mapTx.get<ancestor_score>().end()) {
+ if (mi == mempool.mapTx.get<ancestor_score>().end()) {
// We're out of entries in mapTx; use the entry from mapModifiedTx
iter = modit->iter;
fUsingModified = true;
} else {
// Try to compare the mapTx entry to the mapModifiedTx entry
- iter = m_mempool.mapTx.project<0>(mi);
+ iter = mempool.mapTx.project<0>(mi);
if (modit != mapModifiedTx.get<ancestor_score>().end() &&
CompareTxMemPoolEntryByAncestorFee()(*modit, CTxMemPoolModifiedEntry(iter))) {
// The best entry in mapModifiedTx has higher score
@@ -414,7 +398,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
CTxMemPool::setEntries ancestors;
uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
std::string dummy;
- m_mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
+ mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
onlyUnconfirmed(ancestors);
ancestors.insert(iter);
@@ -444,25 +428,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
++nPackagesSelected;
// Update transactions that depend on each of these
- nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx);
- }
-}
-
-void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
-{
- // Update nExtraNonce
- static uint256 hashPrevBlock;
- if (hashPrevBlock != pblock->hashPrevBlock) {
- nExtraNonce = 0;
- hashPrevBlock = pblock->hashPrevBlock;
+ nDescendantsUpdated += UpdatePackagesForAdded(mempool, ancestors, mapModifiedTx);
}
- ++nExtraNonce;
- unsigned int nHeight = pindexPrev->nHeight + 1; // Height first in coinbase required for block.version=2
- CMutableTransaction txCoinbase(*pblock->vtx[0]);
- txCoinbase.vin[0].scriptSig = (CScript() << nHeight << CScriptNum(nExtraNonce));
- assert(txCoinbase.vin[0].scriptSig.size() <= 100);
-
- pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
- pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
}
} // namespace node
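With the mempool now passed as a pointer, a caller can request a coinbase-only template by passing nullptr, since addPackageTxs() is skipped when no mempool is given. A minimal sketch (the wrapper name is assumed):

    #include <node/miner.h>
    #include <script/script.h>
    #include <txmempool.h>
    #include <validation.h>

    #include <memory>

    std::unique_ptr<node::CBlockTemplate> MakeTemplate(CChainState& chainstate,
                                                       const CTxMemPool* mempool,
                                                       const CScript& script_pub_key)
    {
        // A null mempool skips transaction selection, yielding a template
        // that contains only the coinbase transaction.
        return node::BlockAssembler{chainstate, mempool}.CreateNewBlock(script_pub_key);
    }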
diff --git a/src/node/miner.h b/src/node/miner.h
index c96da874a7..26454df3df 100644
--- a/src/node/miner.h
+++ b/src/node/miner.h
@@ -45,7 +45,7 @@ struct CTxMemPoolModifiedEntry {
nSigOpCostWithAncestors = entry->GetSigOpCostWithAncestors();
}
- int64_t GetModifiedFee() const { return iter->GetModifiedFee(); }
+ CAmount GetModifiedFee() const { return iter->GetModifiedFee(); }
uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; }
CAmount GetModFeesWithAncestors() const { return nModFeesWithAncestors; }
size_t GetTxSize() const { return iter->GetTxSize(); }
@@ -116,7 +116,7 @@ struct update_for_parent_inclusion
void operator() (CTxMemPoolModifiedEntry &e)
{
- e.nModFeesWithAncestors -= iter->GetFee();
+ e.nModFeesWithAncestors -= iter->GetModifiedFee();
e.nSizeWithAncestors -= iter->GetTxSize();
e.nSigOpCostWithAncestors -= iter->GetSigOpCost();
}
@@ -132,7 +132,6 @@ private:
std::unique_ptr<CBlockTemplate> pblocktemplate;
// Configuration parameters for the block size
- bool fIncludeWitness;
unsigned int nBlockMaxWeight;
CFeeRate blockMinFeeRate;
@@ -148,7 +147,7 @@ private:
int64_t m_lock_time_cutoff;
const CChainParams& chainparams;
- const CTxMemPool& m_mempool;
+ const CTxMemPool* const m_mempool;
CChainState& m_chainstate;
public:
@@ -158,8 +157,8 @@ public:
CFeeRate blockMinFeeRate;
};
- explicit BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const CChainParams& params);
- explicit BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const CChainParams& params, const Options& options);
+ explicit BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool);
+ explicit BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool, const Options& options);
/** Construct a new block template with coinbase to scriptPubKeyIn */
std::unique_ptr<CBlockTemplate> CreateNewBlock(const CScript& scriptPubKeyIn);
@@ -178,7 +177,7 @@ private:
/** Add transactions based on feerate including unconfirmed ancestors
* Increments nPackagesSelected / nDescendantsUpdated with corresponding
* statistics from the package selection (for logging statistics). */
- void addPackageTxs(int& nPackagesSelected, int& nDescendantsUpdated) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs);
+ void addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs);
// helper functions for addPackageTxs()
/** Remove confirmed (inBlock) entries from given set */
@@ -190,19 +189,10 @@ private:
* These checks should always succeed, and they're here
* only as an extra check in case of suboptimal node configuration */
bool TestPackageTransactions(const CTxMemPool::setEntries& package) const;
- /** Return true if given transaction from mapTx has already been evaluated,
- * or if the transaction's cached data in mapTx is incorrect. */
- bool SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set& mapModifiedTx, CTxMemPool::setEntries& failedTx) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs);
/** Sort the package in an order that is valid to appear in a block */
void SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries);
- /** Add descendants of given transactions to mapModifiedTx with ancestor
- * state updated assuming given transactions are inBlock. Returns number
- * of updated descendants. */
- int UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded, indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs);
};
-/** Modify the extranonce in a block */
-void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce);
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);
/** Update an old GenerateCoinbaseCommitment from CreateNewBlock after the block txs have changed */
diff --git a/src/node/transaction.h b/src/node/transaction.h
index b7cf225636..0604754a46 100644
--- a/src/node/transaction.h
+++ b/src/node/transaction.h
@@ -5,7 +5,6 @@
#ifndef BITCOIN_NODE_TRANSACTION_H
#define BITCOIN_NODE_TRANSACTION_H
-#include <attributes.h>
#include <policy/feerate.h>
#include <primitives/transaction.h>
#include <util/error.h>