author     MarcoFalke <falke.marco@gmail.com>  2022-01-02 17:05:43 +0100
committer  MarcoFalke <falke.marco@gmail.com>  2022-01-02 17:05:14 +0100
commit     fade2a44f4aabc64185031dbf4c70d875ece6740 (patch)
tree       f2bf6d1694f74d0ae2a4d12f10a0bf4cb9dbd3d3 /src/node
parent     8b5a4de904b414fb3a818732cd0a2c90b91bc275 (diff)
Move BlockManager to node/blockstorage
Can be reviewed with --color-moved=dimmed-zebra
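For reviewers, one way to apply that suggestion locally is to pass the flag from the commit message to git's diff machinery (a sketch; it assumes the commit has been fetched into a local clone):

    git show --color-moved=dimmed-zebra fade2a44f4aabc64185031dbf4c70d875ece6740

In dimmed-zebra mode the interior of moved blocks is dimmed, so only lines that actually changed during the move stand out.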
Diffstat (limited to 'src/node')
-rw-r--r--  src/node/blockstorage.cpp  369
-rw-r--r--  src/node/blockstorage.h     84
2 files changed, 453 insertions, 0 deletions
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 53bc2b5069..96899fe5a7 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -12,6 +12,7 @@
#include <fs.h>
#include <hash.h>
#include <pow.h>
+#include <reverse_iterator.h>
#include <shutdown.h>
#include <signet.h>
#include <streams.h>
@@ -47,6 +48,374 @@ static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();
+CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
+{
+ AssertLockHeld(cs_main);
+ BlockMap::const_iterator it = m_block_index.find(hash);
+ return it == m_block_index.end() ? nullptr : it->second;
+}
+
+CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
+{
+ AssertLockHeld(cs_main);
+
+ // Check for duplicate
+ uint256 hash = block.GetHash();
+ BlockMap::iterator it = m_block_index.find(hash);
+ if (it != m_block_index.end())
+ return it->second;
+
+ // Construct new block index object
+ CBlockIndex* pindexNew = new CBlockIndex(block);
+ // We assign the sequence id to blocks only when the full data is available,
+ // to avoid miners withholding blocks but broadcasting headers, to get a
+ // competitive advantage.
+ pindexNew->nSequenceId = 0;
+ BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+ pindexNew->phashBlock = &((*mi).first);
+ BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
+ if (miPrev != m_block_index.end())
+ {
+ pindexNew->pprev = (*miPrev).second;
+ pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
+ pindexNew->BuildSkip();
+ }
+ pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
+ pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
+ pindexNew->RaiseValidity(BLOCK_VALID_TREE);
+ if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
+ pindexBestHeader = pindexNew;
+
+ setDirtyBlockIndex.insert(pindexNew);
+
+ return pindexNew;
+}
+
+void BlockManager::PruneOneBlockFile(const int fileNumber)
+{
+ AssertLockHeld(cs_main);
+ LOCK(cs_LastBlockFile);
+
+ for (const auto& entry : m_block_index) {
+ CBlockIndex* pindex = entry.second;
+ if (pindex->nFile == fileNumber) {
+ pindex->nStatus &= ~BLOCK_HAVE_DATA;
+ pindex->nStatus &= ~BLOCK_HAVE_UNDO;
+ pindex->nFile = 0;
+ pindex->nDataPos = 0;
+ pindex->nUndoPos = 0;
+ setDirtyBlockIndex.insert(pindex);
+
+ // Prune from m_blocks_unlinked -- any block we prune would have
+ // to be downloaded again in order to consider its chain, at which
+ // point it would be considered as a candidate for
+ // m_blocks_unlinked or setBlockIndexCandidates.
+ auto range = m_blocks_unlinked.equal_range(pindex->pprev);
+ while (range.first != range.second) {
+ std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
+ range.first++;
+ if (_it->second == pindex) {
+ m_blocks_unlinked.erase(_it);
+ }
+ }
+ }
+ }
+
+ vinfoBlockFile[fileNumber].SetNull();
+ setDirtyFileInfo.insert(fileNumber);
+}
+
+void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
+{
+ assert(fPruneMode && nManualPruneHeight > 0);
+
+ LOCK2(cs_main, cs_LastBlockFile);
+ if (chain_tip_height < 0) {
+ return;
+ }
+
+ // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
+ unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
+ int count = 0;
+ for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
+ if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+ continue;
+ }
+ PruneOneBlockFile(fileNumber);
+ setFilesToPrune.insert(fileNumber);
+ count++;
+ }
+ LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
+}
+
+void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
+{
+ LOCK2(cs_main, cs_LastBlockFile);
+ if (chain_tip_height < 0 || nPruneTarget == 0) {
+ return;
+ }
+ if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
+ return;
+ }
+
+ unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
+ uint64_t nCurrentUsage = CalculateCurrentUsage();
+ // We don't check to prune until after we've allocated new space for files
+ // So we should leave a buffer under our target to account for another allocation
+ // before the next pruning.
+ uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
+ uint64_t nBytesToPrune;
+ int count = 0;
+
+ if (nCurrentUsage + nBuffer >= nPruneTarget) {
+ // On a prune event, the chainstate DB is flushed.
+ // To avoid excessive prune events negating the benefit of high dbcache
+ // values, we should not prune too rapidly.
+ // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
+ if (is_ibd) {
+ // Since this is only relevant during IBD, we use a fixed 10%
+ nBuffer += nPruneTarget / 10;
+ }
+
+ for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
+ nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
+
+ if (vinfoBlockFile[fileNumber].nSize == 0) {
+ continue;
+ }
+
+ if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
+ break;
+ }
+
+ // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
+ if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+ continue;
+ }
+
+ PruneOneBlockFile(fileNumber);
+ // Queue up the files for removal
+ setFilesToPrune.insert(fileNumber);
+ nCurrentUsage -= nBytesToPrune;
+ count++;
+ }
+ }
+
+ LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
+ nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
+ ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
+ nLastBlockWeCanPrune, count);
+}
+
+CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
+{
+ AssertLockHeld(cs_main);
+
+ if (hash.IsNull())
+ return nullptr;
+
+ // Return existing
+ BlockMap::iterator mi = m_block_index.find(hash);
+ if (mi != m_block_index.end())
+ return (*mi).second;
+
+ // Create new
+ CBlockIndex* pindexNew = new CBlockIndex();
+ mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+ pindexNew->phashBlock = &((*mi).first);
+
+ return pindexNew;
+}
+
+bool BlockManager::LoadBlockIndex(
+ const Consensus::Params& consensus_params,
+ ChainstateManager& chainman)
+{
+ if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
+ return false;
+ }
+
+ // Calculate nChainWork
+ std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
+ vSortedByHeight.reserve(m_block_index.size());
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
+ {
+ CBlockIndex* pindex = item.second;
+ vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
+ }
+ sort(vSortedByHeight.begin(), vSortedByHeight.end());
+
+ // Find start of assumed-valid region.
+ int first_assumed_valid_height = std::numeric_limits<int>::max();
+
+ for (const auto& [height, block] : vSortedByHeight) {
+ if (block->IsAssumedValid()) {
+ auto chainstates = chainman.GetAll();
+
+ // If we encounter an assumed-valid block index entry, ensure that we have
+ // one chainstate that tolerates assumed-valid entries and another that does
+ // not (i.e. the background validation chainstate), since assumed-valid
+ // entries should always be pending validation by a fully-validated chainstate.
+ auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
+ assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
+ assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
+
+ first_assumed_valid_height = height;
+ break;
+ }
+ }
+
+ for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
+ {
+ if (ShutdownRequested()) return false;
+ CBlockIndex* pindex = item.second;
+ pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
+ pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
+
+ // We can link the chain of blocks for which we've received transactions at some point, or
+ // blocks that are assumed-valid on the basis of snapshot load (see
+ // PopulateAndValidateSnapshot()).
+ // Pruned nodes may have deleted the block.
+ if (pindex->nTx > 0) {
+ if (pindex->pprev) {
+ if (pindex->pprev->nChainTx > 0) {
+ pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
+ } else {
+ pindex->nChainTx = 0;
+ m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
+ }
+ } else {
+ pindex->nChainTx = pindex->nTx;
+ }
+ }
+ if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
+ pindex->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(pindex);
+ }
+ if (pindex->IsAssumedValid() ||
+ (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
+ (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
+
+ // Fill each chainstate's block candidate set. Only add assumed-valid
+ // blocks to the tip candidate set if the chainstate is allowed to rely on
+ // assumed-valid blocks.
+ //
+ // If all setBlockIndexCandidates contained the assumed-valid blocks, the
+ // background chainstate's ActivateBestChain() call would add assumed-valid
+ // blocks to the chain (based on how FindMostWorkChain() works). Obviously
+ // we don't want this since the purpose of the background validation chain
+ // is to validate assumed-valid blocks.
+ //
+ // Note: This is considering all blocks whose height is greater or equal to
+ // the first assumed-valid block to be assumed-valid blocks, and excluding
+ // them from the background chainstate's setBlockIndexCandidates set. This
+ // does mean that some blocks which are not technically assumed-valid
+ // (later blocks on a fork beginning before the first assumed-valid block)
+ // might not get added to the background chainstate, but this is ok,
+ // because they will still be attached to the active chainstate if they
+ // actually contain more work.
+ //
+ // Instead of this height-based approach, an earlier attempt was made at
+ // detecting "holistically" whether the block index under consideration
+ // relied on an assumed-valid ancestor, but this proved to be too slow to
+ // be practical.
+ for (CChainState* chainstate : chainman.GetAll()) {
+ if (chainstate->reliesOnAssumedValid() ||
+ pindex->nHeight < first_assumed_valid_height) {
+ chainstate->setBlockIndexCandidates.insert(pindex);
+ }
+ }
+ }
+ if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
+ chainman.m_best_invalid = pindex;
+ }
+ if (pindex->pprev)
+ pindex->BuildSkip();
+ if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
+ pindexBestHeader = pindex;
+ }
+
+ return true;
+}
+
+void BlockManager::Unload() {
+ m_blocks_unlinked.clear();
+
+ for (const BlockMap::value_type& entry : m_block_index) {
+ delete entry.second;
+ }
+
+ m_block_index.clear();
+}
+
+bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
+{
+ if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
+ return false;
+ }
+
+ // Load block file info
+ m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
+ vinfoBlockFile.resize(nLastBlockFile + 1);
+ LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
+ for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
+ m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
+ }
+ LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
+ for (int nFile = nLastBlockFile + 1; true; nFile++) {
+ CBlockFileInfo info;
+ if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
+ vinfoBlockFile.push_back(info);
+ } else {
+ break;
+ }
+ }
+
+ // Check presence of blk files
+ LogPrintf("Checking all blk files are present...\n");
+ std::set<int> setBlkDataFiles;
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
+ CBlockIndex* pindex = item.second;
+ if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ setBlkDataFiles.insert(pindex->nFile);
+ }
+ }
+ for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
+ {
+ FlatFilePos pos(*it, 0);
+ if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
+ return false;
+ }
+ }
+
+ // Check whether we have ever pruned block & undo files
+ m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
+ if (fHavePruned)
+ LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
+
+ // Check whether we need to continue reindexing
+ bool fReindexing = false;
+ m_block_tree_db->ReadReindexing(fReindexing);
+ if (fReindexing) fReindex = true;
+
+ return true;
+}
+
+CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
+{
+ const MapCheckpoints& checkpoints = data.mapCheckpoints;
+
+ for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
+ {
+ const uint256& hash = i.second;
+ CBlockIndex* pindex = LookupBlockIndex(hash);
+ if (pindex) {
+ return pindex;
+ }
+ }
+ return nullptr;
+}
+
bool IsBlockPruned(const CBlockIndex* pblockindex)
{
return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 7c7bf68178..ec9f5f791a 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -7,6 +7,7 @@
#include <fs.h>
#include <protocol.h> // For CMessageHeader::MessageStartChars
+#include <txdb.h>
#include <atomic>
#include <cstdint>
@@ -20,7 +21,9 @@ class CBlockIndex;
class CBlockUndo;
class CChain;
class CChainParams;
+class CChainState;
class ChainstateManager;
+struct CCheckpointData;
struct FlatFilePos;
namespace Consensus {
struct Params;
@@ -45,6 +48,87 @@ extern bool fPruneMode;
/** Number of MiB of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
+typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
+
+struct CBlockIndexWorkComparator
+{
+ bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const;
+};
+
+/**
+ * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
+ * to determine where the most-work tip is.
+ *
+ * This data is used mostly in `CChainState` - information about, e.g.,
+ * candidate tips is not maintained here.
+ */
+class BlockManager
+{
+ friend CChainState;
+
+private:
+ /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
+ void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height);
+
+ /**
+ * Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a user-defined target.
+ * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
+ * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
+ * (which in this case means the blockchain must be re-downloaded.)
+ *
+ * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
+ * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
+ * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
+ * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
+ * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
+ * A db flag records the fact that at least some block files have been pruned.
+ *
+ * @param[out] setFilesToPrune Populated with the set of file indices that can be unlinked
+ */
+ void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd);
+
+public:
+ BlockMap m_block_index GUARDED_BY(cs_main);
+
+ /**
+ * All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
+ * Pruned nodes may have entries where B is missing data.
+ */
+ std::multimap<CBlockIndex*, CBlockIndex*> m_blocks_unlinked;
+
+ std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
+
+ bool LoadBlockIndexDB(ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
+ /**
+ * Load the blocktree off disk and into memory. Populate certain metadata
+ * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
+ * collections like setDirtyBlockIndex.
+ */
+ bool LoadBlockIndex(
+ const Consensus::Params& consensus_params,
+ ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ /** Clear all data members. */
+ void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ /** Create a new block index entry for a given block hash */
+ CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ //! Mark one block file as pruned (modify associated database entries)
+ void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ CBlockIndex* LookupBlockIndex(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ //! Returns last CBlockIndex* that is a checkpoint
+ CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ ~BlockManager() {
+ Unload();
+ }
+};
+
//! Check whether the block associated with this index entry is pruned or not.
bool IsBlockPruned(const CBlockIndex* pblockindex);
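As a rough caller-side sketch (not part of this patch; the ChainstateManager member access and the surrounding function are assumptions for illustration), code would reach the relocated routines through a BlockManager instance while holding cs_main, as the lock annotations in the header require:

    // Hypothetical caller-side sketch, not part of this commit.
    #include <node/blockstorage.h>
    #include <sync.h>
    #include <uint256.h>
    #include <validation.h>

    // Look up a block header entry and report whether its data is still on disk.
    const CBlockIndex* DescribeBlock(ChainstateManager& chainman, const uint256& hash)
    {
        LOCK(cs_main); // LookupBlockIndex() is annotated EXCLUSIVE_LOCKS_REQUIRED(cs_main)
        const CBlockIndex* pindex = chainman.m_blockman.LookupBlockIndex(hash); // m_blockman access assumed
        if (pindex == nullptr) return nullptr;     // header unknown
        if (IsBlockPruned(pindex)) return nullptr; // data pruned from blk*.dat
        return pindex;                             // block data should be readable
    }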