Diffstat (limited to 'src/node/blockstorage.cpp')
-rw-r--r--  src/node/blockstorage.cpp | 510
1 file changed, 439 insertions(+), 71 deletions(-)
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 53bc2b5069..8a99130fd0 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -12,6 +12,7 @@
#include <fs.h>
#include <hash.h>
#include <pow.h>
+#include <reverse_iterator.h>
#include <shutdown.h>
#include <signet.h>
#include <streams.h>
@@ -20,42 +21,422 @@
#include <util/system.h>
#include <validation.h>
+namespace node {
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
uint64_t nPruneTarget = 0;
-// TODO make namespace {
-RecursiveMutex cs_LastBlockFile;
-std::vector<CBlockFileInfo> vinfoBlockFile;
-int nLastBlockFile = 0;
-/** Global flag to indicate we should check to see if there are
-* block/undo files that should be deleted. Set on startup
-* or if we allocate more file space when we're in prune mode
-*/
-bool fCheckForPruning = false;
-
-/** Dirty block index entries. */
-std::set<CBlockIndex*> setDirtyBlockIndex;
-
-/** Dirty block file entries. */
-std::set<int> setDirtyFileInfo;
-// } // namespace
-
static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();
+CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
+{
+ AssertLockHeld(cs_main);
+ BlockMap::const_iterator it = m_block_index.find(hash);
+ return it == m_block_index.end() ? nullptr : it->second;
+}
+
+CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
+{
+ AssertLockHeld(cs_main);
+
+ // Check for duplicate
+ uint256 hash = block.GetHash();
+ BlockMap::iterator it = m_block_index.find(hash);
+ if (it != m_block_index.end()) {
+ return it->second;
+ }
+
+ // Construct new block index object
+ CBlockIndex* pindexNew = new CBlockIndex(block);
+ // We assign the sequence id to blocks only when the full data is available,
+ // to avoid miners withholding blocks but broadcasting headers, to get a
+ // competitive advantage.
+ pindexNew->nSequenceId = 0;
+ BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+ pindexNew->phashBlock = &((*mi).first);
+ BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
+ if (miPrev != m_block_index.end()) {
+ pindexNew->pprev = (*miPrev).second;
+ pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
+ pindexNew->BuildSkip();
+ }
+ pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
+ pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
+ pindexNew->RaiseValidity(BLOCK_VALID_TREE);
+ if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
+ pindexBestHeader = pindexNew;
+
+ m_dirty_blockindex.insert(pindexNew);
+
+ return pindexNew;
+}
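The parent-linking arithmetic above reduces to two recurrences: nChainWork accumulates the parent's cumulative work plus this block's proof, and nTimeMax carries a running maximum of timestamps. A minimal standalone sketch (the Index struct, Link helper, and 64-bit work values are illustrative stand-ins, not Bitcoin Core types, whose chain work is a 256-bit integer):

#include <algorithm>
#include <cstdint>

// Simplified stand-in for CBlockIndex.
struct Index {
    const Index* pprev{nullptr};
    uint64_t chain_work{0};
    uint32_t time{0};
    uint32_t time_max{0};
};

// Mirrors the parent-linking done in AddToBlockIndex: cumulative work is the
// parent's cumulative work plus this block's proof; time_max is monotone.
void Link(Index& child, const Index* parent, uint64_t block_proof)
{
    child.pprev = parent;
    child.time_max = parent ? std::max(parent->time_max, child.time) : child.time;
    child.chain_work = (parent ? parent->chain_work : 0) + block_proof;
}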
+
+void BlockManager::PruneOneBlockFile(const int fileNumber)
+{
+ AssertLockHeld(cs_main);
+ LOCK(cs_LastBlockFile);
+
+ for (const auto& entry : m_block_index) {
+ CBlockIndex* pindex = entry.second;
+ if (pindex->nFile == fileNumber) {
+ pindex->nStatus &= ~BLOCK_HAVE_DATA;
+ pindex->nStatus &= ~BLOCK_HAVE_UNDO;
+ pindex->nFile = 0;
+ pindex->nDataPos = 0;
+ pindex->nUndoPos = 0;
+ m_dirty_blockindex.insert(pindex);
+
+ // Prune from m_blocks_unlinked -- any block we prune would have
+ // to be downloaded again in order to consider its chain, at which
+ // point it would be considered as a candidate for
+ // m_blocks_unlinked or setBlockIndexCandidates.
+ auto range = m_blocks_unlinked.equal_range(pindex->pprev);
+ while (range.first != range.second) {
+ std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
+ range.first++;
+ if (_it->second == pindex) {
+ m_blocks_unlinked.erase(_it);
+ }
+ }
+ }
+ }
+
+ m_blockfile_info[fileNumber].SetNull();
+ m_dirty_fileinfo.insert(fileNumber);
+}
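The m_blocks_unlinked cleanup uses the classic erase-while-iterating idiom for multimaps: advance the iterator before erasing so it never dangles. A minimal sketch of the same idiom with plain int keys and values:

#include <map>

// Erase every entry under `key` whose mapped value equals `value`, advancing
// the iterator before a possible erase so it stays valid -- the same idiom
// PruneOneBlockFile applies to m_blocks_unlinked.
void EraseMatching(std::multimap<int, int>& mm, int key, int value)
{
    auto range = mm.equal_range(key);
    while (range.first != range.second) {
        auto it = range.first++;
        if (it->second == value) mm.erase(it);
    }
}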
+
+void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
+{
+ assert(fPruneMode && nManualPruneHeight > 0);
+
+ LOCK2(cs_main, cs_LastBlockFile);
+ if (chain_tip_height < 0) {
+ return;
+ }
+
+ // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
+ unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
+ int count = 0;
+ for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
+ if (m_blockfile_info[fileNumber].nSize == 0 || m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+ continue;
+ }
+ PruneOneBlockFile(fileNumber);
+ setFilesToPrune.insert(fileNumber);
+ count++;
+ }
+ LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
+}
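A worked example of the clamp above, with assumed heights (MIN_BLOCKS_TO_KEEP is 288 in Bitcoin Core): a manual prune to height 699900 with the tip at 700000 is limited to min(699900, 700000 - 288) = 699712.

#include <algorithm>
#include <cassert>

int main()
{
    const unsigned MIN_BLOCKS_TO_KEEP{288}; // Bitcoin Core's constant
    const int chain_tip_height{700000};     // example tip height
    const unsigned manual_prune_height{699900};
    const unsigned last_block_we_can_prune =
        std::min(manual_prune_height, (unsigned)chain_tip_height - MIN_BLOCKS_TO_KEEP);
    assert(last_block_we_can_prune == 699712);
}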
+
+void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
+{
+ LOCK2(cs_main, cs_LastBlockFile);
+ if (chain_tip_height < 0 || nPruneTarget == 0) {
+ return;
+ }
+ if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
+ return;
+ }
+
+ unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
+ uint64_t nCurrentUsage = CalculateCurrentUsage();
+ // We don't check to prune until after we've allocated new space for files,
+ // so we should leave a buffer under our target to account for another
+ // allocation before the next pruning.
+ uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
+ uint64_t nBytesToPrune;
+ int count = 0;
+
+ if (nCurrentUsage + nBuffer >= nPruneTarget) {
+ // On a prune event, the chainstate DB is flushed.
+ // To avoid excessive prune events negating the benefit of high dbcache
+ // values, we should not prune too rapidly.
+ // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
+ if (is_ibd) {
+ // Since this is only relevant during IBD, we use a fixed 10%
+ nBuffer += nPruneTarget / 10;
+ }
+
+ for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
+ nBytesToPrune = m_blockfile_info[fileNumber].nSize + m_blockfile_info[fileNumber].nUndoSize;
+
+ if (m_blockfile_info[fileNumber].nSize == 0) {
+ continue;
+ }
+
+ if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
+ break;
+ }
+
+ // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
+ if (m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+ continue;
+ }
+
+ PruneOneBlockFile(fileNumber);
+ // Queue up the files for removal
+ setFilesToPrune.insert(fileNumber);
+ nCurrentUsage -= nBytesToPrune;
+ count++;
+ }
+ }
+
+ LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
+ nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
+ ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
+ nLastBlockWeCanPrune, count);
+}
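To make the trigger condition concrete, a small worked example with assumed numbers (the 16 MiB and 1 MiB chunk sizes match Bitcoin Core's BLOCKFILE_CHUNK_SIZE and UNDOFILE_CHUNK_SIZE; the 550 MiB target and 520 MiB usage are hypothetical):

#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t MiB{1024 * 1024};
    const uint64_t prune_target{550 * MiB}; // e.g. -prune=550
    uint64_t buffer{16 * MiB + 1 * MiB};    // block + undo chunk sizes
    const bool is_ibd{true};
    if (is_ibd) buffer += prune_target / 10; // extra 10% headroom during IBD
    const uint64_t usage{520 * MiB};         // hypothetical current blk+rev usage
    // 520 + 17 + 55 = 592 MiB >= 550 MiB, so a prune event fires.
    std::printf("prune now? %s\n", usage + buffer >= prune_target ? "yes" : "no");
}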
+
+CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
+{
+ AssertLockHeld(cs_main);
+
+ if (hash.IsNull()) {
+ return nullptr;
+ }
+
+ // Return existing
+ BlockMap::iterator mi = m_block_index.find(hash);
+ if (mi != m_block_index.end()) {
+ return (*mi).second;
+ }
+
+ // Create new
+ CBlockIndex* pindexNew = new CBlockIndex();
+ mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+ pindexNew->phashBlock = &((*mi).first);
+
+ return pindexNew;
+}
+
+bool BlockManager::LoadBlockIndex(
+ const Consensus::Params& consensus_params,
+ ChainstateManager& chainman)
+{
+ if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
+ return false;
+ }
+
+ // Calculate nChainWork
+ std::vector<std::pair<int, CBlockIndex*>> vSortedByHeight;
+ vSortedByHeight.reserve(m_block_index.size());
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
+ CBlockIndex* pindex = item.second;
+ vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
+ }
+ sort(vSortedByHeight.begin(), vSortedByHeight.end());
+
+ // Find start of assumed-valid region.
+ int first_assumed_valid_height = std::numeric_limits<int>::max();
+
+ for (const auto& [height, block] : vSortedByHeight) {
+ if (block->IsAssumedValid()) {
+ auto chainstates = chainman.GetAll();
+
+ // If we encounter an assumed-valid block index entry, ensure that we have
+ // one chainstate that tolerates assumed-valid entries and another that does
+ // not (i.e. the background validation chainstate), since assumed-valid
+ // entries should always be pending validation by a fully-validated chainstate.
+ auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
+ assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
+ assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
+
+ first_assumed_valid_height = height;
+ break;
+ }
+ }
+
+ for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight) {
+ if (ShutdownRequested()) return false;
+ CBlockIndex* pindex = item.second;
+ pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
+ pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
+
+ // We can link the chain of blocks for which we've received transactions at some point, or
+ // blocks that are assumed-valid on the basis of snapshot load (see
+ // PopulateAndValidateSnapshot()).
+ // Pruned nodes may have deleted the block.
+ if (pindex->nTx > 0) {
+ if (pindex->pprev) {
+ if (pindex->pprev->nChainTx > 0) {
+ pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
+ } else {
+ pindex->nChainTx = 0;
+ m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
+ }
+ } else {
+ pindex->nChainTx = pindex->nTx;
+ }
+ }
+ if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
+ pindex->nStatus |= BLOCK_FAILED_CHILD;
+ m_dirty_blockindex.insert(pindex);
+ }
+ if (pindex->IsAssumedValid() ||
+ (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
+ (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
+
+ // Fill each chainstate's block candidate set. Only add assumed-valid
+ // blocks to the tip candidate set if the chainstate is allowed to rely on
+ // assumed-valid blocks.
+ //
+ // If all setBlockIndexCandidates contained the assumed-valid blocks, the
+ // background chainstate's ActivateBestChain() call would add assumed-valid
+ // blocks to the chain (based on how FindMostWorkChain() works). Obviously
+ // we don't want this since the purpose of the background validation chain
+ // is to validate assumed-valid blocks.
+ //
+ // Note: This treats all blocks whose height is greater than or equal to
+ // that of the first assumed-valid block as assumed-valid blocks, excluding
+ // them from the background chainstate's setBlockIndexCandidates set. This
+ // does mean that some blocks which are not technically assumed-valid
+ // (later blocks on a fork beginning before the first assumed-valid block)
+ // might not get added to the background chainstate, but this is ok,
+ // because they will still be attached to the active chainstate if they
+ // actually contain more work.
+ //
+ // Instead of this height-based approach, an earlier attempt was made at
+ // detecting "holistically" whether the block index under consideration
+ // relied on an assumed-valid ancestor, but this proved to be too slow to
+ // be practical.
+ for (CChainState* chainstate : chainman.GetAll()) {
+ if (chainstate->reliesOnAssumedValid() ||
+ pindex->nHeight < first_assumed_valid_height) {
+ chainstate->setBlockIndexCandidates.insert(pindex);
+ }
+ }
+ }
+ if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
+ chainman.m_best_invalid = pindex;
+ }
+ if (pindex->pprev) {
+ pindex->BuildSkip();
+ }
+ if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
+ pindexBestHeader = pindex;
+ }
+
+ return true;
+}
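The height-based gate in the loop above can be isolated into a few lines. A minimal sketch with stand-in types (heights replace CBlockIndex pointers; Chainstate and AddCandidate are illustrative, not Bitcoin Core's API):

#include <set>
#include <vector>

// Stand-in chainstate: heights stand in for CBlockIndex* for brevity.
struct Chainstate {
    bool relies_on_assumed_valid;
    std::set<int> candidates;
};

// Mirrors the gate above: only a chainstate that tolerates assumed-valid
// blocks may adopt candidates at or past the first assumed-valid height.
void AddCandidate(std::vector<Chainstate>& chains, int height, int first_assumed_valid_height)
{
    for (Chainstate& c : chains) {
        if (c.relies_on_assumed_valid || height < first_assumed_valid_height) {
            c.candidates.insert(height);
        }
    }
}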
+
+void BlockManager::Unload()
+{
+ m_blocks_unlinked.clear();
+
+ for (const BlockMap::value_type& entry : m_block_index) {
+ delete entry.second;
+ }
+
+ m_block_index.clear();
+
+ m_blockfile_info.clear();
+ m_last_blockfile = 0;
+ m_dirty_blockindex.clear();
+ m_dirty_fileinfo.clear();
+}
+
+bool BlockManager::WriteBlockIndexDB()
+{
+ AssertLockHeld(::cs_main);
+ std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
+ vFiles.reserve(m_dirty_fileinfo.size());
+ for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
+ vFiles.push_back(std::make_pair(*it, &m_blockfile_info[*it]));
+ m_dirty_fileinfo.erase(it++);
+ }
+ std::vector<const CBlockIndex*> vBlocks;
+ vBlocks.reserve(m_dirty_blockindex.size());
+ for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
+ vBlocks.push_back(*it);
+ m_dirty_blockindex.erase(it++);
+ }
+ if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) {
+ return false;
+ }
+ return true;
+}
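Both loops above drain their dirty set while building the write batch, so the next flush only rewrites entries dirtied in the meantime. A minimal sketch of the pattern with a set of ints:

#include <set>
#include <vector>

// Copy each dirty entry into the batch and drop it from the set in the same
// pass; erase() returns the iterator to the next element.
std::vector<int> DrainDirty(std::set<int>& dirty)
{
    std::vector<int> batch;
    batch.reserve(dirty.size());
    for (auto it = dirty.begin(); it != dirty.end();) {
        batch.push_back(*it);
        it = dirty.erase(it);
    }
    return batch;
}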
+
+bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
+{
+ if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
+ return false;
+ }
+
+ // Load block file info
+ m_block_tree_db->ReadLastBlockFile(m_last_blockfile);
+ m_blockfile_info.resize(m_last_blockfile + 1);
+ LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile);
+ for (int nFile = 0; nFile <= m_last_blockfile; nFile++) {
+ m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
+ }
+ LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString());
+ for (int nFile = m_last_blockfile + 1; true; nFile++) {
+ CBlockFileInfo info;
+ if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
+ m_blockfile_info.push_back(info);
+ } else {
+ break;
+ }
+ }
+
+ // Check presence of blk files
+ LogPrintf("Checking all blk files are present...\n");
+ std::set<int> setBlkDataFiles;
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
+ CBlockIndex* pindex = item.second;
+ if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ setBlkDataFiles.insert(pindex->nFile);
+ }
+ }
+ for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
+ FlatFilePos pos(*it, 0);
+ if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
+ return false;
+ }
+ }
+
+ // Check whether we have ever pruned block & undo files
+ m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
+ if (fHavePruned) {
+ LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
+ }
+
+ // Check whether we need to continue reindexing
+ bool fReindexing = false;
+ m_block_tree_db->ReadReindexing(fReindexing);
+ if (fReindexing) fReindex = true;
+
+ return true;
+}
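Loading file info proceeds in two phases: read entries 0..m_last_blockfile, then keep probing past the recorded last file until a read misses, which picks up files written after the last clean shutdown. A minimal sketch of the probe loop (ProbePastLast and read_info are illustrative stand-ins for the loop over CBlockTreeDB::ReadBlockFileInfo):

#include <functional>
#include <vector>

// Probe successive file numbers until the database has no entry, appending
// each hit to the in-memory list.
void ProbePastLast(std::vector<int>& infos, int last_file,
                   const std::function<bool(int, int&)>& read_info)
{
    for (int file = last_file + 1;; ++file) {
        int info;
        if (!read_info(file, info)) break;
        infos.push_back(info);
    }
}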
+
+CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
+{
+ const MapCheckpoints& checkpoints = data.mapCheckpoints;
+
+ for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
+ const uint256& hash = i.second;
+ CBlockIndex* pindex = LookupBlockIndex(hash);
+ if (pindex) {
+ return pindex;
+ }
+ }
+ return nullptr;
+}
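reverse_iterate walks the height-ordered checkpoint map from the highest entry down, so the first hit is the most recent checkpoint we have an index entry for. A minimal sketch of the same scan using std::map's reverse iterators (string hashes and the known_hashes set are stand-ins for uint256 and the block index):

#include <map>
#include <set>
#include <string>

// Return the highest checkpoint whose hash we actually know, or nullptr.
const std::string* LastKnownCheckpoint(const std::map<int, std::string>& checkpoints,
                                       const std::set<std::string>& known_hashes)
{
    for (auto it = checkpoints.rbegin(); it != checkpoints.rend(); ++it) {
        if (known_hashes.count(it->second)) return &it->second;
    }
    return nullptr;
}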
+
bool IsBlockPruned(const CBlockIndex* pblockindex)
{
+ AssertLockHeld(::cs_main);
return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
}
// If we're using -prune with -reindex, then delete block files that will be ignored by the
// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
// is missing, do the same here to delete any later block files after a gap. Also delete all
-// rev files since they'll be rewritten by the reindex anyway. This ensures that vinfoBlockFile
+// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
// is in sync with what's actually on disk by the time we start downloading, so that pruning
// works correctly.
void CleanupBlockRevFiles()
@@ -95,16 +476,11 @@ void CleanupBlockRevFiles()
}
}
-std::string CBlockFileInfo::ToString() const
-{
- return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
-}
-
-CBlockFileInfo* GetBlockFileInfo(size_t n)
+CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
{
LOCK(cs_LastBlockFile);
- return &vinfoBlockFile.at(n);
+ return &m_blockfile_info.at(n);
}
static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
@@ -138,7 +514,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
{
- FlatFilePos pos = pindex->GetUndoPos();
+ const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())};
+
if (pos.IsNull()) {
return error("%s: no undo data available", __func__);
}
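WITH_LOCK (from Bitcoin Core's sync.h) evaluates an expression under a scoped lock and returns its value, which is what lets the new code above initialize a const FlatFilePos without holding cs_main afterwards. A minimal standalone equivalent (WithLock here is an illustrative re-implementation, not the real macro):

#include <mutex>

// Run fn() under a scoped lock and hand back its result, so the caller can
// initialize a const local without keeping the mutex held.
template <typename Mutex, typename Fn>
auto WithLock(Mutex& m, Fn&& fn) -> decltype(fn())
{
    std::lock_guard<Mutex> lock(m);
    return fn();
}

// Usage: const auto pos = WithLock(m, [&] { return shared_pos; });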
@@ -168,32 +545,32 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
return true;
}
-static void FlushUndoFile(int block_file, bool finalize = false)
+void BlockManager::FlushUndoFile(int block_file, bool finalize)
{
- FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
+ FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
}
}
-void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
+void BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
{
LOCK(cs_LastBlockFile);
- FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
+ FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize);
if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
}
// we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
// e.g. during IBD or a sync after a node going offline
- if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
+ if (!fFinalize || finalize_undo) FlushUndoFile(m_last_blockfile, finalize_undo);
}
-uint64_t CalculateCurrentUsage()
+uint64_t BlockManager::CalculateCurrentUsage()
{
LOCK(cs_LastBlockFile);
uint64_t retval = 0;
- for (const CBlockFileInfo& file : vinfoBlockFile) {
+ for (const CBlockFileInfo& file : m_blockfile_info) {
retval += file.nSize + file.nUndoSize;
}
return retval;
@@ -235,44 +612,44 @@ fs::path GetBlockPosFilename(const FlatFilePos& pos)
return BlockFileSeq().FileName(pos);
}
-bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false)
+bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown)
{
LOCK(cs_LastBlockFile);
- unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
- if (vinfoBlockFile.size() <= nFile) {
- vinfoBlockFile.resize(nFile + 1);
+ unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile;
+ if (m_blockfile_info.size() <= nFile) {
+ m_blockfile_info.resize(nFile + 1);
}
bool finalize_undo = false;
if (!fKnown) {
- while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
+ while (m_blockfile_info[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
// when the undo file is keeping up with the block file, we want to flush it explicitly
// when it is lagging behind (more blocks arrive than are being connected), we let the
// undo block write case handle it
- finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
+ finalize_undo = (m_blockfile_info[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
nFile++;
- if (vinfoBlockFile.size() <= nFile) {
- vinfoBlockFile.resize(nFile + 1);
+ if (m_blockfile_info.size() <= nFile) {
+ m_blockfile_info.resize(nFile + 1);
}
}
pos.nFile = nFile;
- pos.nPos = vinfoBlockFile[nFile].nSize;
+ pos.nPos = m_blockfile_info[nFile].nSize;
}
- if ((int)nFile != nLastBlockFile) {
+ if ((int)nFile != m_last_blockfile) {
if (!fKnown) {
- LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
+ LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString());
}
FlushBlockFile(!fKnown, finalize_undo);
- nLastBlockFile = nFile;
+ m_last_blockfile = nFile;
}
- vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
+ m_blockfile_info[nFile].AddBlock(nHeight, nTime);
if (fKnown) {
- vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
+ m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize);
} else {
- vinfoBlockFile[nFile].nSize += nAddSize;
+ m_blockfile_info[nFile].nSize += nAddSize;
}
if (!fKnown) {
@@ -282,23 +659,23 @@ bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight,
return AbortNode("Disk space is too low!", _("Disk space is too low!"));
}
if (bytes_allocated != 0 && fPruneMode) {
- fCheckForPruning = true;
+ m_check_for_pruning = true;
}
}
- setDirtyFileInfo.insert(nFile);
+ m_dirty_fileinfo.insert(nFile);
return true;
}
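The roll-over loop at the top of FindBlockPos can be read in isolation: a write that would push the current file past its cap moves on to the next file number, where the cap is 64 KiB under -fastprune and MAX_BLOCKFILE_SIZE (128 MiB in Bitcoin Core) otherwise. A minimal sketch (PickBlockFile and the plain size vector are illustrative stand-ins):

#include <cstdint>
#include <vector>

// Advance the file number until the pending write fits under the cap,
// growing the bookkeeping vector as new files come into existence.
unsigned PickBlockFile(std::vector<uint64_t>& file_sizes, unsigned file,
                       uint64_t add_size, bool fastprune)
{
    const uint64_t cap = fastprune ? 0x10000 /* 64 KiB */ : 0x8000000 /* 128 MiB */;
    if (file_sizes.size() <= file) file_sizes.resize(file + 1);
    while (file_sizes[file] + add_size >= cap) {
        ++file;
        if (file_sizes.size() <= file) file_sizes.resize(file + 1);
    }
    return file;
}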
-static bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
+bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
{
pos.nFile = nFile;
LOCK(cs_LastBlockFile);
- pos.nPos = vinfoBlockFile[nFile].nUndoSize;
- vinfoBlockFile[nFile].nUndoSize += nAddSize;
- setDirtyFileInfo.insert(nFile);
+ pos.nPos = m_blockfile_info[nFile].nUndoSize;
+ m_blockfile_info[nFile].nUndoSize += nAddSize;
+ m_dirty_fileinfo.insert(nFile);
bool out_of_space;
size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
@@ -306,7 +683,7 @@ static bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos
return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
}
if (bytes_allocated != 0 && fPruneMode) {
- fCheckForPruning = true;
+ m_check_for_pruning = true;
}
return true;
@@ -335,8 +712,9 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa
return true;
}
-bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
+bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
+ AssertLockHeld(::cs_main);
// Write undo information to disk
if (pindex->GetUndoPos().IsNull()) {
FlatFilePos _pos;
@@ -351,14 +729,14 @@ bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& st
// in the block file info as below; note that this does not catch the case where the undo writes are keeping up
// with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
// the FindBlockPos function
- if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
+ if (_pos.nFile < m_last_blockfile && static_cast<uint32_t>(pindex->nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
FlushUndoFile(_pos.nFile, true);
}
// update nUndoPos in block index
pindex->nUndoPos = _pos.nPos;
pindex->nStatus |= BLOCK_HAVE_UNDO;
- setDirtyBlockIndex.insert(pindex);
+ m_dirty_blockindex.insert(pindex);
}
return true;
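The lagging-undo condition above fits in one predicate: finalize the undo file once it is no longer the last block file and this undo record completes that file's highest block. A minimal sketch with a stand-in FileInfo (only the one field the rule needs):

// Stand-in for CBlockFileInfo.
struct FileInfo {
    unsigned height_last; // highest block height stored in this file
};

// True when an undo write lands in an older file and completes its highest
// block, so the file can be flushed and finalized.
bool ShouldFinalizeUndo(int undo_file, int last_blockfile,
                        unsigned block_height, const FileInfo& info)
{
    return undo_file < last_blockfile && block_height == info.height_last;
}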
@@ -435,7 +813,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
}
block.resize(blk_size); // Zeroing of memory is intentional here
- filein.read((char*)block.data(), blk_size);
+ filein.read(MakeWritableByteSpan(block));
} catch (const std::exception& e) {
return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
}
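The span-based read replaces a raw pointer-plus-length pair: the span carries its own size, so the length can no longer disagree with the buffer it describes. A minimal sketch of the idea (ByteSpan and ReadInto are illustrative, not Bitcoin Core's Span API):

#include <cstddef>
#include <cstring>
#include <stdexcept>
#include <vector>

// Illustrative span: pointer and size travel together.
struct ByteSpan {
    std::byte* data;
    std::size_t size;
};

// Copy from a source buffer into the span; the length comes from the span
// itself, so a mismatched size argument cannot overrun the destination.
void ReadInto(const std::vector<std::byte>& src, std::size_t offset, ByteSpan out)
{
    if (offset + out.size > src.size()) throw std::runtime_error("short read");
    std::memcpy(out.data, src.data() + offset, out.size);
}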
@@ -443,19 +821,8 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
return true;
}
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
-{
- FlatFilePos block_pos;
- {
- LOCK(cs_main);
- block_pos = pindex->GetBlockPos();
- }
-
- return ReadRawBlockFromDisk(block, block_pos, message_start);
-}
-
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp)
+FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp)
{
unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
FlatFilePos blockPos;
@@ -561,3 +928,4 @@ void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFile
} // End scope of CImportingNow
chainman.ActiveChainstate().LoadMempool(args);
}
+} // namespace node