aboutsummaryrefslogtreecommitdiff
path: root/src/node
diff options
context:
space:
mode:
authorJames O'Beirne <james.obeirne@pm.me>2023-05-03 14:55:03 -0400
committerJames O'Beirne <james.obeirne@pm.me>2023-09-30 06:40:17 -0400
commit7fcd21544a333ffdf1910b65c573579860be6a36 (patch)
tree9e97c21851526d9a6a589a34b72c6b2eeb1dc5a5 /src/node
parent4c3b8ca35c2e4a441264749bb312df2bd054b5b8 (diff)
downloadbitcoin-7fcd21544a333ffdf1910b65c573579860be6a36.tar.xz
blockstorage: segment normal/assumedvalid blockfiles
When using an assumedvalid (snapshot) chainstate along with a background chainstate, we are syncing two very different regions of the chain simultaneously. If we use the same blockfile space for both of these syncs, wildly different height blocks will be stored alongside one another, making pruning ineffective. This change implements a separate blockfile cursor for the assumedvalid chainstate when one is in use.
Diffstat (limited to 'src/node')
-rw-r--r--src/node/blockstorage.cpp139
-rw-r--r--src/node/blockstorage.h84
2 files changed, 177 insertions, 46 deletions
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 7ed2346ae4..5e61ed3100 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -10,6 +10,7 @@
#include <dbwrapper.h>
#include <flatfile.h>
#include <hash.h>
+#include <kernel/chain.h>
#include <kernel/chainparams.h>
#include <kernel/messagestartchars.h>
#include <logging.h>
@@ -273,7 +274,7 @@ void BlockManager::FindFilesToPruneManual(
const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
int count = 0;
- for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
+ for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
const auto& fileinfo = m_blockfile_info[fileNumber];
if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
continue;
@@ -325,7 +326,7 @@ void BlockManager::FindFilesToPrune(
nBuffer += target / 10;
}
- for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
+ for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
const auto& fileinfo = m_blockfile_info[fileNumber];
nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
@@ -385,19 +386,25 @@ bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockha
return false;
}
- int snapshot_height = -1;
if (snapshot_blockhash) {
const AssumeutxoData au_data = *Assert(GetParams().AssumeutxoForBlockhash(*snapshot_blockhash));
- snapshot_height = au_data.height;
+ m_snapshot_height = au_data.height;
CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
- // Since nChainTx (responsible for estiamted progress) isn't persisted
+ // Since nChainTx (responsible for estimated progress) isn't persisted
// to disk, we must bootstrap the value for assumedvalid chainstates
// from the hardcoded assumeutxo chainparams.
base->nChainTx = au_data.nChainTx;
LogPrintf("[snapshot] set nChainTx=%d for %s\n", au_data.nChainTx, snapshot_blockhash->ToString());
+ } else {
+ // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
+ // is null. This is relevant during snapshot completion, when the blockman may be loaded
+ // with a height that then needs to be cleared after the snapshot is fully validated.
+ m_snapshot_height.reset();
}
+ Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
+
// Calculate nChainWork
std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
@@ -414,7 +421,7 @@ bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockha
// Pruned nodes may have deleted the block.
if (pindex->nTx > 0) {
if (pindex->pprev) {
- if (snapshot_blockhash && pindex->nHeight == snapshot_height &&
+ if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
pindex->GetBlockHash() == *snapshot_blockhash) {
// Should have been set above; don't disturb it with code below.
Assert(pindex->nChainTx > 0);
@@ -455,7 +462,8 @@ bool BlockManager::WriteBlockIndexDB()
vBlocks.push_back(*it);
m_dirty_blockindex.erase(it++);
}
- if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) {
+ int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
+ if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
return false;
}
return true;
@@ -466,16 +474,17 @@ bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_block
if (!LoadBlockIndex(snapshot_blockhash)) {
return false;
}
+ int max_blockfile_num{0};
// Load block file info
- m_block_tree_db->ReadLastBlockFile(m_last_blockfile);
- m_blockfile_info.resize(m_last_blockfile + 1);
- LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile);
- for (int nFile = 0; nFile <= m_last_blockfile; nFile++) {
+ m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
+ m_blockfile_info.resize(max_blockfile_num + 1);
+ LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
+ for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
}
- LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString());
- for (int nFile = m_last_blockfile + 1; true; nFile++) {
+ LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
+ for (int nFile = max_blockfile_num + 1; true; nFile++) {
CBlockFileInfo info;
if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
m_blockfile_info.push_back(info);
@@ -499,6 +508,15 @@ bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_block
}
}
+ {
+ // Initialize the blockfile cursors.
+ LOCK(cs_LastBlockFile);
+ for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
+ const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
+ m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
+ }
+ }
+
// Check whether we have ever pruned block & undo files
m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
if (m_have_pruned) {
@@ -516,12 +534,13 @@ bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_block
void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
{
AssertLockHeld(::cs_main);
+ int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
if (!m_have_pruned) {
return;
}
std::set<int> block_files_to_prune;
- for (int file_number = 0; file_number < m_last_blockfile; file_number++) {
+ for (int file_number = 0; file_number < max_blockfile; file_number++) {
if (m_blockfile_info[file_number].nSize == 0) {
block_files_to_prune.insert(file_number);
}
@@ -696,7 +715,7 @@ bool BlockManager::FlushUndoFile(int block_file, bool finalize)
return true;
}
-bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
+bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
{
bool success = true;
LOCK(cs_LastBlockFile);
@@ -708,9 +727,9 @@ bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
// have populated `m_blockfile_info` via LoadBlockIndexDB().
return true;
}
- assert(static_cast<int>(m_blockfile_info.size()) > m_last_blockfile);
+ assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
- FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize);
+ FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error.");
success = false;
@@ -718,13 +737,33 @@ bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
// we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
// e.g. during IBD or a sync after a node going offline
if (!fFinalize || finalize_undo) {
- if (!FlushUndoFile(m_last_blockfile, finalize_undo)) {
+ if (!FlushUndoFile(blockfile_num, finalize_undo)) {
success = false;
}
}
return success;
}
+BlockfileType BlockManager::BlockfileTypeForHeight(int height)
+{
+ if (!m_snapshot_height) {
+ return BlockfileType::NORMAL;
+ }
+ return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
+}
+
+bool BlockManager::FlushChainstateBlockFile(int tip_height)
+{
+ LOCK(cs_LastBlockFile);
+ auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
+ if (cursor) {
+ // The cursor may not exist after a snapshot has been loaded but before any
+ // blocks have been downloaded.
+ return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
+ }
+ return false;
+}
+
uint64_t BlockManager::CalculateCurrentUsage()
{
LOCK(cs_LastBlockFile);
@@ -779,8 +818,19 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
{
LOCK(cs_LastBlockFile);
- unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile;
- if (m_blockfile_info.size() <= nFile) {
+ const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
+
+ if (!m_blockfile_cursors[chain_type]) {
+ // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
+ assert(chain_type == BlockfileType::ASSUMED);
+ const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
+ m_blockfile_cursors[chain_type] = new_cursor;
+ LogPrint(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
+ }
+ const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
+
+ int nFile = fKnown ? pos.nFile : last_blockfile;
+ if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
m_blockfile_info.resize(nFile + 1);
}
@@ -797,13 +847,20 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
}
}
assert(nAddSize < max_blockfile_size);
+
while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
// when the undo file is keeping up with the block file, we want to flush it explicitly
// when it is lagging behind (more blocks arrive than are being connected), we let the
// undo block write case handle it
- finalize_undo = (m_blockfile_info[nFile].nHeightLast == m_undo_height_in_last_blockfile);
- nFile++;
- if (m_blockfile_info.size() <= nFile) {
+ finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
+ Assert(m_blockfile_cursors[chain_type])->undo_height);
+
+ // Try the next unclaimed blockfile number
+ nFile = this->MaxBlockfileNum() + 1;
+ // Set to increment MaxBlockfileNum() for next iteration
+ m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
+
+ if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
m_blockfile_info.resize(nFile + 1);
}
}
@@ -811,9 +868,10 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
pos.nPos = m_blockfile_info[nFile].nSize;
}
- if ((int)nFile != m_last_blockfile) {
+ if (nFile != last_blockfile) {
if (!fKnown) {
- LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString());
+ LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
+ last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
}
// Do not propagate the return code. The flush concerns a previous block
@@ -823,13 +881,13 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
// data may be inconsistent after a crash if the flush is called during
// a reindex. A flush error might also leave some of the data files
// untrimmed.
- if (!FlushBlockFile(!fKnown, finalize_undo)) {
+ if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) {
LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
"Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n",
- m_last_blockfile, !fKnown, finalize_undo, nFile);
+ last_blockfile, !fKnown, finalize_undo, nFile);
}
- m_last_blockfile = nFile;
- m_undo_height_in_last_blockfile = 0; // No undo data yet in the new file, so reset our undo-height tracking.
+ // No undo data yet in the new file, so reset our undo-height tracking.
+ m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
}
m_blockfile_info[nFile].AddBlock(nHeight, nTime);
@@ -903,6 +961,9 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
{
AssertLockHeld(::cs_main);
+ const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
+ auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
+
// Write undo information to disk
if (block.GetUndoPos().IsNull()) {
FlatFilePos _pos;
@@ -917,7 +978,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid
// in the block file info as below; note that this does not catch the case where the undo writes are keeping up
// with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
// the FindBlockPos function
- if (_pos.nFile < m_last_blockfile && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
+ if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
// Do not propagate the return code, a failed flush here should not
// be an indication for a failed write. If it were propagated here,
// the caller would assume the undo data not to be written, when in
@@ -926,8 +987,8 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid
if (!FlushUndoFile(_pos.nFile, true)) {
LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile);
}
- } else if (_pos.nFile == m_last_blockfile && static_cast<uint32_t>(block.nHeight) > m_undo_height_in_last_blockfile) {
- m_undo_height_in_last_blockfile = block.nHeight;
+ } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
+ cursor.undo_height = block.nHeight;
}
// update nUndoPos in block index
block.nUndoPos = _pos.nPos;
@@ -1126,4 +1187,18 @@ void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFile
}
} // End scope of ImportingNow
}
+
+std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
+ switch(type) {
+ case BlockfileType::NORMAL: os << "normal"; break;
+ case BlockfileType::ASSUMED: os << "assumed"; break;
+ default: os.setstate(std::ios_base::failbit);
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
+ os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
+ return os;
+}
} // namespace node
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index fcd9fb9f67..ac97728c05 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -9,6 +9,7 @@
#include <chain.h>
#include <dbwrapper.h>
#include <kernel/blockmanager_opts.h>
+#include <kernel/chain.h>
#include <kernel/chainparams.h>
#include <kernel/cs_main.h>
#include <kernel/messagestartchars.h>
@@ -36,7 +37,6 @@ class CBlockUndo;
class CChainParams;
class Chainstate;
class ChainstateManager;
-enum class ChainstateRole;
struct CCheckpointData;
struct FlatFilePos;
namespace Consensus {
@@ -98,6 +98,35 @@ struct PruneLockInfo {
int height_first{std::numeric_limits<int>::max()}; //! Height of earliest block that should be kept and not pruned
};
+enum BlockfileType {
+ // Values used as array indexes - do not change carelessly.
+ NORMAL = 0,
+ ASSUMED = 1,
+ NUM_TYPES = 2,
+};
+
+std::ostream& operator<<(std::ostream& os, const BlockfileType& type);
+
+struct BlockfileCursor {
+ // The latest blockfile number.
+ int file_num{0};
+
+ // Track the height of the highest block in file_num whose undo
+ // data has been written. Block data is written to block files in download
+ // order, but is written to undo files in validation order, which is
+ // usually in order by height. To avoid wasting disk space, undo files will
+ // be trimmed whenever the corresponding block file is finalized and
+ // the height of the highest block written to the block file equals the
+ // height of the highest block written to the undo file. This is a
+ // heuristic and can sometimes preemptively trim undo files that will write
+ // more data later, and sometimes fail to trim undo files that can't have
+ // more data written later.
+ int undo_height{0};
+};
+
+std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor);
+
+
/**
* Maintains a tree of blocks (stored in `m_block_index`) which is consulted
* to determine where the most-work tip is.
@@ -122,12 +151,13 @@ private:
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Return false if block file or undo file flushing fails. */
- [[nodiscard]] bool FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
+ [[nodiscard]] bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo);
/** Return false if undo file flushing fails. */
[[nodiscard]] bool FlushUndoFile(int block_file, bool finalize = false);
[[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown);
+ [[nodiscard]] bool FlushChainstateBlockFile(int tip_height);
bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize);
FlatFileSeq BlockFileSeq() const;
@@ -169,19 +199,29 @@ private:
RecursiveMutex cs_LastBlockFile;
std::vector<CBlockFileInfo> m_blockfile_info;
- int m_last_blockfile = 0;
- // Track the height of the highest block in m_last_blockfile whose undo
- // data has been written. Block data is written to block files in download
- // order, but is written to undo files in validation order, which is
- // usually in order by height. To avoid wasting disk space, undo files will
- // be trimmed whenever the corresponding block file is finalized and
- // the height of the highest block written to the block file equals the
- // height of the highest block written to the undo file. This is a
- // heuristic and can sometimes preemptively trim undo files that will write
- // more data later, and sometimes fail to trim undo files that can't have
- // more data written later.
- unsigned int m_undo_height_in_last_blockfile = 0;
+ //! Since assumedvalid chainstates may be syncing a range of the chain that is very
+ //! far away from the normal/background validation process, we should segment blockfiles
+ //! for assumed chainstates. Otherwise, we might have wildly different height ranges
+ //! mixed into the same block files, which would impair our ability to prune
+ //! effectively.
+ //!
+ //! This data structure maintains separate blockfile number cursors for each
+ //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindBlockPos().
+ //!
+ //! The first element is the NORMAL cursor, second is ASSUMED.
+ std::array<std::optional<BlockfileCursor>, BlockfileType::NUM_TYPES>
+ m_blockfile_cursors GUARDED_BY(cs_LastBlockFile) = {
+ BlockfileCursor{},
+ std::nullopt,
+ };
+ int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile)
+ {
+ static const BlockfileCursor empty_cursor;
+ const auto& normal = m_blockfile_cursors[BlockfileType::NORMAL].value_or(empty_cursor);
+ const auto& assumed = m_blockfile_cursors[BlockfileType::ASSUMED].value_or(empty_cursor);
+ return std::max(normal.file_num, assumed.file_num);
+ }
/** Global flag to indicate we should check to see if there are
* block/undo files that should be deleted. Set on startup
@@ -205,6 +245,8 @@ private:
*/
std::unordered_map<std::string, PruneLockInfo> m_prune_locks GUARDED_BY(::cs_main);
+ BlockfileType BlockfileTypeForHeight(int height);
+
const kernel::BlockManagerOpts m_opts;
public:
@@ -220,6 +262,20 @@ public:
BlockMap m_block_index GUARDED_BY(cs_main);
+ /**
+ * The height of the base block of an assumeutxo snapshot, if one is in use.
+ *
+ * This controls how blockfiles are segmented by chainstate type to avoid
+ * commingling different height regions of the chain when an assumedvalid chainstate
+ * is in use. If heights are drastically different in the same blockfile, pruning
+ * suffers.
+ *
+ * This is set during ActivateSnapshot() or upon LoadBlockIndex() if a snapshot
+ * had been previously loaded. After the snapshot is validated, this is unset to
+ * restore normal LoadBlockIndex behavior.
+ */
+ std::optional<int> m_snapshot_height;
+
std::vector<CBlockIndex*> GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/**