Diffstat (limited to 'src/validation.cpp')
-rw-r--r--  src/validation.cpp  111
1 file changed, 66 insertions(+), 45 deletions(-)
diff --git a/src/validation.cpp b/src/validation.cpp
index 58686632f9..b5d6a66088 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -19,7 +19,6 @@
#include <deploymentstatus.h>
#include <flatfile.h>
#include <hash.h>
-#include <index/blockfilterindex.h>
#include <logging.h>
#include <logging/timer.h>
#include <node/blockstorage.h>
@@ -56,12 +55,11 @@
#include <warnings.h>
#include <algorithm>
+#include <deque>
#include <numeric>
#include <optional>
#include <string>
-#include <boost/algorithm/string/replace.hpp>
-
using node::BLOCKFILE_CHUNK_SIZE;
using node::BlockManager;
using node::BlockMap;
@@ -106,6 +104,12 @@ const std::vector<std::string> CHECKLEVEL_DOC {
"level 4 tries to reconnect the blocks",
"each level includes the checks of the previous levels",
};
+/** The number of blocks to keep below the deepest prune lock.
+ * There is nothing special about this number. It is higher than what we
+ * expect to see in regular mainnet reorgs, but not so high that it would
+ * noticeably interfere with the pruning mechanism.
+ * */
+static constexpr int PRUNE_LOCK_BUFFER{10};
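
For illustration, with PRUNE_LOCK_BUFFER = 10 a prune lock whose height_first is 1000 limits pruning to height 1000 - 10 - 1 = 989: the ten blocks at heights 990 through 999 stay on disk in addition to the locked blocks themselves. The FlushStateToDisk hunk further down applies exactly this arithmetic.
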
/**
* Mutex to guard access to validation specific variables, such as reading
@@ -246,7 +250,12 @@ bool CheckSequenceLocksAtTip(CBlockIndex* tip,
maxInputHeight = std::max(maxInputHeight, height);
}
}
- lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
+ // tip->GetAncestor(maxInputHeight) should never return a nullptr
+ // because maxInputHeight is always less than the tip height.
+ // It would, however, be a bad bug to continue execution, since a
+ // LockPoints object with the maxInputBlock member set to nullptr
+ // signifies no relative lock time.
+ lp->maxInputBlock = Assert(tip->GetAncestor(maxInputHeight));
}
}
return EvaluateSequenceLocks(index, lockPair);
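
The Assert() used here comes from util/check.h: it evaluates its argument, aborts the node if the check fails, and otherwise returns the value, so the checked pointer can be consumed inline. A minimal standalone sketch of that idiom (AssertNotNull is an illustrative name, not the real macro):

#include <cstdlib>

// Illustrative stand-in for Assert(): abort rather than continue with a null
// pointer that would silently be read as "no relative lock time".
template <typename T>
T* AssertNotNull(T* ptr)
{
    if (ptr == nullptr) std::abort();
    return ptr;
}

// Usage mirroring the patch:
//   lp->maxInputBlock = AssertNotNull(tip->GetAncestor(maxInputHeight));
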
@@ -1111,6 +1120,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
if (!ConsensusScriptChecks(args, ws)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since PolicyScriptChecks() passed, this should never fail.
+ Assume(false);
all_submitted = false;
package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s",
@@ -1125,6 +1135,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
m_limit_descendant_size, unused_err_string)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
+ Assume(false);
all_submitted = false;
package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
@@ -1138,6 +1149,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
if (!Finalize(args, ws)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since LimitMempoolSize() won't be called, this should never fail.
+ Assume(false);
all_submitted = false;
package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
strprintf("BUG! Adding to mempool failed: %s", ws.m_ptx->GetHash().ToString()));
@@ -1483,7 +1495,7 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
}
CoinsViews::CoinsViews(
- std::string ldb_name,
+ fs::path ldb_name,
size_t cache_size_bytes,
bool in_memory,
bool should_wipe) : m_dbview(
@@ -1511,7 +1523,7 @@ void CChainState::InitCoinsDB(
size_t cache_size_bytes,
bool in_memory,
bool should_wipe,
- std::string leveldb_name)
+ fs::path leveldb_name)
{
if (m_from_snapshot_blockhash) {
leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
@@ -1569,7 +1581,7 @@ static void AlertNotify(const std::string& strMessage)
std::string singleQuote("'");
std::string safeStatus = SanitizeString(strMessage);
safeStatus = singleQuote+safeStatus+singleQuote;
- boost::replace_all(strCmd, "%s", safeStatus);
+ ReplaceAll(strCmd, "%s", safeStatus);
std::thread t(runCommand, strCmd);
t.detach(); // thread runs free
@@ -1916,7 +1928,7 @@ public:
}
};
-static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
+static std::array<ThresholdConditionCache, VERSIONBITS_NUM_BITS> warningcache GUARDED_BY(cs_main);
static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Consensus::Params& consensusparams)
{
@@ -2338,12 +2350,24 @@ bool CChainState::FlushStateToDisk(
CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
LOCK(m_blockman.cs_LastBlockFile);
if (fPruneMode && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
- // make sure we don't prune above the blockfilterindexes bestblocks
+ // make sure we don't prune above any of the prune locks bestblocks
// pruning is height-based
- int last_prune = m_chain.Height(); // last height we can prune
- ForEachBlockFilterIndex([&](BlockFilterIndex& index) {
- last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height));
- });
+ int last_prune{m_chain.Height()}; // last height we can prune
+ std::optional<std::string> limiting_lock; // prune lock that actually was the limiting factor, only used for logging
+
+ for (const auto& prune_lock : m_blockman.m_prune_locks) {
+ if (prune_lock.second.height_first == std::numeric_limits<int>::max()) continue;
+ // Remove the buffer and one additional block here to get actual height that is outside of the buffer
+ const int lock_height{prune_lock.second.height_first - PRUNE_LOCK_BUFFER - 1};
+ last_prune = std::max(1, std::min(last_prune, lock_height));
+ if (last_prune == lock_height) {
+ limiting_lock = prune_lock.first;
+ }
+ }
+
+ if (limiting_lock) {
+ LogPrint(BCLog::PRUNE, "%s limited pruning to height %d\n", limiting_lock.value(), last_prune);
+ }
if (nManualPruneHeight > 0) {
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
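
A self-contained sketch of the lock-limited prune height computed above. The height_first field and the std::numeric_limits<int>::max() sentinel for a lock that does not yet constrain pruning follow the diff; the map type and function name are illustrative stand-ins for BlockManager::m_prune_locks:

#include <algorithm>
#include <limits>
#include <map>
#include <optional>
#include <string>

struct PruneLockInfo { int height_first{std::numeric_limits<int>::max()}; };
static constexpr int PRUNE_LOCK_BUFFER{10};

// Return the highest block height that may still be pruned given the chain tip
// and the registered prune locks; report which lock (if any) was the limit.
int LimitedPruneHeight(int chain_height,
                       const std::map<std::string, PruneLockInfo>& prune_locks,
                       std::optional<std::string>& limiting_lock)
{
    int last_prune{chain_height};
    for (const auto& [name, lock] : prune_locks) {
        if (lock.height_first == std::numeric_limits<int>::max()) continue; // lock not constraining anything yet
        const int lock_height{lock.height_first - PRUNE_LOCK_BUFFER - 1};
        last_prune = std::max(1, std::min(last_prune, lock_height));
        if (last_prune == lock_height) limiting_lock = name;
    }
    return last_prune;
}
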
@@ -2533,7 +2557,7 @@ void CChainState::UpdateTip(const CBlockIndex* pindexNew)
const CBlockIndex* pindex = pindexNew;
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
WarningBitsConditionChecker checker(bit);
- ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache[bit]);
+ ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache.at(bit));
if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
if (state == ThresholdState::ACTIVE) {
@@ -2581,6 +2605,18 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr
assert(flushed);
}
LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
+
+ {
+ // Prune locks that began at or after the tip should be moved backward so they get a chance to reorg
+ const int max_height_first{pindexDelete->nHeight - 1};
+ for (auto& prune_lock : m_blockman.m_prune_locks) {
+ if (prune_lock.second.height_first <= max_height_first) continue;
+
+ prune_lock.second.height_first = max_height_first;
+ LogPrint(BCLog::PRUNE, "%s prune lock moved back to %d\n", prune_lock.first, max_height_first);
+ }
+ }
+
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
return false;
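
A self-contained sketch of the move-back rule added above: after disconnecting the block at height H, any prune lock that started at or above H is pulled back to H - 1, so the component owning the lock keeps the blocks it needs to follow the reorg. The map type is an illustrative stand-in for BlockManager::m_prune_locks:

#include <map>
#include <string>

struct PruneLockInfo { int height_first; };

void MovePruneLocksBack(std::map<std::string, PruneLockInfo>& prune_locks, int disconnected_height)
{
    const int max_height_first{disconnected_height - 1};
    for (auto& prune_lock : prune_locks) {
        if (prune_lock.second.height_first <= max_height_first) continue; // already starts below the new tip
        prune_lock.second.height_first = max_height_first;                // pulled back so it can follow the reorg
    }
}
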
@@ -4077,10 +4113,11 @@ bool CChainState::ReplayBlocks()
// Roll forward from the forking point to the new tip.
int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
- const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
- LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
+ const CBlockIndex& pindex{*Assert(pindexNew->GetAncestor(nHeight))};
+
+ LogPrintf("Rolling forward %s (%i)\n", pindex.GetBlockHash().ToString(), nHeight);
uiInterface.ShowProgress(_("Replaying blocks…").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
- if (!RollforwardBlock(pindex, cache)) return false;
+ if (!RollforwardBlock(&pindex, cache)) return false;
}
cache.SetBestBlock(pindexNew->GetBlockHash());
@@ -4114,20 +4151,6 @@ void CChainState::UnloadBlockIndex()
setBlockIndexCandidates.clear();
}
-// May NOT be used after any connections are up as much
-// of the peer-processing logic assumes a consistent
-// block index state
-void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman)
-{
- AssertLockHeld(::cs_main);
- chainman.Unload();
- if (mempool) mempool->clear();
- g_versionbitscache.Clear();
- for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
- warningcache[b].clear();
- }
-}
-
bool ChainstateManager::LoadBlockIndex()
{
AssertLockHeld(cs_main);
@@ -5158,20 +5181,6 @@ bool ChainstateManager::IsSnapshotActive() const
return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
}
-void ChainstateManager::Unload()
-{
- AssertLockHeld(::cs_main);
- for (CChainState* chainstate : this->GetAll()) {
- chainstate->m_chain.SetTip(nullptr);
- chainstate->UnloadBlockIndex();
- }
-
- m_failed_blocks.clear();
- m_blockman.Unload();
- m_best_header = nullptr;
- m_best_invalid = nullptr;
-}
-
void ChainstateManager::MaybeRebalanceCaches()
{
AssertLockHeld(::cs_main);
@@ -5202,3 +5211,15 @@ void ChainstateManager::MaybeRebalanceCaches()
}
}
}
+
+ChainstateManager::~ChainstateManager()
+{
+ LOCK(::cs_main);
+
+ // TODO: The version bits cache and warning cache should probably become
+ // non-globals
+ g_versionbitscache.Clear();
+ for (auto& i : warningcache) {
+ i.clear();
+ }
+}
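
The destructor takes over the cache cleanup that the removed UnloadBlockIndex()/Unload() pair used to do, presumably so that a later ChainstateManager instance in the same process (for example in tests) does not observe stale versionbits or warning state. A minimal standalone sketch of that pattern (names and types are illustrative, not the real globals):

#include <array>
#include <map>

// Stand-in for the global warning cache cleared above.
static std::array<std::map<int, int>, 29> g_warning_cache;

struct Manager {
    ~Manager()
    {
        // Reset process-wide caches so the next Manager starts from a clean slate.
        for (auto& cache : g_warning_cache) cache.clear();
    }
};
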