Diffstat (limited to 'src/main.cpp')
src/main.cpp | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 82d52913a0..4aa49531b3 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1760,9 +1760,9 @@ bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, C
}
// Update the on-disk chain state.
-bool static WriteChainState(CValidationState &state) {
+bool static WriteChainState(CValidationState &state, bool forceWrite=false) {
static int64_t nLastWrite = 0;
- if (pcoinsTip->GetCacheSize() > nCoinCacheSize || (!IsInitialBlockDownload() && GetTimeMicros() > nLastWrite + 600*1000000)) {
+ if (forceWrite || pcoinsTip->GetCacheSize() > nCoinCacheSize || (!IsInitialBlockDownload() && GetTimeMicros() > nLastWrite + 600*1000000)) {
// Typical CCoins structures on disk are around 100 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is already
@@ -3022,6 +3022,8 @@ bool InitBlockIndex() {
return error("LoadBlockIndex() : genesis block not accepted");
if (!ActivateBestChain(state, &block))
return error("LoadBlockIndex() : genesis block cannot be activated");
+ // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
+ return WriteChainState(state, true);
} catch(std::runtime_error &e) {
return error("LoadBlockIndex() : failed to initialize block database: %s", e.what());
}
@@ -3157,12 +3159,14 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
}
// process in case the block isn't known yet
- if (mapBlockIndex.count(hash) == 0) {
+ if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
CValidationState state;
if (ProcessNewBlock(state, NULL, &block, dbp))
nLoaded++;
if (state.IsError())
break;
+ } else if (hash != Params().HashGenesisBlock() && mapBlockIndex[hash]->nHeight % 1000 == 0) {
+ LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
}
// Recursively process earlier encountered successors of this block
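
For readers following the first hunk, here is a minimal standalone sketch of the flush policy that WriteChainState ends up with after this change. ChainStateFlusher, ShouldFlush, kCacheLimit and kIntervalMicros are illustrative names, not identifiers from the Bitcoin Core source; only the shape of the condition (explicit force, cache-size threshold, ten-minute timer outside initial block download) comes from the diff above.

#include <cstdint>
#include <cstddef>
#include <ctime>

// Hypothetical stand-in for the real cache/flush machinery; not Bitcoin Core code.
class ChainStateFlusher {
public:
    // Mirrors the condition added in this diff: flush when explicitly forced,
    // when the coin cache outgrows its budget, or (outside initial block
    // download) at most once every ten minutes.
    bool ShouldFlush(bool forceWrite, std::size_t cacheSize, bool initialDownload) {
        const std::size_t kCacheLimit = 100000;          // plays the role of nCoinCacheSize
        const int64_t kIntervalMicros = 600 * 1000000LL; // ten minutes, as in the patch
        int64_t now = static_cast<int64_t>(std::time(nullptr)) * 1000000LL;
        if (forceWrite || cacheSize > kCacheLimit ||
            (!initialDownload && now > nLastWrite + kIntervalMicros)) {
            nLastWrite = now;
            return true;
        }
        return false;
    }
private:
    int64_t nLastWrite = 0; // microsecond timestamp of the last flush
};

In these terms, the new call from InitBlockIndex corresponds to ShouldFlush(true, ...): the forced write guarantees the genesis chainstate reaches disk before VerifyDB runs, regardless of cache size or timer.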