author     Pieter Wuille <pieter.wuille@gmail.com>  2014-10-07 21:15:32 +0200
committer  Pieter Wuille <pieter.wuille@gmail.com>  2014-10-14 15:42:01 -0700
commit     16d5194165c8c83492b95f431a664d98c40ff254 (patch)
tree       4254323afb8db2c4940e305569f6a4eec98815c5 /src
parent     ad96e7ccd94679d9125a436ceb5b476aacdfca82 (diff)
Skip reindexed blocks individually
Instead of skipping to the last reindexed block in each file (which could jump over processed out-of-order blocks), just skip each already processed block individually.
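To illustrate why per-block skipping matters, here is a minimal, self-contained C++ sketch (not Bitcoin Core code). SimpleBlock and g_index are hypothetical stand-ins for CBlock and mapBlockIndex, and inserting into the set stands in for ProcessBlock(); the point is only that seeking past the already-indexed byte range would also jump over an out-of-order block stored there, while checking each block's hash individually still picks it up.

// Sketch only: models per-block skipping with simplified stand-ins.
#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

struct SimpleBlock {                // hypothetical stand-in for CBlock
    std::string hash;               // stand-in for block.GetHash()
    std::string prevHash;           // stand-in for block.hashPrevBlock
};

int main() {
    // A block file where an out-of-order block "C" was written before "B",
    // and a previous (interrupted) reindex already processed "A" and "B".
    std::vector<SimpleBlock> blockFile = {{"A", "genesis"}, {"C", "B"}, {"B", "A"}};
    std::unordered_set<std::string> g_index = {"genesis", "A", "B"};  // stand-in for mapBlockIndex

    // Old behaviour: seeking to the end of the indexed region would jump straight past "C".
    // New behaviour: read every block and skip only the ones already indexed.
    int nLoaded = 0;
    for (const SimpleBlock& block : blockFile) {
        if (g_index.count(block.hash))
            continue;               // already processed: skip just this block
        g_index.insert(block.hash); // stand-in for ProcessBlock(...)
        nLoaded++;
    }
    std::printf("loaded %d new block(s)\n", nLoaded);  // prints "loaded 1 new block(s)" (block "C")
    return 0;
}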
Diffstat (limited to 'src')
-rw-r--r--  src/main.cpp  49
1 file changed, 16 insertions, 33 deletions
diff --git a/src/main.cpp b/src/main.cpp
index ad133e1bb8..908f4c95b6 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -3078,15 +3078,6 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
-        uint64_t nStartByte = 0;
-        if (dbp) {
-            // (try to) skip already indexed part
-            CBlockFileInfo info;
-            if (pblocktree->ReadBlockFileInfo(dbp->nFile, info)) {
-                nStartByte = info.nSize;
-                blkdat.Seek(info.nSize);
-            }
-        }
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();
@@ -3114,40 +3105,32 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
-                if (nBlockPos < nStartByte) // skip already indexed part
-                    continue;
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
-
-                // read block header
-                CBlockHeader blockhdr;
-                blkdat >> blockhdr;
+                blkdat.SetPos(nBlockPos);
+                CBlock block;
+                blkdat >> block;
                nRewind = blkdat.GetPos();
-                // process block header
-                uint256 hash = blockhdr.GetHash();
-                if (hash != Params().HashGenesisBlock() && mapBlockIndex.find(blockhdr.hashPrevBlock) == mapBlockIndex.end()) {
+                // detect out of order blocks, and store them for later
+                uint256 hash = block.GetHash();
+                if (hash != Params().HashGenesisBlock() && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
-                            blockhdr.hashPrevBlock.ToString());
+                            block.hashPrevBlock.ToString());
                    if (dbp)
-                        mapBlocksUnknownParent.insert(std::make_pair(blockhdr.hashPrevBlock, *dbp));
-                    // TODO a slight optimization would be: blkdat.Skip(nSize - 80)
+                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                    continue;
                }
-                // read block
-                blkdat.SetPos(nBlockPos);
-                CBlock block;
-                blkdat >> block;
-                nRewind = blkdat.GetPos();
-
-                // process block
-                CValidationState state;
-                if (ProcessBlock(state, NULL, &block, dbp))
-                    nLoaded++;
-                if (state.IsError())
-                    break;
+                // process in case the block isn't known yet
+                if (mapBlockIndex.count(hash) == 0) {
+                    CValidationState state;
+                    if (ProcessBlock(state, NULL, &block, dbp))
+                        nLoaded++;
+                    if (state.IsError())
+                        break;
+                }
                // Recursively process earlier encountered successors of this block
                deque<uint256> queue;