Diffstat (limited to 'src/main.cpp')
-rw-r--r--  src/main.cpp  336
1 file changed, 278 insertions(+), 58 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 8dd61a3720..e6248c6617 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -52,9 +52,12 @@ int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = false;
+bool fHavePruned = false;
+bool fPruneMode = false;
bool fIsBareMultisigStd = true;
bool fCheckBlockIndex = false;
unsigned int nCoinCacheSize = 5000;
+uint64_t nPruneTarget = 0;
/** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
CFeeRate minRelayTxFee = CFeeRate(1000);
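
fPruneMode and nPruneTarget are only declared above; they would be populated from a startup option outside this diff (fHavePruned is maintained by the flush and load code further down). A minimal sketch, assuming a -prune=<MiB> option handled in init code; the option name and error text are illustrative, not part of this patch:

    // Hypothetical init-time handling of a -prune=<MiB> option (not part of
    // this diff); converts MiB to bytes and switches the node into prune mode.
    int64_t nSignedPruneTarget = GetArg("-prune", 0) * 1024 * 1024;
    if (nSignedPruneTarget < 0)
        return InitError(_("Prune cannot be configured with a negative value."));
    nPruneTarget = (uint64_t) nSignedPruneTarget;
    fPruneMode = (nPruneTarget > 0);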
@@ -110,17 +113,25 @@ namespace {
/**
* The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
- * as good as our current tip or better. Entries may be failed, though.
+ * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
+ * missing the data for the block.
*/
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
- /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions. */
+ /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
+ * Pruned nodes may have entries where B is missing data.
+ */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
+ /** Global flag to indicate we should check whether there are
+ * block/undo files that should be deleted. Set on startup
+ * or if we allocate more file space when we're in prune mode.
+ */
+ bool fCheckForPruning = false;
/**
* Every received block is assigned a unique and increasing identifier, so we
@@ -1233,14 +1244,14 @@ void CheckForkWarningConditions()
}
if (pindexBestForkTip && pindexBestForkBase)
{
- LogPrintf("CheckForkWarningConditions: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n",
+ LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
fLargeWorkForkFound = true;
}
else
{
- LogPrintf("CheckForkWarningConditions: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n");
+ LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
fLargeWorkInvalidChainFound = true;
}
}
@@ -1298,10 +1309,10 @@ void Misbehaving(NodeId pnode, int howmuch)
int banscore = GetArg("-banscore", 100);
if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
{
- LogPrintf("Misbehaving: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
+ LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
state->fShouldBan = true;
} else
- LogPrintf("Misbehaving: %s (%d -> %d)\n", state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
+ LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
}
void static InvalidChainFound(CBlockIndex* pindexNew)
@@ -1309,11 +1320,11 @@ void static InvalidChainFound(CBlockIndex* pindexNew)
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
pindexBestInvalid = pindexNew;
- LogPrintf("InvalidChainFound: invalid block=%s height=%d log2_work=%.8g date=%s\n",
+ LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
pindexNew->GetBlockTime()));
- LogPrintf("InvalidChainFound: current best=%s height=%d log2_work=%.8g date=%s\n",
+ LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()));
CheckForkWarningConditions();
@@ -1849,6 +1860,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
}
enum FlushStateMode {
+ FLUSH_STATE_NONE,
FLUSH_STATE_IF_NEEDED,
FLUSH_STATE_PERIODIC,
FLUSH_STATE_ALWAYS
@@ -1856,16 +1868,30 @@ enum FlushStateMode {
/**
* Update the on-disk chain state.
- * The caches and indexes are flushed if either they're too large, forceWrite is set, or
- * fast is not set and it's been a while since the last write.
+ * The caches and indexes are flushed depending on the mode we're called with:
+ * if they're too large, if it's been a while since the last write, or always;
+ * and, regardless of mode, whenever we're in prune mode and are deleting files.
*/
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
- LOCK(cs_main);
+ LOCK2(cs_main, cs_LastBlockFile);
static int64_t nLastWrite = 0;
+ std::set<int> setFilesToPrune;
+ bool fFlushForPrune = false;
try {
+ if (fPruneMode && fCheckForPruning) {
+ FindFilesToPrune(setFilesToPrune);
+ if (!setFilesToPrune.empty()) {
+ fFlushForPrune = true;
+ if (!fHavePruned) {
+ pblocktree->WriteFlag("prunedblockfiles", true);
+ fHavePruned = true;
+ }
+ }
+ }
if ((mode == FLUSH_STATE_ALWAYS) ||
((mode == FLUSH_STATE_PERIODIC || mode == FLUSH_STATE_IF_NEEDED) && pcoinsTip->GetCacheSize() > nCoinCacheSize) ||
- (mode == FLUSH_STATE_PERIODIC && GetTimeMicros() > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000)) {
+ (mode == FLUSH_STATE_PERIODIC && GetTimeMicros() > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000) ||
+ fFlushForPrune) {
// Typical CCoins structures on disk are around 100 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is already
@@ -1893,9 +1919,16 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
return state.Abort("Files to write to block index database");
}
}
- // Finally flush the chainstate (which may refer to block index entries).
+ // Flush the chainstate (which may refer to block index entries).
if (!pcoinsTip->Flush())
return state.Abort("Failed to write to coin database");
+
+ // Finally remove any pruned files
+ if (fFlushForPrune) {
+ UnlinkPrunedFiles(setFilesToPrune);
+ fCheckForPruning = false;
+ }
+
// Update best block in wallet (so we can detect restored wallets).
if (mode != FLUSH_STATE_IF_NEEDED) {
GetMainSignals().SetBestChain(chainActive.GetLocator());
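
Taken together, the two hunks above make FlushStateToDisk fire on any of four triggers and fold file deletion into the same write. A condensed restatement of the trigger logic, purely illustrative and not part of the patch (it assumes the same globals and constants as the surrounding code):

    // Illustrative restatement of the flush trigger in FlushStateToDisk.
    static bool ShouldFlush(FlushStateMode mode, bool fFlushForPrune,
                            unsigned int nCoinCacheEntries, int64_t nNow, int64_t nLastWrite)
    {
        if (mode == FLUSH_STATE_ALWAYS)
            return true;                                          // caller demands it
        if ((mode == FLUSH_STATE_PERIODIC || mode == FLUSH_STATE_IF_NEEDED) &&
            nCoinCacheEntries > nCoinCacheSize)
            return true;                                          // coin cache too large
        if (mode == FLUSH_STATE_PERIODIC &&
            nNow > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000)
            return true;                                          // periodic write is due
        return fFlushForPrune;                                    // prune pass needs to delete files
    }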
@@ -1913,6 +1946,12 @@ void FlushStateToDisk() {
FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
}
+void PruneAndFlush() {
+ CValidationState state;
+ fCheckForPruning = true;
+ FlushStateToDisk(state, FLUSH_STATE_NONE);
+}
+
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew) {
chainActive.SetTip(pindexNew);
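
PruneAndFlush() above is the hook for callers outside this file to force a prune pass; a minimal sketch of a plausible call site (e.g. init code after the block index is loaded -- the exact location is an assumption, not shown in this diff):

    // Hypothetical call site outside this file: force an initial prune pass
    // once prune mode is enabled and the block index has been loaded.
    if (fPruneMode) {
        LogPrintf("Pruning blockstore...\n");
        PruneAndFlush(); // sets fCheckForPruning, then flushes and unlinks prunable files
    }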
@@ -1921,7 +1960,7 @@ void static UpdateTip(CBlockIndex *pindexNew) {
nTimeBestReceived = GetTime();
mempool.AddTransactionsUpdated(1);
- LogPrintf("UpdateTip: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%u\n",
+ LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%u\n", __func__,
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
Checkpoints::GuessVerificationProgress(chainActive.Tip()), (unsigned int)pcoinsTip->GetCacheSize());
@@ -1941,7 +1980,7 @@ void static UpdateTip(CBlockIndex *pindexNew) {
pindex = pindex->pprev;
}
if (nUpgraded > 0)
- LogPrintf("SetBestChain: %d of last 100 blocks above version %d\n", nUpgraded, (int)CBlock::CURRENT_VERSION);
+ LogPrintf("%s: %d of last 100 blocks above version %d\n", __func__, nUpgraded, (int)CBlock::CURRENT_VERSION);
if (nUpgraded > 100/2)
{
// strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
@@ -2083,15 +2122,29 @@ static CBlockIndex* FindMostWorkChain() {
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
while (pindexTest && !chainActive.Contains(pindexTest)) {
- assert(pindexTest->nStatus & BLOCK_HAVE_DATA);
assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
- if (pindexTest->nStatus & BLOCK_FAILED_MASK) {
- // Candidate has an invalid ancestor, remove entire chain from the set.
- if (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
+
+ // Pruned nodes may have entries in setBlockIndexCandidates for
+ // which block files have been deleted. Remove those as candidates
+ // for the most work chain if we come across them; we can't switch
+ // to a chain unless we have all the non-active-chain parent blocks.
+ bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
+ bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
+ if (fFailedChain || fMissingData) {
+ // Candidate chain is not usable (either invalid or missing data)
+ if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindexNew;
CBlockIndex *pindexFailed = pindexNew;
+ // Remove the entire chain from the set.
while (pindexTest != pindexFailed) {
- pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
+ if (fFailedChain) {
+ pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
+ } else if (fMissingData) {
+ // If we're missing data, then add back to mapBlocksUnlinked,
+ // so that if the block arrives in the future we can try adding
+ // to setBlockIndexCandidates again.
+ mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
+ }
setBlockIndexCandidates.erase(pindexFailed);
pindexFailed = pindexFailed->pprev;
}
@@ -2219,7 +2272,9 @@ bool ActivateBestChain(CValidationState &state, CBlock *pblock) {
uint256 hashNewTip = pindexNewTip->GetBlockHash();
// Relay inventory, but don't relay old inventory during initial block download.
int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
- {
+ // Don't relay blocks if pruning -- could cause a peer to try to download, resulting
+ // in a stalled download if the block file is pruned before the request.
+ if (nLocalServices & NODE_NETWORK) {
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
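
The NODE_NETWORK test above assumes a pruning node no longer advertises the full-history service bit; a one-line sketch of how the service flags would plausibly be adjusted at startup (done outside this file, so treat it as an assumption):

    // Hypothetical startup adjustment (outside this diff): a pruning node should
    // stop advertising NODE_NETWORK, since it cannot serve arbitrary old blocks.
    if (fPruneMode)
        nLocalServices &= ~NODE_NETWORK;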
@@ -2350,10 +2405,6 @@ bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBl
pindexNew->nUndoPos = 0;
pindexNew->nStatus |= BLOCK_HAVE_DATA;
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
- {
- LOCK(cs_nBlockSequenceId);
- pindexNew->nSequenceId = nBlockSequenceId++;
- }
setDirtyBlockIndex.insert(pindexNew);
if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
@@ -2366,6 +2417,10 @@ bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBl
CBlockIndex *pindex = queue.front();
queue.pop_front();
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
+ {
+ LOCK(cs_nBlockSequenceId);
+ pindex->nSequenceId = nBlockSequenceId++;
+ }
if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
@@ -2419,6 +2474,8 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd
unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
+ if (fPruneMode)
+ fCheckForPruning = true;
if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenBlockFile(pos);
if (file) {
@@ -2450,6 +2507,8 @@ bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigne
unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
+ if (fPruneMode)
+ fCheckForPruning = true;
if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenUndoFile(pos);
if (file) {
@@ -2665,7 +2724,10 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex,
if (!AcceptBlockHeader(block, state, &pindex))
return false;
- if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ // If we're pruning, ensure that we don't allow a peer to dump a copy
+ // of old blocks. But we might need blocks that are not on the main chain
+ // to handle a reorg, even if we've processed them once.
+ if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
// TODO: deal better with duplicate blocks.
// return state.DoS(20, error("AcceptBlock(): already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate");
return true;
@@ -2698,6 +2760,9 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex,
return state.Abort(std::string("System error: ") + e.what());
}
+ if (fCheckForPruning)
+ FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files
+
return true;
}
@@ -2785,6 +2850,112 @@ bool AbortNode(const std::string &strMessage, const std::string &userMessage) {
return false;
}
+
+/**
+ * BLOCK PRUNING CODE
+ */
+
+/* Calculate the amount of disk space the block & undo files currently use */
+uint64_t CalculateCurrentUsage()
+{
+ uint64_t retval = 0;
+ BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
+ retval += file.nSize + file.nUndoSize;
+ }
+ return retval;
+}
+
+/* Prune a block file (modify associated database entries) */
+void PruneOneBlockFile(const int fileNumber)
+{
+ for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
+ CBlockIndex* pindex = it->second;
+ if (pindex->nFile == fileNumber) {
+ pindex->nStatus &= ~BLOCK_HAVE_DATA;
+ pindex->nStatus &= ~BLOCK_HAVE_UNDO;
+ pindex->nFile = 0;
+ pindex->nDataPos = 0;
+ pindex->nUndoPos = 0;
+ setDirtyBlockIndex.insert(pindex);
+
+ // Prune from mapBlocksUnlinked -- any block we prune would have
+ // to be downloaded again in order to consider its chain, at which
+ // point it would be considered as a candidate for
+ // mapBlocksUnlinked or setBlockIndexCandidates.
+ std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
+ while (range.first != range.second) {
+ std::multimap<CBlockIndex *, CBlockIndex *>::iterator it = range.first;
+ range.first++;
+ if (it->second == pindex) {
+ mapBlocksUnlinked.erase(it);
+ }
+ }
+ }
+ }
+
+ vinfoBlockFile[fileNumber].SetNull();
+ setDirtyFileInfo.insert(fileNumber);
+}
+
+
+void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
+{
+ for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
+ CDiskBlockPos pos(*it, 0);
+ boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
+ boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
+ LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
+ }
+}
+
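
UnlinkPrunedFiles above removes both halves of each pruned file pair from disk; a short usage sketch with an assumed file number (the blk/rev naming comes from GetBlockPosFilename elsewhere in main.cpp):

    // Usage sketch (assumed file number): delete the block/undo pair for file 7,
    // i.e. roughly <datadir>/blocks/blk00007.dat and <datadir>/blocks/rev00007.dat.
    std::set<int> setFilesToPruneExample;
    setFilesToPruneExample.insert(7);
    UnlinkPrunedFiles(setFilesToPruneExample);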
+/* Calculate the block/rev files that should be deleted to remain under target */
+void FindFilesToPrune(std::set<int>& setFilesToPrune)
+{
+ LOCK2(cs_main, cs_LastBlockFile);
+ if (chainActive.Tip() == NULL || nPruneTarget == 0) {
+ return;
+ }
+ if (chainActive.Tip()->nHeight <= Params().PruneAfterHeight()) {
+ return;
+ }
+
+ unsigned int nLastBlockWeMustKeep = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
+ uint64_t nCurrentUsage = CalculateCurrentUsage();
+ // We don't check to prune until after we've allocated new space for files,
+ // so we should leave a buffer under our target to account for another allocation
+ // before the next pruning.
+ uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
+ uint64_t nBytesToPrune;
+ int count=0;
+
+ if (nCurrentUsage + nBuffer >= nPruneTarget) {
+ for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
+ nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
+
+ if (vinfoBlockFile[fileNumber].nSize == 0)
+ continue;
+
+ if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
+ break;
+
+ // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip
+ if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeMustKeep)
+ break;
+
+ PruneOneBlockFile(fileNumber);
+ // Queue up the files for removal
+ setFilesToPrune.insert(fileNumber);
+ nCurrentUsage -= nBytesToPrune;
+ count++;
+ }
+ }
+
+ LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB min_must_keep=%d removed %d blk/rev pairs\n",
+ nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
+ ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
+ nLastBlockWeMustKeep, count);
+}
+
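
FindFilesToPrune only kicks in once usage plus a one-allocation buffer crosses the target; a tiny worked example with assumed numbers (the chunk-size constants are the preallocation sizes used by FindBlockPos/FindUndoPos above):

    // Worked example with assumed numbers: a 1 GiB prune target and ~1010 MiB of
    // current blk+rev usage. The buffer reserves room for one more preallocation
    // of each file type, so pruning triggers slightly before the target is hit.
    uint64_t nExampleTarget = 1024ull * 1024 * 1024;                 // 1 GiB
    uint64_t nExampleUsage  = 1010ull * 1024 * 1024;                 // ~1010 MiB
    uint64_t nExampleBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    bool fWouldPrune = (nExampleUsage + nExampleBuffer >= nExampleTarget);
    // Assuming the usual 16 MiB + 1 MiB chunk sizes this is true (1027 >= 1024 MiB),
    // so the oldest fully-prunable files are deleted until usage + buffer is below target.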
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;
@@ -2872,7 +3043,9 @@ bool static LoadBlockIndexDB()
{
CBlockIndex* pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
- if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ // We can link the chain of blocks for which we've received transactions at some point.
+ // Pruned nodes may have deleted the block.
+ if (pindex->nTx > 0) {
if (pindex->pprev) {
if (pindex->pprev->nChainTx) {
pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
@@ -2929,6 +3102,11 @@ bool static LoadBlockIndexDB()
}
}
+ // Check whether we have ever pruned block & undo files
+ pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
+ if (fHavePruned)
+ LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
+
// Check whether we need to continue reindexing
bool fReindexing = false;
pblocktree->ReadReindexing(fReindexing);
@@ -2936,7 +3114,7 @@ bool static LoadBlockIndexDB()
// Check whether we have a transaction index
pblocktree->ReadFlag("txindex", fTxIndex);
- LogPrintf("LoadBlockIndexDB(): transaction index %s\n", fTxIndex ? "enabled" : "disabled");
+ LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
@@ -2946,7 +3124,7 @@ bool static LoadBlockIndexDB()
PruneBlockIndexCandidates();
- LogPrintf("LoadBlockIndexDB(): hashBestChain=%s height=%d date=%s progress=%f\n",
+ LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
Checkpoints::GuessVerificationProgress(chainActive.Tip()));
@@ -3069,6 +3247,7 @@ void UnloadBlockIndex()
delete entry.second;
}
mapBlockIndex.clear();
+ fHavePruned = false;
}
bool LoadBlockIndex()
@@ -3260,6 +3439,7 @@ void static CheckBlockIndex()
int nHeight = 0;
CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
+ CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
@@ -3268,6 +3448,7 @@ void static CheckBlockIndex()
nNodes++;
if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
+ if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
@@ -3279,11 +3460,21 @@ void static CheckBlockIndex()
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
}
- // HAVE_DATA is equivalent to VALID_TRANSACTIONS and equivalent to nTx > 0 (we stored the number of transactions in the block)
- assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
- assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0));
- // All parents having data is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
- assert((pindexFirstMissing != NULL) == (pindex->nChainTx == 0)); // nChainTx == 0 is used to signal that all parent block's transaction data is available.
+ if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked
+ // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
+ // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
+ if (!fHavePruned) {
+ // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
+ assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
+ assert(pindexFirstMissing == pindexFirstNeverProcessed);
+ } else {
+ // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
+ if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
+ }
+ if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
+ assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
+ // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
+ assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
assert(pindex->nHeight == nHeight); // nHeight must be consistent.
assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
@@ -3296,11 +3487,20 @@ void static CheckBlockIndex()
// Checks for not-invalid blocks.
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
}
- if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstMissing == NULL) {
- if (pindexFirstInvalid == NULL) { // If this block sorts at least as good as the current tip and is valid, it must be in setBlockIndexCandidates.
- assert(setBlockIndexCandidates.count(pindex));
+ if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
+ if (pindexFirstInvalid == NULL) {
+ // If this block sorts at least as good as the current tip and
+ // is valid and we have all data for its parents, it must be in
+ // setBlockIndexCandidates. chainActive.Tip() must also be there
+ // even if some data has been pruned.
+ if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
+ assert(setBlockIndexCandidates.count(pindex));
+ }
+ // If some parent is missing, then it could be that this block was in
+ // setBlockIndexCandidates but had to be removed because of the missing data.
+ // In this case it must be in mapBlocksUnlinked -- see test below.
}
- } else { // If this block sorts worse than the current tip, it cannot be in setBlockIndexCandidates.
+ } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
assert(setBlockIndexCandidates.count(pindex) == 0);
}
// Check whether this block is in mapBlocksUnlinked.
@@ -3314,12 +3514,28 @@ void static CheckBlockIndex()
}
rangeUnlinked.first++;
}
- if (pindex->pprev && pindex->nStatus & BLOCK_HAVE_DATA && pindexFirstMissing != NULL) {
- if (pindexFirstInvalid == NULL) { // If this block has block data available, some parent doesn't, and has no invalid parents, it must be in mapBlocksUnlinked.
- assert(foundInUnlinked);
+ if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
+ // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
+ assert(foundInUnlinked);
+ }
+ if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
+ if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
+ if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
+ // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
+ assert(fHavePruned); // We must have pruned.
+ // This block may have entered mapBlocksUnlinked if:
+ // - it has a descendant that at some point had more work than the
+ // tip, and
+ // - we tried switching to that descendant but were missing
+ // data for some intermediate block between chainActive and the
+ // tip.
+ // So if this block is itself better than chainActive.Tip() and it wasn't in
+ // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
+ if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
+ if (pindexFirstInvalid == NULL) {
+ assert(foundInUnlinked);
+ }
}
- } else { // If this block does not have block data available, or all parents do, it cannot be in mapBlocksUnlinked.
- assert(!foundInUnlinked);
}
// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
// End: actual consistency checks.
@@ -3339,6 +3555,7 @@ void static CheckBlockIndex()
// If pindex was the first with a certain property, unset the corresponding variable.
if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
+ if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
@@ -3458,7 +3675,6 @@ bool static AlreadyHave(const CInv& inv)
return true;
}
-
void static ProcessGetData(CNode* pfrom)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
@@ -3486,17 +3702,21 @@ void static ProcessGetData(CNode* pfrom)
if (chainActive.Contains(mi->second)) {
send = true;
} else {
+ static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
- // chain if they are valid, and no more than a month older than the best header
- // chain we know about.
+ // chain if they are valid, and no more than a month older (both in time, and in
+ // best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
- (mi->second->GetBlockTime() > pindexBestHeader->GetBlockTime() - 30 * 24 * 60 * 60);
+ (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
+ (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth);
if (!send) {
- LogPrintf("ProcessGetData(): ignoring request from peer=%i for old block that isn't in the main chain\n", pfrom->GetId());
+ LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
}
- if (send)
+ // Pruned nodes may have deleted the block, so check whether
+ // it's available before trying to send.
+ if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
{
// Send block from disk
CBlock block;
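
The hunk above tightens block serving in two independent ways: out-of-chain blocks must be recent both in wall-clock time and in proof-of-work-equivalent time, and pruned blocks are never read from disk at all. A condensed restatement of the combined rule, illustrative only (pindexReq is a hypothetical name for the requested block index entry):

    // Illustrative combined predicate for serving a requested block index entry
    // pindexReq (assumes the same helpers used in the hunk above).
    static const int nOneMonth = 30 * 24 * 60 * 60;
    bool fRecentEnough =
        pindexBestHeader != NULL &&
        pindexBestHeader->GetBlockTime() - pindexReq->GetBlockTime() < nOneMonth &&
        GetBlockProofEquivalentTime(*pindexBestHeader, *pindexReq, *pindexBestHeader,
                                    Params().GetConsensus()) < nOneMonth;
    bool fCanServe = (pindexReq->nStatus & BLOCK_HAVE_DATA) &&
                     (chainActive.Contains(pindexReq) ||
                      (pindexReq->IsValid(BLOCK_VALID_SCRIPTS) && fRecentEnough));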
@@ -4394,7 +4614,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
bool ProcessMessages(CNode* pfrom)
{
//if (fDebug)
- // LogPrintf("ProcessMessages(%u messages)\n", pfrom->vRecvMsg.size());
+ // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
//
// Message format
@@ -4422,7 +4642,7 @@ bool ProcessMessages(CNode* pfrom)
CNetMessage& msg = *it;
//if (fDebug)
- // LogPrintf("ProcessMessages(message %u msgsz, %u bytes, complete:%s)\n",
+ // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
// msg.hdr.nMessageSize, msg.vRecv.size(),
// msg.complete() ? "Y" : "N");
@@ -4458,7 +4678,7 @@ bool ProcessMessages(CNode* pfrom)
unsigned int nChecksum = ReadLE32((unsigned char*)&hash);
if (nChecksum != hdr.nChecksum)
{
- LogPrintf("ProcessMessages(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n",
+ LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__,
SanitizeString(strCommand), nMessageSize, nChecksum, hdr.nChecksum);
continue;
}
@@ -4476,12 +4696,12 @@ bool ProcessMessages(CNode* pfrom)
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from under-length message on vRecv
- LogPrintf("ProcessMessages(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", SanitizeString(strCommand), nMessageSize, e.what());
+ LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else if (strstr(e.what(), "size too large"))
{
// Allow exceptions from over-long size
- LogPrintf("ProcessMessages(%s, %u bytes): Exception '%s' caught\n", SanitizeString(strCommand), nMessageSize, e.what());
+ LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else
{
@@ -4498,7 +4718,7 @@ bool ProcessMessages(CNode* pfrom)
}
if (!fRet)
- LogPrintf("ProcessMessage(%s, %u bytes) FAILED peer=%d\n", SanitizeString(strCommand), nMessageSize, pfrom->id);
+ LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
break;
}
@@ -4620,9 +4840,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
if (pindexBestHeader == NULL)
pindexBestHeader = chainActive.Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
- if (!state.fSyncStarted && !pto->fClient && fFetch && !fImporting && !fReindex) {
+ if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
- if (nSyncStarted == 0 || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
+ if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
nSyncStarted++;
CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader;
@@ -4710,7 +4930,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
// Message: getdata (blocks)
//
vector<CInv> vGetData;
- if (!pto->fDisconnect && !pto->fClient && fFetch && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vector<CBlockIndex*> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);