Diffstat (limited to 'src/main.cpp')
-rw-r--r-- | src/main.cpp | 395
1 file changed, 314 insertions, 81 deletions
diff --git a/src/main.cpp b/src/main.cpp index 45e6e4d25d..79ee4e55ec 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -52,9 +52,13 @@ int nScriptCheckThreads = 0; bool fImporting = false; bool fReindex = false; bool fTxIndex = false; +bool fHavePruned = false; +bool fPruneMode = false; bool fIsBareMultisigStd = true; bool fCheckBlockIndex = false; +bool fCheckpointsEnabled = true; unsigned int nCoinCacheSize = 5000; +uint64_t nPruneTarget = 0; /** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */ CFeeRate minRelayTxFee = CFeeRate(1000); @@ -110,17 +114,25 @@ namespace { /** * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and - * as good as our current tip or better. Entries may be failed, though. + * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be + * missing the data for the block. */ set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates; /** Number of nodes with fSyncStarted. */ int nSyncStarted = 0; - /** All pairs A->B, where A (or one if its ancestors) misses transactions, but B has transactions. */ + /** All pairs A->B, where A (or one if its ancestors) misses transactions, but B has transactions. + * Pruned nodes may have entries where B is missing data. + */ multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked; CCriticalSection cs_LastBlockFile; std::vector<CBlockFileInfo> vinfoBlockFile; int nLastBlockFile = 0; + /** Global flag to indicate we should check to see if there are + * block/undo files that should be deleted. Set on startup + * or if we allocate more file space when we're in prune mode + */ + bool fCheckForPruning = false; /** * Every received block is assigned a unique and increasing identifier, so we @@ -131,8 +143,9 @@ namespace { uint32_t nBlockSequenceId = 1; /** - * Sources of received blocks, to be able to send them reject messages or ban - * them, if processing happens afterwards. Protected by cs_main. + * Sources of received blocks, saved to be able to send them reject + * messages or ban them when processing happens afterwards. Protected by + * cs_main. */ map<uint256, NodeId> mapBlockSource; @@ -378,7 +391,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl } // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor - // of their current tip anymore. Go back enough to fix that. + // of its current tip anymore. Go back enough to fix that. state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock); if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return; @@ -930,7 +943,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa // do all inputs exist? // Note that this does not check for the presence of actual outputs (see the next check for that), - // only helps filling in pfMissingInputs (to determine missing vs spent). + // and only helps with filling in pfMissingInputs (to determine missing vs spent). 
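The new globals above (fPruneMode, nPruneTarget, fHavePruned, fCheckpointsEnabled) are only declared in main.cpp; nothing in this file shows how they get populated. As a rough, hypothetical sketch of the startup wiring (the option name, the MiB unit and the 550 MiB floor are assumptions for illustration, not taken from this diff), a -prune=<n> style argument might be translated like this:

// Standalone, hypothetical sketch of turning a user-supplied prune size (MiB)
// into the globals declared above. Not taken from this diff.
#include <cstdint>
#include <stdexcept>

static const uint64_t MIN_PRUNE_TARGET_MIB = 550; // assumed floor, for illustration only

struct PruneConfig {
    bool fPruneMode = false;    // mirrors the new global fPruneMode
    uint64_t nPruneTarget = 0;  // mirrors the new global nPruneTarget (bytes)
};

PruneConfig ParsePruneOption(int64_t pruneMiB)
{
    PruneConfig cfg;
    if (pruneMiB == 0)
        return cfg;  // pruning disabled; defaults stand
    if (pruneMiB < 0 || (uint64_t)pruneMiB < MIN_PRUNE_TARGET_MIB)
        throw std::runtime_error("prune target below assumed 550 MiB minimum");
    cfg.fPruneMode = true;
    cfg.nPruneTarget = (uint64_t)pruneMiB * 1024 * 1024; // kept in bytes, like nPruneTarget
    return cfg;
}

int main()
{
    PruneConfig cfg = ParsePruneOption(1000); // e.g. a hypothetical -prune=1000
    return (cfg.fPruneMode && cfg.nPruneTarget == 1000ULL * 1024 * 1024) ? 0 : 1;
}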
BOOST_FOREACH(const CTxIn txin, tx.vin) { if (!view.HaveCoins(txin.prevout.hash)) { if (pfMissingInputs) @@ -1193,8 +1206,11 @@ CAmount GetBlockValue(int nHeight, const CAmount& nFees) bool IsInitialBlockDownload() { + const CChainParams& chainParams = Params(); LOCK(cs_main); - if (fImporting || fReindex || chainActive.Height() < Checkpoints::GetTotalBlocksEstimate()) + if (fImporting || fReindex) + return true; + if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints())) return true; static bool lockIBDState = false; if (lockIBDState) @@ -1266,8 +1282,8 @@ void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) pfork = pfork->pprev; } - // We define a condition which we should warn the user about as a fork of at least 7 blocks - // who's tip is within 72 blocks (+/- 12 hours if no one mines it) of ours + // We define a condition where we should warn the user about as a fork of at least 7 blocks + // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network // hash rate operating on the fork. // or a chain that is entirely longer than ours and invalid (note that this should be detected by both) @@ -1698,7 +1714,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin return true; } - bool fScriptChecks = pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate(); + bool fScriptChecks = (!fCheckpointsEnabled || pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints())); // Do not allow blocks that contain transactions which 'overwrite' older transactions, // unless those are already completely spent. @@ -1708,9 +1724,9 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information. // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool // already refuses previously-known transaction ids entirely. - // This rule was originally applied all blocks whose timestamp was after March 15, 2012, 0:00 UTC. + // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC. // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the - // two in the chain that violate it. This prevents exploiting the issue against nodes in their + // two in the chain that violate it. This prevents exploiting the issue against nodes during their // initial block download. bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash. !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) || @@ -1849,6 +1865,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin } enum FlushStateMode { + FLUSH_STATE_NONE, FLUSH_STATE_IF_NEEDED, FLUSH_STATE_PERIODIC, FLUSH_STATE_ALWAYS @@ -1856,16 +1873,30 @@ enum FlushStateMode { /** * Update the on-disk chain state. - * The caches and indexes are flushed if either they're too large, forceWrite is set, or - * fast is not set and it's been a while since the last write. + * The caches and indexes are flushed depending on the mode we're called with + * if they're too large, if it's been a while since the last write, + * or always and in all cases if we're in prune mode and are deleting files. 
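IsInitialBlockDownload() and ConnectBlock() now consult the same checkpoint height estimate, and both honour the new fCheckpointsEnabled switch. A minimal standalone model of the two gates, with plain ints instead of CBlockIndex and an invented placeholder for GetTotalBlocksEstimate():

// Standalone model of the two checkpoint-dependent decisions touched above.
// Heights are plain ints; nCheckpointEstimate is an invented placeholder for
// Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()).
#include <cstdio>

bool fCheckpointsEnabled = true;         // new global introduced by this diff
const int nCheckpointEstimate = 295000;  // placeholder value, not from the diff

// Model of the height test added to IsInitialBlockDownload().
bool StillInIBDByCheckpoint(int nChainHeight)
{
    return fCheckpointsEnabled && nChainHeight < nCheckpointEstimate;
}

// Model of the fScriptChecks decision in ConnectBlock(): script checks are
// skipped only below the checkpoint estimate, and only while checkpoints are enabled.
bool ScriptChecksRequired(int nBlockHeight)
{
    return !fCheckpointsEnabled || nBlockHeight >= nCheckpointEstimate;
}

int main()
{
    std::printf("height 100000: IBD-by-checkpoint=%d script-checks=%d\n",
                StillInIBDByCheckpoint(100000), ScriptChecksRequired(100000));
    std::printf("height 400000: IBD-by-checkpoint=%d script-checks=%d\n",
                StillInIBDByCheckpoint(400000), ScriptChecksRequired(400000));
    fCheckpointsEnabled = false; // e.g. a node started with checkpoints disabled
    std::printf("checkpoints disabled, height 100000: script-checks=%d\n",
                ScriptChecksRequired(100000));
    return 0;
}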
*/ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { - LOCK(cs_main); + LOCK2(cs_main, cs_LastBlockFile); static int64_t nLastWrite = 0; + std::set<int> setFilesToPrune; + bool fFlushForPrune = false; try { + if (fPruneMode && fCheckForPruning) { + FindFilesToPrune(setFilesToPrune); + if (!setFilesToPrune.empty()) { + fFlushForPrune = true; + if (!fHavePruned) { + pblocktree->WriteFlag("prunedblockfiles", true); + fHavePruned = true; + } + } + } if ((mode == FLUSH_STATE_ALWAYS) || ((mode == FLUSH_STATE_PERIODIC || mode == FLUSH_STATE_IF_NEEDED) && pcoinsTip->GetCacheSize() > nCoinCacheSize) || - (mode == FLUSH_STATE_PERIODIC && GetTimeMicros() > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000)) { + (mode == FLUSH_STATE_PERIODIC && GetTimeMicros() > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000) || + fFlushForPrune) { // Typical CCoins structures on disk are around 100 bytes in size. // Pushing a new one to the database can cause it to be written // twice (once in the log, and once in the tables). This is already @@ -1893,9 +1924,16 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { return state.Abort("Files to write to block index database"); } } - // Finally flush the chainstate (which may refer to block index entries). + // Flush the chainstate (which may refer to block index entries). if (!pcoinsTip->Flush()) return state.Abort("Failed to write to coin database"); + + // Finally remove any pruned files + if (fFlushForPrune) { + UnlinkPrunedFiles(setFilesToPrune); + fCheckForPruning = false; + } + // Update best block in wallet (so we can detect restored wallets). if (mode != FLUSH_STATE_IF_NEEDED) { GetMainSignals().SetBestChain(chainActive.GetLocator()); @@ -1913,8 +1951,15 @@ void FlushStateToDisk() { FlushStateToDisk(state, FLUSH_STATE_ALWAYS); } +void PruneAndFlush() { + CValidationState state; + fCheckForPruning = true; + FlushStateToDisk(state, FLUSH_STATE_NONE); +} + /** Update chainActive and related internal data structures. 
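The ordering inside the rewritten FlushStateToDisk() is the point of the change: prune candidates are chosen and the "prunedblockfiles" flag is written first, the block index and chainstate are flushed next, and only then are the blk/rev files unlinked, so the on-disk index should never reference data that has already been deleted. A condensed standalone model of that sequence (every body is a stub; only the ordering mirrors the diff):

// Standalone model of the prune-aware flush sequence; all bodies are stubs,
// only the ordering mirrors FlushStateToDisk() above.
#include <cstdio>
#include <set>

std::set<int> FindFilesToPruneStub() { return {0, 1}; } // pretend blk00000/blk00001 are prunable
void WritePrunedFlagStub()   { std::puts("1. record prunedblockfiles=true in the block tree"); }
void FlushBlockIndexStub()   { std::puts("2. flush dirty block index entries (HAVE_DATA already cleared)"); }
void FlushChainstateStub()   { std::puts("3. flush the coin database"); }
void UnlinkPrunedFilesStub(const std::set<int>& files)
{
    for (int n : files)
        std::printf("4. delete blk%05d.dat and rev%05d.dat\n", n, n);
}

int main()
{
    std::set<int> setFilesToPrune = FindFilesToPruneStub();
    bool fFlushForPrune = !setFilesToPrune.empty();
    if (fFlushForPrune)
        WritePrunedFlagStub();
    FlushBlockIndexStub();   // the on-disk index stops referencing pruned files first
    FlushChainstateStub();
    if (fFlushForPrune)
        UnlinkPrunedFilesStub(setFilesToPrune); // the files themselves go last
    return 0;
}

The new PruneAndFlush() simply sets fCheckForPruning and drives this same path with FLUSH_STATE_NONE, so a manually triggered prune reuses the identical ordering.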
*/ void static UpdateTip(CBlockIndex *pindexNew) { + const CChainParams& chainParams = Params(); chainActive.SetTip(pindexNew); // New best block @@ -1924,7 +1969,7 @@ void static UpdateTip(CBlockIndex *pindexNew) { LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%u\n", __func__, chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), - Checkpoints::GuessVerificationProgress(chainActive.Tip()), (unsigned int)pcoinsTip->GetCacheSize()); + Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), (unsigned int)pcoinsTip->GetCacheSize()); cvBlockChange.notify_all(); @@ -1945,7 +1990,7 @@ void static UpdateTip(CBlockIndex *pindexNew) { if (nUpgraded > 100/2) { // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user: - strMiscWarning = _("Warning: This version is obsolete, upgrade required!"); + strMiscWarning = _("Warning: This version is obsolete; upgrade required!"); CAlert::Notify(strMiscWarning, true); fWarned = true; } @@ -2083,15 +2128,29 @@ static CBlockIndex* FindMostWorkChain() { CBlockIndex *pindexTest = pindexNew; bool fInvalidAncestor = false; while (pindexTest && !chainActive.Contains(pindexTest)) { - assert(pindexTest->nStatus & BLOCK_HAVE_DATA); assert(pindexTest->nChainTx || pindexTest->nHeight == 0); - if (pindexTest->nStatus & BLOCK_FAILED_MASK) { - // Candidate has an invalid ancestor, remove entire chain from the set. - if (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork) + + // Pruned nodes may have entries in setBlockIndexCandidates for + // which block files have been deleted. Remove those as candidates + // for the most work chain if we come across them; we can't switch + // to a chain unless we have all the non-active-chain parent blocks. + bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; + bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); + if (fFailedChain || fMissingData) { + // Candidate chain is not usable (either invalid or missing data) + if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) pindexBestInvalid = pindexNew; CBlockIndex *pindexFailed = pindexNew; + // Remove the entire chain from the set. while (pindexTest != pindexFailed) { - pindexFailed->nStatus |= BLOCK_FAILED_CHILD; + if (fFailedChain) { + pindexFailed->nStatus |= BLOCK_FAILED_CHILD; + } else if (fMissingData) { + // If we're missing data, then add back to mapBlocksUnlinked, + // so that if the block arrives in the future we can try adding + // to setBlockIndexCandidates again. 
+ mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed)); + } setBlockIndexCandidates.erase(pindexFailed); pindexFailed = pindexFailed->pprev; } @@ -2194,6 +2253,7 @@ static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMo bool ActivateBestChain(CValidationState &state, CBlock *pblock) { CBlockIndex *pindexNewTip = NULL; CBlockIndex *pindexMostWork = NULL; + const CChainParams& chainParams = Params(); do { boost::this_thread::interruption_point(); @@ -2218,8 +2278,12 @@ bool ActivateBestChain(CValidationState &state, CBlock *pblock) { if (!fInitialDownload) { uint256 hashNewTip = pindexNewTip->GetBlockHash(); // Relay inventory, but don't relay old inventory during initial block download. - int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(); - { + int nBlockEstimate = 0; + if (fCheckpointsEnabled) + nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()); + // Don't relay blocks if pruning -- could cause a peer to try to download, resulting + // in a stalled download if the block file is pruned before the request. + if (nLocalServices & NODE_NETWORK) { LOCK(cs_vNodes); BOOST_FOREACH(CNode* pnode, vNodes) if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) @@ -2260,7 +2324,7 @@ bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) { } // The resulting new best tip may not be in setBlockIndexCandidates anymore, so - // add them again. + // add it again. BlockMap::iterator it = mapBlockIndex.begin(); while (it != mapBlockIndex.end()) { if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) { @@ -2419,6 +2483,8 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { + if (fPruneMode) + fCheckForPruning = true; if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenBlockFile(pos); if (file) { @@ -2450,6 +2516,8 @@ bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigne unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { + if (fPruneMode) + fCheckForPruning = true; if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenUndoFile(pos); if (file) { @@ -2542,7 +2610,8 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev) { - const Consensus::Params& consensusParams = Params().GetConsensus(); + const CChainParams& chainParams = Params(); + const Consensus::Params& consensusParams = chainParams.GetConsensus(); uint256 hash = block.GetHash(); if (hash == consensusParams.hashGenesisBlock) return true; @@ -2552,7 +2621,7 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta int nHeight = pindexPrev->nHeight+1; // Check proof of work - if (block.nBits != GetNextWorkRequired(pindexPrev, &block, Params().GetConsensus())) + if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams)) return state.DoS(100, 
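The new `if (nLocalServices & NODE_NETWORK)` guard around inventory relay only has an effect if a pruning node actually stops advertising NODE_NETWORK; that side of the change lives outside main.cpp and is not shown here, so the masking below is an assumption about the companion init code. A standalone sketch of the relationship:

// Standalone model: why a pruned node skips relaying new-block invs.
// NODE_NETWORK uses the real bit value (1 << 0); clearing it for pruned nodes
// is an assumption about the companion init change, not something in this diff.
#include <cstdint>
#include <cstdio>

static const uint64_t NODE_NETWORK = (1 << 0);

uint64_t LocalServicesFor(bool fPruneMode)
{
    uint64_t nLocalServices = NODE_NETWORK /* | other service bits */;
    if (fPruneMode)
        nLocalServices &= ~NODE_NETWORK; // assumed: a pruned node stops claiming to serve all blocks
    return nLocalServices;
}

int main()
{
    for (bool fPruneMode : {false, true}) {
        bool fRelayBlocks = (LocalServicesFor(fPruneMode) & NODE_NETWORK) != 0;
        std::printf("prune=%d -> relay block inventory: %s\n", fPruneMode, fRelayBlocks ? "yes" : "no");
    }
    return 0;
}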
error("%s: incorrect proof of work", __func__), REJECT_INVALID, "bad-diffbits"); @@ -2561,25 +2630,28 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta return state.Invalid(error("%s: block's timestamp is too early", __func__), REJECT_INVALID, "time-too-old"); - // Check that the block chain matches the known block chain up to a checkpoint - if (!Checkpoints::CheckBlock(nHeight, hash)) - return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight), - REJECT_CHECKPOINT, "checkpoint mismatch"); + if(fCheckpointsEnabled) + { + // Check that the block chain matches the known block chain up to a checkpoint + if (!Checkpoints::CheckBlock(chainParams.Checkpoints(), nHeight, hash)) + return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight), + REJECT_CHECKPOINT, "checkpoint mismatch"); - // Don't accept any forks from the main chain prior to last checkpoint - CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(); - if (pcheckpoint && nHeight < pcheckpoint->nHeight) - return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight)); + // Don't accept any forks from the main chain prior to last checkpoint + CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints()); + if (pcheckpoint && nHeight < pcheckpoint->nHeight) + return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight)); + } // Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded: - if (block.nVersion < 2 && IsSuperMajority(2, pindexPrev, Params().RejectBlockOutdatedMajority())) + if (block.nVersion < 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityRejectBlockOutdated)) { return state.Invalid(error("%s: rejected nVersion=1 block", __func__), REJECT_OBSOLETE, "bad-version"); } // Reject block.nVersion=2 blocks when 95% (75% on testnet) of the network has upgraded: - if (block.nVersion < 3 && IsSuperMajority(3, pindexPrev, Params().RejectBlockOutdatedMajority())) + if (block.nVersion < 3 && IsSuperMajority(3, pindexPrev, consensusParams.nMajorityRejectBlockOutdated)) { return state.Invalid(error("%s : rejected nVersion=2 block", __func__), REJECT_OBSOLETE, "bad-version"); @@ -2665,7 +2737,10 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, if (!AcceptBlockHeader(block, state, &pindex)) return false; - if (pindex->nStatus & BLOCK_HAVE_DATA) { + // If we're pruning, ensure that we don't allow a peer to dump a copy + // of old blocks. But we might need blocks that are not on the main chain + // to handle a reorg, even if we've processed once. + if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) { // TODO: deal better with duplicate blocks. 
// return state.DoS(20, error("AcceptBlock(): already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate"); return true; @@ -2698,6 +2773,9 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, return state.Abort(std::string("System error: ") + e.what()); } + if (fCheckForPruning) + FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files + return true; } @@ -2785,6 +2863,112 @@ bool AbortNode(const std::string &strMessage, const std::string &userMessage) { return false; } + +/** + * BLOCK PRUNING CODE + */ + +/* Calculate the amount of disk space the block & undo files currently use */ +uint64_t CalculateCurrentUsage() +{ + uint64_t retval = 0; + BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) { + retval += file.nSize + file.nUndoSize; + } + return retval; +} + +/* Prune a block file (modify associated database entries)*/ +void PruneOneBlockFile(const int fileNumber) +{ + for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) { + CBlockIndex* pindex = it->second; + if (pindex->nFile == fileNumber) { + pindex->nStatus &= ~BLOCK_HAVE_DATA; + pindex->nStatus &= ~BLOCK_HAVE_UNDO; + pindex->nFile = 0; + pindex->nDataPos = 0; + pindex->nUndoPos = 0; + setDirtyBlockIndex.insert(pindex); + + // Prune from mapBlocksUnlinked -- any block we prune would have + // to be downloaded again in order to consider its chain, at which + // point it would be considered as a candidate for + // mapBlocksUnlinked or setBlockIndexCandidates. + std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev); + while (range.first != range.second) { + std::multimap<CBlockIndex *, CBlockIndex *>::iterator it = range.first; + range.first++; + if (it->second == pindex) { + mapBlocksUnlinked.erase(it); + } + } + } + } + + vinfoBlockFile[fileNumber].SetNull(); + setDirtyFileInfo.insert(fileNumber); +} + + +void UnlinkPrunedFiles(std::set<int>& setFilesToPrune) +{ + for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) { + CDiskBlockPos pos(*it, 0); + boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); + boost::filesystem::remove(GetBlockPosFilename(pos, "rev")); + LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it); + } +} + +/* Calculate the block/rev files that should be deleted to remain under target*/ +void FindFilesToPrune(std::set<int>& setFilesToPrune) +{ + LOCK2(cs_main, cs_LastBlockFile); + if (chainActive.Tip() == NULL || nPruneTarget == 0) { + return; + } + if (chainActive.Tip()->nHeight <= Params().PruneAfterHeight()) { + return; + } + + unsigned int nLastBlockWeMustKeep = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP; + uint64_t nCurrentUsage = CalculateCurrentUsage(); + // We don't check to prune until after we've allocated new space for files + // So we should leave a buffer under our target to account for another allocation + // before the next pruning. 
+ uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; + uint64_t nBytesToPrune; + int count=0; + + if (nCurrentUsage + nBuffer >= nPruneTarget) { + for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { + nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize; + + if (vinfoBlockFile[fileNumber].nSize == 0) + continue; + + if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target? + break; + + // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip + if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeMustKeep) + break; + + PruneOneBlockFile(fileNumber); + // Queue up the files for removal + setFilesToPrune.insert(fileNumber); + nCurrentUsage -= nBytesToPrune; + count++; + } + } + + LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB min_must_keep=%d removed %d blk/rev pairs\n", + nPruneTarget/1024/1024, nCurrentUsage/1024/1024, + ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024, + nLastBlockWeMustKeep, count); +} + bool CheckDiskSpace(uint64_t nAdditionalBytes) { uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available; @@ -2854,6 +3038,7 @@ CBlockIndex * InsertBlockIndex(uint256 hash) bool static LoadBlockIndexDB() { + const CChainParams& chainparams = Params(); if (!pblocktree->LoadBlockIndexGuts()) return false; @@ -2872,7 +3057,9 @@ bool static LoadBlockIndexDB() { CBlockIndex* pindex = item.second; pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); - if (pindex->nStatus & BLOCK_HAVE_DATA) { + // We can link the chain of blocks for which we've received transactions at some point. + // Pruned nodes may have deleted the block. + if (pindex->nTx > 0) { if (pindex->pprev) { if (pindex->pprev->nChainTx) { pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; @@ -2929,6 +3116,11 @@ bool static LoadBlockIndexDB() } } + // Check whether we have ever pruned block & undo files + pblocktree->ReadFlag("prunedblockfiles", fHavePruned); + if (fHavePruned) + LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n"); + // Check whether we need to continue reindexing bool fReindexing = false; pblocktree->ReadReindexing(fReindexing); @@ -2949,7 +3141,7 @@ bool static LoadBlockIndexDB() LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__, chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), - Checkpoints::GuessVerificationProgress(chainActive.Tip())); + Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip())); return true; } @@ -3069,6 +3261,7 @@ void UnloadBlockIndex() delete entry.second; } mapBlockIndex.clear(); + fHavePruned = false; } bool LoadBlockIndex() @@ -3260,6 +3453,7 @@ void static CheckBlockIndex() int nHeight = 0; CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid. CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. + CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0. CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not). 
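FindFilesToPrune() works in whole blk/rev pairs: it deletes the oldest files until usage plus a one-chunk buffer fits under nPruneTarget, and it stops at any file whose newest block is within MIN_BLOCKS_TO_KEEP of the tip. A worked standalone example with invented file sizes (the 16 MiB / 1 MiB chunk sizes and the 288-block keep window are assumed defaults, not read from this diff):

// Standalone walk-through of the pruning loop above, with invented numbers.
// Chunk sizes and MIN_BLOCKS_TO_KEEP are assumptions (16 MiB, 1 MiB, 288).
#include <cstdint>
#include <cstdio>
#include <vector>

struct FileInfo { uint64_t nSize, nUndoSize; unsigned nHeightLast; };
static const uint64_t MiB = 1024 * 1024;

int main()
{
    const uint64_t nPruneTarget = 550 * MiB;         // e.g. a 550 MiB prune target
    const uint64_t nBuffer = 16 * MiB + 1 * MiB;     // assumed BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE
    const unsigned nTipHeight = 350000;
    const unsigned nLastBlockWeMustKeep = nTipHeight - 288;

    // Six invented block files, oldest first; the last one holds near-tip blocks.
    std::vector<FileInfo> files = {
        {128 * MiB, 10 * MiB, 200000}, {128 * MiB, 11 * MiB, 250000},
        {128 * MiB, 12 * MiB, 300000}, {128 * MiB, 12 * MiB, 330000},
        {128 * MiB, 13 * MiB, 349000}, { 60 * MiB,  6 * MiB, 350000},
    };

    uint64_t nCurrentUsage = 0;
    for (const FileInfo& f : files) nCurrentUsage += f.nSize + f.nUndoSize;
    std::printf("usage before: %llu MiB (target %llu MiB)\n",
                (unsigned long long)(nCurrentUsage / MiB),
                (unsigned long long)(nPruneTarget / MiB));

    int pruned = 0;
    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        for (size_t n = 0; n < files.size(); ++n) {
            if (nCurrentUsage + nBuffer < nPruneTarget) break;      // back under target
            if (files[n].nHeightLast > nLastBlockWeMustKeep) break; // too close to the tip
            nCurrentUsage -= files[n].nSize + files[n].nUndoSize;   // whole blk/rev pair goes
            ++pruned;
        }
    }
    std::printf("pruned %d file pairs, usage after: %llu MiB\n",
                pruned, (unsigned long long)(nCurrentUsage / MiB));
    return 0;
}

With these numbers only the two oldest pairs are removed: usage drops from 764 MiB to 487 MiB, which is under the 550 MiB target once the 17 MiB buffer is accounted for.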
CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not). @@ -3268,6 +3462,7 @@ void static CheckBlockIndex() nNodes++; if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex; + if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex; if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex; if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex; if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex; @@ -3279,12 +3474,21 @@ void static CheckBlockIndex() assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match. assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block. } - // HAVE_DATA is equivalent to VALID_TRANSACTIONS and equivalent to nTx > 0 (we stored the number of transactions in the block) - assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0)); - assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked - // All parents having data is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set. - assert((pindexFirstMissing != NULL) == (pindex->nChainTx == 0)); // nChainTx == 0 is used to signal that all parent block's transaction data is available. + // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred). + // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred. + if (!fHavePruned) { + // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0 + assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0)); + assert(pindexFirstMissing == pindexFirstNeverProcessed); + } else { + // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0 + if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0); + } + if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA); + assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent. + // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set. + assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned). assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0)); assert(pindex->nHeight == nHeight); // nHeight must be consistent. assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's. @@ -3297,11 +3501,20 @@ void static CheckBlockIndex() // Checks for not-invalid blocks. 
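The CheckBlockIndex() assertions rewritten above separate "this block's transactions were processed at some point" (nTx > 0) from "the block is still on disk" (BLOCK_HAVE_DATA); the two only coincide on a node that has never pruned. The same invariants, restated as a small standalone check with the flags reduced to plain bools:

// Standalone restatement of the pruning-aware invariants asserted above.
// A block index entry is reduced to three fields; returns false on violation.
#include <cassert>

struct IndexEntry {
    unsigned nTx;     // number of transactions, 0 until the block is processed
    bool haveData;    // BLOCK_HAVE_DATA: block is still on disk
    bool haveUndo;    // BLOCK_HAVE_UNDO: undo data is still on disk
};

bool CheckEntry(const IndexEntry& e, bool fHavePruned)
{
    if (!fHavePruned && (e.haveData != (e.nTx > 0)))
        return false;                    // never-pruned node: HAVE_DATA <=> processed
    if (fHavePruned && e.haveData && e.nTx == 0)
        return false;                    // pruned node: HAVE_DATA still implies processed
    if (e.haveUndo && !e.haveData)
        return false;                    // undo data never outlives the block itself
    return true;
}

int main()
{
    assert(CheckEntry({2500, true,  true }, false));  // normal processed block
    assert(CheckEntry({2500, false, false}, true));   // processed, then pruned
    assert(!CheckEntry({2500, false, false}, false)); // impossible without pruning
    assert(!CheckEntry({0,    false, true }, true));  // undo without block data
    return 0;
}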
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents. } - if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstMissing == NULL) { - if (pindexFirstInvalid == NULL) { // If this block sorts at least as good as the current tip and is valid, it must be in setBlockIndexCandidates. - assert(setBlockIndexCandidates.count(pindex)); + if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) { + if (pindexFirstInvalid == NULL) { + // If this block sorts at least as good as the current tip and + // is valid and we have all data for its parents, it must be in + // setBlockIndexCandidates. chainActive.Tip() must also be there + // even if some data has been pruned. + if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) { + assert(setBlockIndexCandidates.count(pindex)); + } + // If some parent is missing, then it could be that this block was in + // setBlockIndexCandidates but had to be removed because of the missing data. + // In this case it must be in mapBlocksUnlinked -- see test below. } - } else { // If this block sorts worse than the current tip, it cannot be in setBlockIndexCandidates. + } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates. assert(setBlockIndexCandidates.count(pindex) == 0); } // Check whether this block is in mapBlocksUnlinked. @@ -3315,12 +3528,28 @@ void static CheckBlockIndex() } rangeUnlinked.first++; } - if (pindex->pprev && pindex->nStatus & BLOCK_HAVE_DATA && pindexFirstMissing != NULL) { - if (pindexFirstInvalid == NULL) { // If this block has block data available, some parent doesn't, and has no invalid parents, it must be in mapBlocksUnlinked. - assert(foundInUnlinked); + if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) { + // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked. + assert(foundInUnlinked); + } + if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA + if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked. + if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) { + // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent. + assert(fHavePruned); // We must have pruned. + // This block may have entered mapBlocksUnlinked if: + // - it has a descendant that at some point had more work than the + // tip, and + // - we tried switching to that descendant but were missing + // data for some intermediate block between chainActive and the + // tip. + // So if this block is itself better than chainActive.Tip() and it wasn't in + // setBlockIndexCandidates, then it must be in mapBlocksUnlinked. + if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) { + if (pindexFirstInvalid == NULL) { + assert(foundInUnlinked); + } } - } else { // If this block does not have block data available, or all parents do, it cannot be in mapBlocksUnlinked. 
- assert(!foundInUnlinked); } // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow // End: actual consistency checks. @@ -3340,6 +3569,7 @@ void static CheckBlockIndex() // If pindex was the first with a certain property, unset the corresponding variable. if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL; if (pindex == pindexFirstMissing) pindexFirstMissing = NULL; + if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL; if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL; if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL; if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL; @@ -3459,7 +3689,6 @@ bool static AlreadyHave(const CInv& inv) return true; } - void static ProcessGetData(CNode* pfrom) { std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin(); @@ -3487,17 +3716,21 @@ void static ProcessGetData(CNode* pfrom) if (chainActive.Contains(mi->second)) { send = true; } else { + static const int nOneMonth = 30 * 24 * 60 * 60; // To prevent fingerprinting attacks, only send blocks outside of the active - // chain if they are valid, and no more than a month older than the best header - // chain we know about. + // chain if they are valid, and no more than a month older (both in time, and in + // best equivalent proof of work) than the best header chain we know about. send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) && - (mi->second->GetBlockTime() > pindexBestHeader->GetBlockTime() - 30 * 24 * 60 * 60); + (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) && + (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth); if (!send) { LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId()); } } } - if (send) + // Pruned nodes may have deleted the block, so check whether + // it's available before trying to send. + if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) { // Send block from disk CBlock block; @@ -3514,7 +3747,7 @@ void static ProcessGetData(CNode* pfrom) pfrom->PushMessage("merkleblock", merkleBlock); // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see // This avoids hurting performance by pointlessly requiring a round-trip - // Note that there is currently no way for a node to request any single transactions we didnt send here - + // Note that there is currently no way for a node to request any single transactions we didn't send here - // they must either disconnect and retry or request the full block. 
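The updated fingerprinting rule above no longer compares raw timestamps alone: a block off the active chain is also refused if it sits more than roughly a month behind the best known header when the gap is measured in proof of work converted back into time (GetBlockProofEquivalentTime). A standalone sketch of that "equivalent time" idea, with work as plain counters and a 600-second target spacing assumed:

// Standalone model of a proof-of-work "equivalent time" comparison like the one
// the new check performs. Work is a plain 64-bit counter; spacing is assumed 600 s.
#include <cstdint>
#include <cstdio>

int64_t ProofEquivalentSeconds(uint64_t bestHeaderWork, uint64_t blockWork,
                               uint64_t workPerTipBlock, int64_t nPowTargetSpacing)
{
    // How long would the network need, at today's difficulty, to redo the work
    // separating this block from the best known header?
    return (int64_t)((bestHeaderWork - blockWork) / workPerTipBlock) * nPowTargetSpacing;
}

int main()
{
    const int64_t nOneMonth = 30 * 24 * 60 * 60;  // same constant as in the diff
    const int64_t spacing = 600;                  // assumed 10-minute target spacing
    const uint64_t workPerBlock = 1000;           // arbitrary work units per current block
    const uint64_t bestWork = 10000000;           // arbitrary cumulative work of the best header

    // About 3000 blocks of work behind: roughly 20.8 days equivalent, still served.
    std::printf("3000 blocks behind -> serve: %d\n",
                ProofEquivalentSeconds(bestWork, bestWork - 3000 * workPerBlock, workPerBlock, spacing) < nOneMonth);
    // About 6000 blocks of work behind: roughly 41.7 days equivalent, refused.
    std::printf("6000 blocks behind -> serve: %d\n",
                ProofEquivalentSeconds(bestWork, bestWork - 6000 * workPerBlock, workPerBlock, spacing) < nOneMonth);
    return 0;
}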
// Thus, the protocol spec specified allows for us to provide duplicate txn here, // however we MUST always provide at least what the remote peer needs @@ -3527,7 +3760,7 @@ void static ProcessGetData(CNode* pfrom) // no response } - // Trigger them to send a getblocks request for the next batch of inventory + // Trigger the peer node to send a getblocks request for the next batch of inventory if (inv.hash == pfrom->hashContinue) { // Bypass PushInventory, this must send even if redundant, @@ -3776,7 +4009,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, { LOCK(cs_vNodes); // Use deterministic randomness to send to the same nodes for 24 hours - // at a time so the setAddrKnowns of the chosen nodes prevent repeats + // at a time so the addrKnowns of the chosen nodes prevent repeats static uint256 hashSalt; if (hashSalt.IsNull()) hashSalt = GetRandHash(); @@ -3841,7 +4074,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, if (inv.type == MSG_BLOCK) { UpdateBlockAvailability(pfrom->GetId(), inv.hash); if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { - // First request the headers preceeding the announced block. In the normal fully-synced + // First request the headers preceding the announced block. In the normal fully-synced // case where a new block is announced that succeeds the current tip (no reorganization), // there are no such headers. // Secondly, and only when we are close to being synced, we request the announced block directly, @@ -3923,8 +4156,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); if (--nLimit <= 0) { - // When this block is requested, we'll send an inv that'll make them - // getblocks the next batch of inventory. + // When this block is requested, we'll send an inv that'll + // trigger the peer to getblocks the next batch of inventory. LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); pfrom->hashContinue = pindex->GetBlockHash(); break; @@ -4161,9 +4394,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, // This asymmetric behavior for inbound and outbound connections was introduced // to prevent a fingerprinting attack: an attacker can send specific fake addresses - // to users' AddrMan and later request them by sending getaddr messages. - // Making users (which are behind NAT and can only make outgoing connections) ignore - // getaddr message mitigates the attack. + // to users' AddrMan and later request them by sending getaddr messages. + // Making nodes which are behind NAT and can only make outgoing connections ignore + // the getaddr message mitigates the attack. 
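The corrected comment above ("addrKnowns of the chosen nodes") leans on the existing deterministic addr-relay trick: the peers an address is forwarded to are chosen by a salted hash that only changes once per day, so a repeat within 24 hours lands on the same peers and is filtered by their known-address sets. A standalone sketch of that day-bucketed selection, with std::hash standing in for the salted uint256 hash and a two-peer fan-out assumed:

// Standalone sketch of 24-hour-stable relay target selection; std::hash over a
// composed string stands in for the salted uint256 hash used in main.cpp.
#include <algorithm>
#include <cstdio>
#include <ctime>
#include <functional>
#include <string>
#include <utility>
#include <vector>

std::vector<size_t> PickRelayTargets(const std::string& addr,
                                     const std::vector<std::string>& peers,
                                     std::time_t now, unsigned salt)
{
    const long dayBucket = static_cast<long>(now / (24 * 60 * 60)); // changes once per 24 hours
    std::vector<std::pair<size_t, size_t>> scored;                  // (hash, peer index)
    for (size_t i = 0; i < peers.size(); ++i) {
        size_t h = std::hash<std::string>{}(addr + '|' + peers[i] + '|' +
                                            std::to_string(dayBucket) + '|' + std::to_string(salt));
        scored.emplace_back(h, i);
    }
    std::sort(scored.begin(), scored.end()); // lowest hashes win, deterministically
    std::vector<size_t> out;
    for (size_t i = 0; i < scored.size() && i < 2; ++i)
        out.push_back(scored[i].second);
    return out;
}

int main()
{
    const std::vector<std::string> peers = {"peerA", "peerB", "peerC", "peerD"};
    const std::time_t now = std::time(nullptr);
    for (int run = 0; run < 2; ++run)            // same day, same two targets
        for (size_t idx : PickRelayTargets("203.0.113.7:8333", peers, now, 1234))
            std::printf("run %d -> relay via %s\n", run, peers[idx].c_str());
    return 0;
}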
else if ((strCommand == "getaddr") && (pfrom->fInbound)) { pfrom->vAddrToSend.clear(); @@ -4248,7 +4481,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, // Nonce mismatches are normal when pings are overlapping sProblem = "Nonce mismatch"; if (nonce == 0) { - // This is most likely a bug in another implementation somewhere, cancel this ping + // This is most likely a bug in another implementation somewhere; cancel this ping bPingFinished = true; sProblem = "Nonce zero"; } @@ -4257,7 +4490,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, sProblem = "Unsolicited pong without ping"; } } else { - // This is most likely a bug in another implementation somewhere, cancel this ping + // This is most likely a bug in another implementation somewhere; cancel this ping bPingFinished = true; sProblem = "Short payload"; } @@ -4516,7 +4749,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle) { const Consensus::Params& consensusParams = Params().GetConsensus(); { - // Don't send anything until we get their version message + // Don't send anything until we get its version message if (pto->nVersion == 0) return true; @@ -4560,9 +4793,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle) LOCK(cs_vNodes); BOOST_FOREACH(CNode* pnode, vNodes) { - // Periodically clear setAddrKnown to allow refresh broadcasts + // Periodically clear addrKnown to allow refresh broadcasts if (nLastRebroadcast) - pnode->setAddrKnown.clear(); + pnode->addrKnown.clear(); // Rebroadcast our address AdvertizeLocal(pnode); @@ -4580,9 +4813,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle) vAddr.reserve(pto->vAddrToSend.size()); BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend) { - // returns true if wasn't already contained in the set - if (pto->setAddrKnown.insert(addr).second) + if (!pto->addrKnown.contains(addr.GetKey())) { + pto->addrKnown.insert(addr.GetKey()); vAddr.push_back(addr); // receiver rejects addr messages larger than 1000 if (vAddr.size() >= 1000) @@ -4621,9 +4854,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle) if (pindexBestHeader == NULL) pindexBestHeader = chainActive.Tip(); bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. - if (!state.fSyncStarted && !pto->fClient && fFetch && !fImporting && !fReindex) { + if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close to today. - if (nSyncStarted == 0 || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { + if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { state.fSyncStarted = true; nSyncStarted++; CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader; @@ -4700,7 +4933,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle) // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link - // being saturated. We only count validated in-flight blocks so peers can't advertize nonexisting block hashes + // being saturated. 
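The relay loop above now calls addrKnown.contains(addr.GetKey()) followed by addrKnown.insert(addr.GetKey()) instead of relying on std::set::insert's return value, which implies addrKnown has become a bounded structure keyed by the serialized address; its definition lives in net.h and is not part of this diff, so the container below is only a stand-in. A standalone sketch of the contains-then-insert pattern with a small FIFO-evicting set:

// Standalone stand-in for the addrKnown change above: a bounded "recently seen"
// container with contains()/insert(), used the same way as the new relay loop.
// (The real type is defined in net.h; this FIFO set is only a model.)
#include <cstdio>
#include <deque>
#include <set>
#include <string>
#include <vector>

class BoundedKnownSet {
public:
    explicit BoundedKnownSet(size_t cap) : cap_(cap) {}
    bool contains(const std::string& key) const { return set_.count(key) > 0; }
    void insert(const std::string& key) {
        if (contains(key)) return;
        if (order_.size() == cap_) {             // evict the oldest entry
            set_.erase(order_.front());
            order_.pop_front();
        }
        order_.push_back(key);
        set_.insert(key);
    }
private:
    size_t cap_;
    std::deque<std::string> order_;
    std::set<std::string> set_;
};

int main()
{
    BoundedKnownSet addrKnown(2);                // tiny capacity, just to show eviction
    std::vector<std::string> vAddrToSend = {"a:8333", "b:8333", "a:8333", "c:8333", "a:8333"};
    std::vector<std::string> vAddr;              // what actually gets relayed
    for (const std::string& key : vAddrToSend) {
        if (!addrKnown.contains(key)) {          // same pattern as the new main.cpp loop
            addrKnown.insert(key);
            vAddr.push_back(key);
        }
    }
    for (const std::string& a : vAddr) std::printf("relay %s\n", a.c_str());
    return 0;
}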
We only count validated in-flight blocks so peers can't advertise non-existing block hashes // to unreasonably increase our timeout. if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * consensusParams.nPowTargetSpacing * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) { LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", state.vBlocksInFlight.front().hash.ToString(), pto->id); @@ -4711,7 +4944,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle) // Message: getdata (blocks) // vector<CInv> vGetData; - if (!pto->fDisconnect && !pto->fClient && fFetch && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { vector<CBlockIndex*> vToDownload; NodeId staller = -1; FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); |
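The disconnect threshold above encodes the "(2 + 0.5 * N) block intervals" rule from the comment: block-in-flight times are kept in microseconds, so 500000 * nPowTargetSpacing * (4 + N) microseconds is exactly 0.5 * spacing * (4 + N) seconds. A quick standalone check of that arithmetic, assuming the usual 600-second target spacing:

// Standalone check of the stalled-download timeout formula referenced above.
// nPowTargetSpacing = 600 s is assumed (the usual 10-minute target).
#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t nPowTargetSpacing = 600;                 // seconds
    for (int nValidatedQueuedBefore : {0, 1, 4}) {
        // Same expression as the code, kept in microseconds...
        int64_t timeoutMicros = 500000LL * nPowTargetSpacing * (4 + nValidatedQueuedBefore);
        // ...and the comment's form, (2 + 0.5*N) block intervals, in seconds.
        double commentSeconds = (2.0 + 0.5 * nValidatedQueuedBefore) * nPowTargetSpacing;
        std::printf("N=%d: code=%lld s, comment=%.0f s (%.1f min)\n",
                    nValidatedQueuedBefore, (long long)(timeoutMicros / 1000000),
                    commentSeconds, commentSeconds / 60.0);
    }
    return 0;
}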