Diffstat (limited to 'src/main.cpp')
-rw-r--r--  src/main.cpp | 379
1 file changed, 243 insertions, 136 deletions
diff --git a/src/main.cpp b/src/main.cpp
index 39da3406a8..e92f64413a 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -11,6 +11,7 @@
 #include "chainparams.h"
 #include "checkpoints.h"
 #include "checkqueue.h"
+#include "consensus/validation.h"
 #include "init.h"
 #include "merkleblock.h"
 #include "net.h"
@@ -28,6 +29,7 @@
 #include <boost/algorithm/string/replace.hpp>
 #include <boost/filesystem.hpp>
 #include <boost/filesystem/fstream.hpp>
+#include <boost/math/distributions/poisson.hpp>
 #include <boost/thread.hpp>
 
 using namespace std;
@@ -57,7 +59,7 @@ bool fPruneMode = false;
 bool fIsBareMultisigStd = true;
 bool fCheckBlockIndex = false;
 bool fCheckpointsEnabled = true;
-unsigned int nCoinCacheSize = 5000;
+size_t nCoinCacheUsage = 5000 * 300;
 uint64_t nPruneTarget = 0;
 
 /** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
@@ -75,10 +77,9 @@ void EraseOrphansFor(NodeId peer);
 
 /**
  * Returns true if there are nRequired or more blocks of minVersion or above
- * in the last Params().ToCheckBlockUpgradeMajority() blocks, starting at pstart
- * and going backwards.
+ * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
  */
-static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned int nRequired);
+static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
 static void CheckBlockIndex();
 
 /** Constant stuff for coinbase transactions we create: */
@@ -154,8 +155,8 @@ namespace {
         uint256 hash;
         CBlockIndex *pindex;  //! Optional.
         int64_t nTime;  //! Time of "getdata" request in microseconds.
-        int nValidatedQueuedBefore;  //! Number of blocks queued with validated headers (globally) at the time this one is requested.
         bool fValidatedHeaders;  //! Whether this block has validated headers at the time of request.
+        int64_t nTimeDisconnect; //! The timeout for this block request (for disconnecting a slow peer)
     };
     map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
@@ -216,6 +217,7 @@ struct CNodeState {
     int64_t nStallingSince;
     list<QueuedBlock> vBlocksInFlight;
     int nBlocksInFlight;
+    int nBlocksInFlightValidHeaders;
     //! Whether we consider this a preferred download peer.
     bool fPreferredDownload;
@@ -229,6 +231,7 @@ struct CNodeState {
         fSyncStarted = false;
         nStallingSince = 0;
         nBlocksInFlight = 0;
+        nBlocksInFlightValidHeaders = 0;
         fPreferredDownload = false;
     }
 };
@@ -260,6 +263,12 @@ void UpdatePreferredDownload(CNode* node, CNodeState* state)
     nPreferredDownload += state->fPreferredDownload;
 }
 
+// Returns time at which to timeout block request (nTime in microseconds)
+int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consensus::Params &consensusParams)
+{
+    return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
+}
+
 void InitializeNode(NodeId nodeid, const CNode *pnode) {
     LOCK(cs_main);
     CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
@@ -287,30 +296,36 @@ void FinalizeNode(NodeId nodeid) {
 }
 
 // Requires cs_main.
-void MarkBlockAsReceived(const uint256& hash) {
+// Returns a bool indicating whether we requested this block.
+bool MarkBlockAsReceived(const uint256& hash) {
     map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end()) {
         CNodeState *state = State(itInFlight->second.first);
         nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
+        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
         state->vBlocksInFlight.erase(itInFlight->second.second);
         state->nBlocksInFlight--;
         state->nStallingSince = 0;
         mapBlocksInFlight.erase(itInFlight);
+        return true;
     }
+    return false;
 }
 
 // Requires cs_main.
-void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex *pindex = NULL) {
+void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
     CNodeState *state = State(nodeid);
     assert(state != NULL);
 
     // Make sure it's not listed somewhere already.
     MarkBlockAsReceived(hash);
 
-    QueuedBlock newentry = {hash, pindex, GetTimeMicros(), nQueuedValidatedHeaders, pindex != NULL};
+    int64_t nNow = GetTimeMicros();
+    QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)};
     nQueuedValidatedHeaders += newentry.fValidatedHeaders;
     list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
     state->nBlocksInFlight++;
+    state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
     mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
 }
 
@@ -659,14 +674,8 @@ bool IsStandardTx(const CTransaction& tx, string& reason)
 
 bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
 {
-    AssertLockHeld(cs_main);
-    // Time based nLockTime implemented in 0.1.6
     if (tx.nLockTime == 0)
         return true;
-    if (nBlockHeight == 0)
-        nBlockHeight = chainActive.Height();
-    if (nBlockTime == 0)
-        nBlockTime = GetAdjustedTime();
     if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
         return true;
     BOOST_FOREACH(const CTxIn& txin, tx.vin)
@@ -675,10 +684,16 @@ bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
     return true;
 }
 
+bool CheckFinalTx(const CTransaction &tx)
+{
+    AssertLockHeld(cs_main);
+    return IsFinalTx(tx, chainActive.Height() + 1, GetAdjustedTime());
+}
+
 /**
  * Check transaction inputs to mitigate two
  * potential denial-of-service attacks:
- * 
+ *
  * 1. scriptSigs with extra data stuffed into them,
  *    not consumed by scriptPubKey (or P2SH script)
  * 2. P2SH scripts with a crazy number of expensive
@@ -891,21 +906,8 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
     // Only accept nLockTime-using transactions that can be mined in the next
     // block; we don't want our mempool filled up with transactions that can't
     // be mined yet.
-    //
-    // However, IsFinalTx() is confusing... Without arguments, it uses
-    // chainActive.Height() to evaluate nLockTime; when a block is accepted,
-    // chainActive.Height() is set to the value of nHeight in the block.
-    // However, when IsFinalTx() is called within CBlock::AcceptBlock(), the
-    // height of the block *being* evaluated is what is used. Thus if we want
-    // to know if a transaction can be part of the *next* block, we need to
-    // call IsFinalTx() with one more than chainActive.Height().
-    //
-    // Timestamps on the other hand don't get any special treatment, because we
-    // can't know what timestamp the next block will have, and there aren't
-    // timestamp applications where it matters.
-    if (!IsFinalTx(tx, chainActive.Height() + 1))
-        return state.DoS(0,
-                         error("AcceptToMemoryPool: non-final"),
+    if (!CheckFinalTx(tx))
+        return state.DoS(0, error("AcceptToMemoryPool: non-final"),
                          REJECT_NONSTANDARD, "non-final");
 
     // is it already in the memory pool?
@@ -987,7 +989,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
         CAmount nFees = nValueIn-nValueOut;
         double dPriority = view.GetPriority(tx, chainActive.Height());
 
-        CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height());
+        CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), mempool.HasNoInputsOf(tx));
         unsigned int nSize = entry.GetTxSize();
 
         // Don't accept it if it can't get into a block
@@ -1053,7 +1055,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
         }
 
         // Store transaction in memory
-        pool.addUnchecked(hash, entry);
+        pool.addUnchecked(hash, entry, !IsInitialBlockDownload());
     }
 
     SyncWithWallets(tx, NULL);
@@ -1134,7 +1136,7 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock
 // CBlock and CBlockIndex
 //
 
-bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
+bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
 {
     // Open history file to append
     CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
@@ -1143,7 +1145,7 @@ bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
 
     // Write index header
     unsigned int nSize = fileout.GetSerializeSize(block);
-    fileout << FLATDATA(Params().MessageStart()) << nSize;
+    fileout << FLATDATA(messageStart) << nSize;
 
     // Write block
     long fileOutPos = ftell(fileout.Get());
@@ -1189,19 +1191,17 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex)
     return true;
 }
 
-CAmount GetBlockValue(int nHeight, const CAmount& nFees)
+CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
 {
-    CAmount nSubsidy = 50 * COIN;
-    int halvings = nHeight / Params().SubsidyHalvingInterval();
-
+    int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
     // Force block reward to zero when right shift is undefined.
     if (halvings >= 64)
-        return nFees;
+        return 0;
 
+    CAmount nSubsidy = 50 * COIN;
     // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
     nSubsidy >>= halvings;
-
-    return nSubsidy + nFees;
+    return nSubsidy;
 }
 
 bool IsInitialBlockDownload()
@@ -1499,7 +1499,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
 
 namespace {
 
-bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock)
+bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
 {
     // Open history file to append
     CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
@@ -1508,7 +1508,7 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint
 
     // Write index header
     unsigned int nSize = fileout.GetSerializeSize(blockundo);
-    fileout << FLATDATA(Params().MessageStart()) << nSize;
+    fileout << FLATDATA(messageStart) << nSize;
 
     // Write undo data
     long fileOutPos = ftell(fileout.Get());
@@ -1553,6 +1553,24 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uin
     return true;
 }
 
+/** Abort with a message */
+bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
+{
+    strMiscWarning = strMessage;
+    LogPrintf("*** %s\n", strMessage);
+    uiInterface.ThreadSafeMessageBox(
+        userMessage.empty() ? _("Error: A fatal internal error occured, see debug.log for details") : userMessage,
+        "", CClientUIInterface::MSG_ERROR);
+    StartShutdown();
+    return false;
+}
+
+bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
+{
+    AbortNode(strMessage, userMessage);
+    return state.Error(strMessage);
+}
+
 } // anon namespace
 
 /**
@@ -1688,6 +1706,64 @@ void ThreadScriptCheck() {
     scriptcheckqueue.Thread();
 }
 
+//
+// Called periodically asynchronously; alerts if it smells like
+// we're being fed a bad chain (blocks being generated much
+// too slowly or too quickly).
+//
+void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CChain& chain, int64_t nPowTargetSpacing)
+{
+    if (initialDownloadCheck()) return;
+
+    static int64_t lastAlertTime = 0;
+    int64_t now = GetAdjustedTime();
+    if (lastAlertTime > now-60*60*24) return; // Alert at most once per day
+
+    const int SPAN_HOURS=4;
+    const int SPAN_SECONDS=SPAN_HOURS*60*60;
+    int BLOCKS_EXPECTED = SPAN_SECONDS / nPowTargetSpacing;
+
+    boost::math::poisson_distribution<double> poisson(BLOCKS_EXPECTED);
+
+    std::string strWarning;
+    int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;
+
+    LOCK(cs);
+    int h = chain.Height();
+    while (h > 0 && chain[h]->GetBlockTime() >= startTime)
+        --h;
+    int nBlocks = chain.Height()-h;
+
+    // How likely is it to find that many by chance?
+    double p = boost::math::pdf(poisson, nBlocks);
+
+    LogPrint("partitioncheck", "%s : Found %d blocks in the last %d hours\n", __func__, nBlocks, SPAN_HOURS);
+    LogPrint("partitioncheck", "%s : likelihood: %g\n", __func__, p);
+
+    // Aim for one false-positive about every fifty years of normal running:
+    const int FIFTY_YEARS = 50*365*24*60*60;
+    double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);
+
+    if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
+    {
+        // Many fewer blocks than expected: alert!
+        strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
+                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
+    }
+    else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
+    {
+        // Many more blocks than expected: alert!
+        strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
+                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
+    }
+    if (!strWarning.empty())
+    {
+        strMiscWarning = strWarning;
+        CAlert::Notify(strWarning, true);
+        lastAlertTime = now;
+    }
+}
+
 static int64_t nTimeVerify = 0;
 static int64_t nTimeConnect = 0;
 static int64_t nTimeIndex = 0;
@@ -1754,7 +1830,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
     unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
 
     // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks, when 75% of the network has upgraded:
-    if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, Params().EnforceBlockUpgradeMajority())) {
+    if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
         flags |= SCRIPT_VERIFY_DERSIG;
     }
 
@@ -1817,10 +1893,11 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
     int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
     LogPrint("bench", "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001);
 
-    if (block.vtx[0].GetValueOut() > GetBlockValue(pindex->nHeight, nFees))
+    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
+    if (block.vtx[0].GetValueOut() > blockReward)
         return state.DoS(100,
                          error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
-                               block.vtx[0].GetValueOut(), GetBlockValue(pindex->nHeight, nFees)),
+                               block.vtx[0].GetValueOut(), blockReward),
                          REJECT_INVALID, "bad-cb-amount");
 
     if (!control.Wait())
@@ -1838,8 +1915,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
             CDiskBlockPos pos;
             if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                 return error("ConnectBlock(): FindUndoPos failed");
-            if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash()))
-                return state.Abort("Failed to write undo data");
+            if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
+                return AbortNode(state, "Failed to write undo data");
 
             // update nUndoPos in block index
             pindex->nUndoPos = pos.nPos;
@@ -1852,7 +1929,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
 
     if (fTxIndex)
         if (!pblocktree->WriteTxIndex(vPos))
-            return state.Abort("Failed to write transaction index");
+            return AbortNode(state, "Failed to write transaction index");
 
     // add this block to the view's block chain
     view.SetBestBlock(pindex->GetBlockHash());
@@ -1887,11 +1964,14 @@ enum FlushStateMode {
 bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
     LOCK2(cs_main, cs_LastBlockFile);
     static int64_t nLastWrite = 0;
+    static int64_t nLastFlush = 0;
+    static int64_t nLastSetChain = 0;
     std::set<int> setFilesToPrune;
     bool fFlushForPrune = false;
     try {
     if (fPruneMode && fCheckForPruning) {
         FindFilesToPrune(setFilesToPrune);
+        fCheckForPruning = false;
        if (!setFilesToPrune.empty()) {
            fFlushForPrune = true;
            if (!fHavePruned) {
@@ -1900,16 +1980,32 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
            }
        }
    }
-    if ((mode == FLUSH_STATE_ALWAYS) ||
-        ((mode == FLUSH_STATE_PERIODIC || mode == FLUSH_STATE_IF_NEEDED) && pcoinsTip->GetCacheSize() > nCoinCacheSize) ||
-        (mode == FLUSH_STATE_PERIODIC && GetTimeMicros() > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000) ||
-        fFlushForPrune) {
-        // Typical CCoins structures on disk are around 100 bytes in size.
-        // Pushing a new one to the database can cause it to be written
-        // twice (once in the log, and once in the tables). This is already
-        // an overestimation, as most will delete an existing entry or
-        // overwrite one. Still, use a conservative safety factor of 2.
-        if (!CheckDiskSpace(100 * 2 * 2 * pcoinsTip->GetCacheSize()))
+    int64_t nNow = GetTimeMicros();
+    // Avoid writing/flushing immediately after startup.
+    if (nLastWrite == 0) {
+        nLastWrite = nNow;
+    }
+    if (nLastFlush == 0) {
+        nLastFlush = nNow;
+    }
+    if (nLastSetChain == 0) {
+        nLastSetChain = nNow;
+    }
+    size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
+    // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
+    bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
+    // The cache is over the limit, we have to write now.
+    bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
+    // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
+    bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
+    // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
+    bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
+    // Combine all conditions that result in a full cache flush.
+    bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
+    // Write blocks and block index to disk.
+    if (fDoFullFlush || fPeriodicWrite) {
+        // Depend on nMinDiskSpace to ensure we can write block index
+        if (!CheckDiskSpace(0))
             return state.Error("out of disk space");
         // First make sure all block and undo data is flushed to disk.
         FlushBlockFile();
@@ -1928,27 +2024,35 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
                 setDirtyBlockIndex.erase(it++);
             }
             if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
-                return state.Abort("Files to write to block index database");
+                return AbortNode(state, "Files to write to block index database");
             }
         }
-        // Flush the chainstate (which may refer to block index entries).
-        if (!pcoinsTip->Flush())
-            return state.Abort("Failed to write to coin database");
-        // Finally remove any pruned files
-        if (fFlushForPrune) {
+        if (fFlushForPrune)
             UnlinkPrunedFiles(setFilesToPrune);
-            fCheckForPruning = false;
-        }
-
+        nLastWrite = nNow;
+    }
+    // Flush best chain related state. This can only be done if the blocks / block index write was also done.
+    if (fDoFullFlush) {
+        // Typical CCoins structures on disk are around 128 bytes in size.
+        // Pushing a new one to the database can cause it to be written
+        // twice (once in the log, and once in the tables). This is already
+        // an overestimation, as most will delete an existing entry or
+        // overwrite one. Still, use a conservative safety factor of 2.
+        if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
+            return state.Error("out of disk space");
+        // Flush the chainstate (which may refer to block index entries).
+        if (!pcoinsTip->Flush())
+            return AbortNode(state, "Failed to write to coin database");
+        nLastFlush = nNow;
+    }
+    if ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000) {
         // Update best block in wallet (so we can detect restored wallets).
-        if (mode != FLUSH_STATE_IF_NEEDED) {
-            GetMainSignals().SetBestChain(chainActive.GetLocator());
-        }
-        nLastWrite = GetTimeMicros();
+        GetMainSignals().SetBestChain(chainActive.GetLocator());
+        nLastSetChain = nNow;
     }
     } catch (const std::runtime_error& e) {
-        return state.Abort(std::string("System error while flushing: ") + e.what());
+        return AbortNode(state, std::string("System error while flushing: ") + e.what());
     }
     return true;
 }
@@ -1973,10 +2077,10 @@ void static UpdateTip(CBlockIndex *pindexNew) {
     nTimeBestReceived = GetTime();
     mempool.AddTransactionsUpdated(1);
 
-    LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%u\n", __func__,
+    LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
       chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
       DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
-      Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), (unsigned int)pcoinsTip->GetCacheSize());
+      Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
 
     cvBlockChange.notify_all();
 
@@ -2012,7 +2116,7 @@ bool static DisconnectTip(CValidationState &state) {
     // Read block from disk.
     CBlock block;
     if (!ReadBlockFromDisk(block, pindexDelete))
-        return state.Abort("Failed to read block");
+        return AbortNode(state, "Failed to read block");
     // Apply the block atomically to the chain state.
     int64_t nStart = GetTimeMicros();
     {
@@ -2051,7 +2155,7 @@ static int64_t nTimeFlush = 0;
 static int64_t nTimeChainState = 0;
 static int64_t nTimePostConnect = 0;
 
-/** 
+/**
  * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
  * corresponding to pindexNew, to bypass loading it again from disk.
  */
@@ -2063,7 +2167,7 @@ bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *
     CBlock block;
     if (!pblock) {
         if (!ReadBlockFromDisk(block, pindexNew))
-            return state.Abort("Failed to read block");
+            return AbortNode(state, "Failed to read block");
         pblock = &block;
     }
     // Apply the block atomically to the chain state.
@@ -2094,7 +2198,7 @@ bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *
     LogPrint("bench", "  - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
     // Remove conflicting transactions from the mempool.
     list<CTransaction> txConflicted;
-    mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted);
+    mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload());
    mempool.check(pcoinsTip);
    // Update chainActive & related variables.
     UpdateTip(pindexNew);
@@ -2646,18 +2750,14 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta
     }
 
     // Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded:
-    if (block.nVersion < 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityRejectBlockOutdated))
-    {
+    if (block.nVersion < 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams))
         return state.Invalid(error("%s: rejected nVersion=1 block", __func__),
                              REJECT_OBSOLETE, "bad-version");
-    }
 
     // Reject block.nVersion=2 blocks when 95% (75% on testnet) of the network has upgraded:
-    if (block.nVersion < 3 && IsSuperMajority(3, pindexPrev, consensusParams.nMajorityRejectBlockOutdated))
-    {
+    if (block.nVersion < 3 && IsSuperMajority(3, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams))
         return state.Invalid(error("%s : rejected nVersion=2 block", __func__),
                              REJECT_OBSOLETE, "bad-version");
-    }
 
     return true;
 }
@@ -2665,6 +2765,7 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta
 bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev)
 {
     const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
+    const Consensus::Params& consensusParams = Params().GetConsensus();
 
     // Check that all transactions are finalized
     BOOST_FOREACH(const CTransaction& tx, block.vtx)
@@ -2674,7 +2775,7 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIn
 
     // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
     // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
-    if (block.nVersion >= 2 && IsSuperMajority(2, pindexPrev, Params().EnforceBlockUpgradeMajority()))
+    if (block.nVersion >= 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityEnforceBlockUpgrade, consensusParams))
     {
         CScript expect = CScript() << nHeight;
         if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
@@ -2730,8 +2831,9 @@ bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBloc
     return true;
 }
 
-bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, CDiskBlockPos* dbp)
+bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp)
 {
+    const CChainParams& chainparams = Params();
     AssertLockHeld(cs_main);
 
     CBlockIndex *&pindex = *ppindex;
@@ -2739,13 +2841,18 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex,
     if (!AcceptBlockHeader(block, state, &pindex))
         return false;
 
-    // If we're pruning, ensure that we don't allow a peer to dump a copy
-    // of old blocks. But we might need blocks that are not on the main chain
-    // to handle a reorg, even if we've processed once.
-    if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
-        // TODO: deal better with duplicate blocks.
-        // return state.DoS(20, error("AcceptBlock(): already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate");
-        return true;
+    // Try to process all requested blocks that we don't have, but only
+    // process an unrequested block if it's new and has enough work to
+    // advance our tip.
+    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
+    bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
+
+    // TODO: deal better with return value and error conditions for duplicate
+    // and unrequested blocks.
+    if (fAlreadyHave) return true;
+    if (!fRequested) {  // If we didn't ask for it:
+        if (pindex->nTx != 0) return true;  // This is a previously-processed block that was pruned
+        if (!fHasMoreWork) return true;     // Don't process less-work chains
     }
 
     if ((!CheckBlock(block, state)) || !ContextualCheckBlock(block, state, pindex->pprev)) {
@@ -2767,12 +2874,12 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex,
         if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
             return error("AcceptBlock(): FindBlockPos failed");
         if (dbp == NULL)
-            if (!WriteBlockToDisk(block, blockPos))
-                return state.Abort("Failed to write block");
+            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
+                AbortNode(state, "Failed to write block");
         if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
             return error("AcceptBlock(): ReceivedBlockTransactions failed");
     } catch (const std::runtime_error& e) {
-        return state.Abort(std::string("System error: ") + e.what());
+        return AbortNode(state, std::string("System error: ") + e.what());
     }
 
     if (fCheckForPruning)
@@ -2781,11 +2888,10 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex,
     return true;
 }
 
-static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned int nRequired)
+static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams)
 {
-    unsigned int nToCheck = Params().ToCheckBlockUpgradeMajority();
     unsigned int nFound = 0;
-    for (unsigned int i = 0; i < nToCheck && nFound < nRequired && pstart != NULL; i++)
+    for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++)
     {
         if (pstart->nVersion >= minVersion)
             ++nFound;
@@ -2795,21 +2901,22 @@ static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned
 }
 
 
-bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp)
+bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, bool fForceProcessing, CDiskBlockPos *dbp)
 {
     // Preliminary checks
     bool checked = CheckBlock(*pblock, state);
 
     {
         LOCK(cs_main);
-        MarkBlockAsReceived(pblock->GetHash());
+        bool fRequested = MarkBlockAsReceived(pblock->GetHash());
+        fRequested |= fForceProcessing;
         if (!checked) {
             return error("%s: CheckBlock FAILED", __func__);
         }
 
         // Store to disk
         CBlockIndex *pindex = NULL;
-        bool ret = AcceptBlock(*pblock, state, &pindex, dbp);
+        bool ret = AcceptBlock(*pblock, state, &pindex, fRequested, dbp);
         if (pindex && pfrom) {
             mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
         }
@@ -2848,24 +2955,6 @@ bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex
     return true;
 }
 
-
-
-
-
-
-
-
-bool AbortNode(const std::string &strMessage, const std::string &userMessage) {
-    strMiscWarning = strMessage;
-    LogPrintf("*** %s\n", strMessage);
-    uiInterface.ThreadSafeMessageBox(
_("Error: A fatal internal error occured, see debug.log for details") : userMessage, - "", CClientUIInterface::MSG_ERROR); - StartShutdown(); - return false; -} - - /** * BLOCK PRUNING CODE */ @@ -2934,7 +3023,7 @@ void FindFilesToPrune(std::set<int>& setFilesToPrune) return; } - unsigned int nLastBlockWeMustKeep = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP; + unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP; uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files // So we should leave a buffer under our target to account for another allocation @@ -2954,7 +3043,7 @@ void FindFilesToPrune(std::set<int>& setFilesToPrune) break; // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip - if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeMustKeep) + if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) break; PruneOneBlockFile(fileNumber); @@ -2965,10 +3054,10 @@ void FindFilesToPrune(std::set<int>& setFilesToPrune) } } - LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB min_must_keep=%d removed %d blk/rev pairs\n", + LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n", nPruneTarget/1024/1024, nCurrentUsage/1024/1024, ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024, - nLastBlockWeMustKeep, count); + nLastBlockWeCanPrune, count); } bool CheckDiskSpace(uint64_t nAdditionalBytes) @@ -3199,7 +3288,7 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth } } // check level 3: check for inconsistencies during memory-only disconnect of tip blocks - if (nCheckLevel >= 3 && pindex == pindexState && (coins.GetCacheSize() + pcoinsTip->GetCacheSize()) <= nCoinCacheSize) { + if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) { bool fClean = true; if (!DisconnectBlock(block, state, pindex, coins, &fClean)) return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); @@ -3276,6 +3365,7 @@ bool LoadBlockIndex() bool InitBlockIndex() { + const CChainParams& chainparams = Params(); LOCK(cs_main); // Check whether we're already initialized if (chainActive.Genesis() != NULL) @@ -3296,7 +3386,7 @@ bool InitBlockIndex() { CValidationState state; if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime())) return error("LoadBlockIndex(): FindBlockPos failed"); - if (!WriteBlockToDisk(block, blockPos)) + if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) return error("LoadBlockIndex(): writing genesis block to disk failed"); CBlockIndex *pindex = AddToBlockIndex(block); if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) @@ -3374,7 +3464,7 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp) // process in case the block isn't known yet if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) { CValidationState state; - if (ProcessNewBlock(state, NULL, &block, dbp)) + if (ProcessNewBlock(state, NULL, &block, true, dbp)) nLoaded++; if (state.IsError()) break; @@ -3396,7 +3486,7 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp) LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(), head.ToString()); CValidationState dummy; - if 
-                        if (ProcessNewBlock(dummy, NULL, &block, &it->second))
+                        if (ProcessNewBlock(dummy, NULL, &block, true, &it->second))
                        {
                            nLoaded++;
                            queue.push_back(block.GetHash());
@@ -3608,7 +3698,7 @@ void static CheckBlockIndex()
 // CAlert
 //
 
-string GetWarnings(string strFor)
+std::string GetWarnings(const std::string& strFor)
 {
     int nPriority = 0;
     string strStatusBar;
@@ -4091,7 +4181,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                         vToFetch.push_back(inv);
                         // Mark block as in flight already, even though the actual "getdata" message only goes out
                         // later (within the same cs_main lock, though).
-                        MarkBlockAsInFlight(pfrom->GetId(), inv.hash);
+                        MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
                    }
                    LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
                }
@@ -4176,6 +4266,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
 
         LOCK(cs_main);
 
+        if (IsInitialBlockDownload())
+            return true;
+
         CBlockIndex* pindex = NULL;
         if (locator.IsNull())
         {
@@ -4380,7 +4473,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         pfrom->AddInventoryKnown(inv);
 
         CValidationState state;
-        ProcessNewBlock(state, pfrom, &block);
+        // Process all blocks from whitelisted peers, even if not requested.
+        ProcessNewBlock(state, pfrom, &block, pfrom->fWhitelisted, NULL);
         int nDoS;
         if (state.IsInvalid(nDoS)) {
             pfrom->PushMessage("reject", strCommand, state.GetRejectCode(),
@@ -4937,9 +5031,22 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
         // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
         // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
         // to unreasonably increase our timeout.
-        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * consensusParams.nPowTargetSpacing * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) {
-            LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", state.vBlocksInFlight.front().hash.ToString(), pto->id);
-            pto->fDisconnect = true;
+        // We also compare the block download timeout originally calculated against the time at which we'd disconnect
+        // if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
+        // only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
+        // permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
+        // more quickly than once every 5 minutes, then we'll shorten the download window for this block).
+        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
+            QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
+            int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams);
+            if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
+                LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
+                queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
+            }
+            if (queuedBlock.nTimeDisconnect < nNow) {
+                LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
+                pto->fDisconnect = true;
+            }
        }
 
         //
@@ -4952,7 +5059,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
            FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
            BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
                vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
-                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
+                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
                LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->id);
            }
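
Note on the new per-request timeout: GetBlockTimeout() above computes the disconnect deadline as nTime plus one half-spacing (500000 microseconds times nPowTargetSpacing) for each of (4 + nValidatedQueuedBefore) slots. The snippet below is a minimal standalone sketch of that arithmetic, not the patched main.cpp; the helper name GetBlockTimeoutSketch and the main() driver are illustrative only, and it assumes the main-net 10-minute spacing.

// Sketch: with 10-minute spacing and nothing else queued, a request times out
// after 20 minutes; five queued validated headers push it out to 45 minutes.
#include <cstdint>
#include <cstdio>

int64_t GetBlockTimeoutSketch(int64_t nTimeMicros, int nValidatedQueuedBefore, int64_t nPowTargetSpacing)
{
    // Same formula as GetBlockTimeout() in the diff: half a spacing per slot,
    // four slots minimum plus one per already-queued validated header.
    return nTimeMicros + 500000 * nPowTargetSpacing * (4 + nValidatedQueuedBefore);
}

int main()
{
    const int64_t nSpacing = 600; // assumed main-net target spacing, seconds
    const int64_t nNow = 0;       // request time, microseconds
    int64_t t0 = GetBlockTimeoutSketch(nNow, 0, nSpacing);
    int64_t t5 = GetBlockTimeoutSketch(nNow, 5, nSpacing);
    std::printf("timeout with 0 queued: %lld min\n", (long long)(t0 / 60000000)); // 20
    std::printf("timeout with 5 queued: %lld min\n", (long long)(t5 / 60000000)); // 45
    return 0;
}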
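
Note on the subsidy refactor: GetBlockValue(nHeight, nFees) becomes GetBlockSubsidy(nHeight, consensusParams), and ConnectBlock() now checks the coinbase against nFees + subsidy. A minimal standalone sketch of the halving arithmetic follows; GetBlockSubsidySketch and the sample heights are illustrative, assuming the main-net 210,000-block interval.

// Sketch: 50-coin subsidy right-shifted once per halving interval, forced to
// zero once the shift would be undefined (64 or more halvings).
#include <cstdint>
#include <cstdio>

static const int64_t COIN = 100000000; // satoshis per coin

int64_t GetBlockSubsidySketch(int nHeight, int nSubsidyHalvingInterval)
{
    int halvings = nHeight / nSubsidyHalvingInterval;
    if (halvings >= 64)
        return 0;                    // right shift by >= 64 is undefined in C++
    int64_t nSubsidy = 50 * COIN;
    return nSubsidy >> halvings;     // halve every interval
}

int main()
{
    std::printf("height 0:      %lld\n", (long long)GetBlockSubsidySketch(0, 210000));      // 5000000000
    std::printf("height 210000: %lld\n", (long long)GetBlockSubsidySketch(210000, 210000)); // 2500000000
    std::printf("height 420000: %lld\n", (long long)GetBlockSubsidySketch(420000, 210000)); // 1250000000
    return 0;
}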
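
Note on PartitionCheck(): it models the number of blocks seen in a 4-hour window as Poisson with mean SPAN_SECONDS / nPowTargetSpacing (24 at 10-minute spacing) and alerts when the probability of the observed count drops below roughly one false positive per fifty years of such windows. The sketch below reproduces that threshold without the boost dependency by computing the Poisson pmf with std::lgamma; PoissonPdf and the sample counts are illustrative, not part of the patch.

// Sketch: which observed block counts would trip the partition alert.
#include <cmath>
#include <cstdio>
#include <initializer_list>

// Poisson pmf via logarithms; numerically equivalent to
// boost::math::pdf(boost::math::poisson_distribution<double>(lambda), k).
double PoissonPdf(int k, double lambda)
{
    return std::exp(k * std::log(lambda) - lambda - std::lgamma(k + 1.0));
}

int main()
{
    const int SPAN_SECONDS = 4 * 60 * 60;
    const double FIFTY_YEARS = 50.0 * 365 * 24 * 60 * 60;
    const int BLOCKS_EXPECTED = SPAN_SECONDS / 600;                    // 24 at 10-minute spacing
    const double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);  // about 9.1e-6

    for (int nBlocks : {0, 5, 10, 24, 50, 60}) {
        double p = PoissonPdf(nBlocks, BLOCKS_EXPECTED);
        // The patch alerts on either tail; here both branches are folded together.
        bool alert = p <= alertThreshold && nBlocks != BLOCKS_EXPECTED;
        std::printf("blocks=%2d p=%.3g alert=%s\n", nBlocks, p, alert ? "yes" : "no");
    }
    return 0;
}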
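
Note on the flush rework: the coin cache budget is now tracked in bytes (nCoinCacheUsage = 5000 * 300 in this patch) instead of entry count, a PERIODIC flush triggers once usage crosses about 90% of the budget (cacheSize * 10/9 > nCoinCacheUsage), and IF_NEEDED flushes only once the budget is exceeded. A minimal standalone sketch of just those two conditions, with WouldFlushForCache as an illustrative helper rather than the real FlushStateToDisk():

#include <cstddef>
#include <cstdio>

enum FlushStateMode { FLUSH_STATE_IF_NEEDED, FLUSH_STATE_PERIODIC, FLUSH_STATE_ALWAYS };

bool WouldFlushForCache(FlushStateMode mode, size_t cacheSize, size_t nCoinCacheUsage)
{
    // Mirrors the fCacheLarge / fCacheCritical conditions from the diff.
    bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0 / 9) > nCoinCacheUsage;
    bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
    return fCacheLarge || fCacheCritical;
}

int main()
{
    const size_t nCoinCacheUsage = 5000 * 300; // 1,500,000 bytes, as in the patch
    std::printf("periodic  at 1,300,000: %d\n", WouldFlushForCache(FLUSH_STATE_PERIODIC, 1300000, nCoinCacheUsage));  // 0
    std::printf("periodic  at 1,400,000: %d\n", WouldFlushForCache(FLUSH_STATE_PERIODIC, 1400000, nCoinCacheUsage));  // 1
    std::printf("if-needed at 1,400,000: %d\n", WouldFlushForCache(FLUSH_STATE_IF_NEEDED, 1400000, nCoinCacheUsage)); // 0
    std::printf("if-needed at 1,600,000: %d\n", WouldFlushForCache(FLUSH_STATE_IF_NEEDED, 1600000, nCoinCacheUsage)); // 1
    return 0;
}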