Diffstat (limited to 'src/main.cpp')
-rw-r--r-- | src/main.cpp | 785
1 file changed, 336 insertions, 449 deletions
diff --git a/src/main.cpp b/src/main.cpp index 784e2569d9..27c427f7cd 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1,21 +1,24 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2014 The Bitcoin developers +// Copyright (c) 2009-2014 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "main.h" +#include "arith_uint256.h" #include "addrman.h" #include "alert.h" #include "chainparams.h" #include "checkpoints.h" #include "checkqueue.h" #include "init.h" +#include "merkleblock.h" #include "net.h" #include "pow.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" +#include "undo.h" #include "util.h" #include "utilmoneystr.h" @@ -26,7 +29,6 @@ #include <boost/filesystem/fstream.hpp> #include <boost/thread.hpp> -using namespace boost; using namespace std; #if defined(NDEBUG) @@ -66,6 +68,13 @@ map<uint256, COrphanTx> mapOrphanTransactions; map<uint256, set<uint256> > mapOrphanTransactionsByPrev; void EraseOrphansFor(NodeId peer); +/** + * Returns true if there are nRequired or more blocks of minVersion or above + * in the last Params().ToCheckBlockUpgradeMajority() blocks, starting at pstart + * and going backwards. + */ +static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned int nRequired); + /** Constant stuff for coinbase transactions we create: */ CScript COINBASE_FLAGS; @@ -130,9 +139,14 @@ namespace { uint256 hash; CBlockIndex *pindex; //! Optional. int64_t nTime; //! Time of "getdata" request in microseconds. + int nValidatedQueuedBefore; //! Number of blocks queued with validated headers (globally) at the time this one is requested. + bool fValidatedHeaders; //! Whether this block has validated headers at the time of request. }; map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight; + /** Number of blocks in flight with validated headers. */ + int nQueuedValidatedHeaders = 0; + /** Number of preferable block download peers. */ int nPreferredDownload = 0; @@ -252,7 +266,7 @@ struct CNodeState { nMisbehavior = 0; fShouldBan = false; pindexBestKnownBlock = NULL; - hashLastUnknownBlock = uint256(0); + hashLastUnknownBlock.SetNull(); pindexLastCommonBlock = NULL; fSyncStarted = false; nStallingSince = 0; @@ -314,6 +328,7 @@ void MarkBlockAsReceived(const uint256& hash) { map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { CNodeState *state = State(itInFlight->second.first); + nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders; state->vBlocksInFlight.erase(itInFlight->second.second); state->nBlocksInFlight--; state->nStallingSince = 0; @@ -329,7 +344,8 @@ void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex *pindex // Make sure it's not listed somewhere already. 
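A recurring pattern in the hunks above is the replacement of uint256(0) comparisons and assignments with IsNull()/SetNull(), which goes together with the new "arith_uint256.h" include: uint256 is now treated as an opaque 256-bit blob, and arithmetic lives in a separate type. A minimal sketch of that split (standalone helpers with made-up names, not part of the patch):

    #include "uint256.h"
    #include "arith_uint256.h"

    // Sketch only: illustrates the blob/arithmetic split this diff adopts.
    bool IsUnset(const uint256& hash)
    {
        return hash.IsNull();            // replaces the old `hash == uint256(0)` test
    }

    void Clear(uint256& hash)
    {
        hash.SetNull();                  // replaces `hash = uint256(0)`
    }

    arith_uint256 AsNumber(const uint256& hash)
    {
        return UintToArith256(hash);     // explicit conversion where math is still needed
    }
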
MarkBlockAsReceived(hash); - QueuedBlock newentry = {hash, pindex, GetTimeMicros()}; + QueuedBlock newentry = {hash, pindex, GetTimeMicros(), nQueuedValidatedHeaders, pindex != NULL}; + nQueuedValidatedHeaders += newentry.fValidatedHeaders; list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry); state->nBlocksInFlight++; mapBlocksInFlight[hash] = std::make_pair(nodeid, it); @@ -340,12 +356,12 @@ void ProcessBlockAvailability(NodeId nodeid) { CNodeState *state = State(nodeid); assert(state != NULL); - if (state->hashLastUnknownBlock != 0) { + if (!state->hashLastUnknownBlock.IsNull()) { BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock); if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) { if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) state->pindexBestKnownBlock = itOld->second; - state->hashLastUnknownBlock = uint256(0); + state->hashLastUnknownBlock.SetNull(); } } } @@ -441,6 +457,10 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl // are not yet downloaded and not in flight to vBlocks. In the mean time, update // pindexLastCommonBlock as long as all ancestors are already downloaded. BOOST_FOREACH(CBlockIndex* pindex, vToFetch) { + if (!pindex->IsValid(BLOCK_VALID_TREE)) { + // We consider the chain that this peer is on invalid. + return; + } if (pindex->nStatus & BLOCK_HAVE_DATA) { if (pindex->nChainTx) state->pindexLastCommonBlock = pindex; @@ -612,34 +632,11 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) bool IsStandardTx(const CTransaction& tx, string& reason) { - AssertLockHeld(cs_main); if (tx.nVersion > CTransaction::CURRENT_VERSION || tx.nVersion < 1) { reason = "version"; return false; } - // Treat non-final transactions as non-standard to prevent a specific type - // of double-spend attack, as well as DoS attacks. (if the transaction - // can't be mined, the attacker isn't expending resources broadcasting it) - // Basically we don't want to propagate transactions that can't be included in - // the next block. - // - // However, IsFinalTx() is confusing... Without arguments, it uses - // chainActive.Height() to evaluate nLockTime; when a block is accepted, chainActive.Height() - // is set to the value of nHeight in the block. However, when IsFinalTx() - // is called within CBlock::AcceptBlock(), the height of the block *being* - // evaluated is what is used. Thus if we want to know if a transaction can - // be part of the *next* block, we need to call IsFinalTx() with one more - // than chainActive.Height(). - // - // Timestamps on the other hand don't get any special treatment, because we - // can't know what timestamp the next block will have, and there aren't - // timestamp applications where it matters. - if (!IsFinalTx(tx, chainActive.Height() + 1)) { - reason = "non-final"; - return false; - } - // Extremely large transactions with lots of inputs can cost the network // almost as much to process as they cost the sender in fees, because // computing signature hashes is O(ninputs*txsize). Limiting transactions @@ -747,10 +744,10 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) // non-standard. 
Note that this EvalScript() call will // be quick, because if there are any operations // beside "push data" in the scriptSig - // IsStandard() will have already returned false + // IsStandardTx() will have already returned false // and this method isn't called. vector<vector<unsigned char> > stack; - if (!EvalScript(stack, tx.vin[i].scriptSig, false, BaseSignatureChecker())) + if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker())) return false; if (whichType == TX_SCRIPTHASH) @@ -823,14 +820,14 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state) { // Basic checks that don't depend on any context if (tx.vin.empty()) - return state.DoS(10, error("CheckTransaction() : vin empty"), + return state.DoS(10, error("CheckTransaction(): vin empty"), REJECT_INVALID, "bad-txns-vin-empty"); if (tx.vout.empty()) - return state.DoS(10, error("CheckTransaction() : vout empty"), + return state.DoS(10, error("CheckTransaction(): vout empty"), REJECT_INVALID, "bad-txns-vout-empty"); // Size limits if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE) - return state.DoS(100, error("CheckTransaction() : size limits failed"), + return state.DoS(100, error("CheckTransaction(): size limits failed"), REJECT_INVALID, "bad-txns-oversize"); // Check for negative or overflow output values @@ -838,14 +835,14 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state) BOOST_FOREACH(const CTxOut& txout, tx.vout) { if (txout.nValue < 0) - return state.DoS(100, error("CheckTransaction() : txout.nValue negative"), + return state.DoS(100, error("CheckTransaction(): txout.nValue negative"), REJECT_INVALID, "bad-txns-vout-negative"); if (txout.nValue > MAX_MONEY) - return state.DoS(100, error("CheckTransaction() : txout.nValue too high"), + return state.DoS(100, error("CheckTransaction(): txout.nValue too high"), REJECT_INVALID, "bad-txns-vout-toolarge"); nValueOut += txout.nValue; if (!MoneyRange(nValueOut)) - return state.DoS(100, error("CheckTransaction() : txout total out of range"), + return state.DoS(100, error("CheckTransaction(): txout total out of range"), REJECT_INVALID, "bad-txns-txouttotal-toolarge"); } @@ -854,7 +851,7 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state) BOOST_FOREACH(const CTxIn& txin, tx.vin) { if (vInOutPoints.count(txin.prevout)) - return state.DoS(100, error("CheckTransaction() : duplicate inputs"), + return state.DoS(100, error("CheckTransaction(): duplicate inputs"), REJECT_INVALID, "bad-txns-inputs-duplicate"); vInOutPoints.insert(txin.prevout); } @@ -862,14 +859,14 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state) if (tx.IsCoinBase()) { if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) - return state.DoS(100, error("CheckTransaction() : coinbase script size"), + return state.DoS(100, error("CheckTransaction(): coinbase script size"), REJECT_INVALID, "bad-cb-length"); } else { BOOST_FOREACH(const CTxIn& txin, tx.vin) if (txin.prevout.IsNull()) - return state.DoS(10, error("CheckTransaction() : prevout is null"), + return state.DoS(10, error("CheckTransaction(): prevout is null"), REJECT_INVALID, "bad-txns-prevout-null"); } @@ -907,27 +904,47 @@ CAmount GetMinRelayFee(const CTransaction& tx, unsigned int nBytes, bool fAllowF bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree, - bool* pfMissingInputs, bool fRejectInsaneFee) + bool* pfMissingInputs, bool fRejectAbsurdFee) { 
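The EvalScript() change in the AreInputsStandard() hunk above only swaps the old boolean flag argument for the new flags enum (SCRIPT_VERIFY_NONE); the surrounding P2SH logic is unchanged. As a rough standalone sketch (the helper name is made up, and it assumes the scriptSig has already been checked to be push-only, as the comment above notes):

    #include "script/interpreter.h"
    #include "script/standard.h"

    // Sketch: evaluate a push-only P2SH scriptSig and pull out the redeem script,
    // which is what AreInputsStandard() goes on to sanity-check.
    static bool ExtractRedeemScriptSketch(const CScript& scriptSig, CScript& redeemScriptOut)
    {
        std::vector<std::vector<unsigned char> > stack;
        // Push-only data, so this executes quickly and cannot fail on opcodes.
        if (!EvalScript(stack, scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker()))
            return false;
        if (stack.empty())
            return false;
        // The last stack element is the serialized redeem script.
        redeemScriptOut = CScript(stack.back().begin(), stack.back().end());
        return true;
    }
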
AssertLockHeld(cs_main); if (pfMissingInputs) *pfMissingInputs = false; if (!CheckTransaction(tx, state)) - return error("AcceptToMemoryPool: : CheckTransaction failed"); + return error("AcceptToMemoryPool: CheckTransaction failed"); // Coinbase is only valid in a block, not as a loose transaction if (tx.IsCoinBase()) - return state.DoS(100, error("AcceptToMemoryPool: : coinbase as individual tx"), + return state.DoS(100, error("AcceptToMemoryPool: coinbase as individual tx"), REJECT_INVALID, "coinbase"); // Rather not work on nonstandard transactions (unless -testnet/-regtest) string reason; if (Params().RequireStandard() && !IsStandardTx(tx, reason)) return state.DoS(0, - error("AcceptToMemoryPool : nonstandard transaction: %s", reason), + error("AcceptToMemoryPool: nonstandard transaction: %s", reason), REJECT_NONSTANDARD, reason); + // Only accept nLockTime-using transactions that can be mined in the next + // block; we don't want our mempool filled up with transactions that can't + // be mined yet. + // + // However, IsFinalTx() is confusing... Without arguments, it uses + // chainActive.Height() to evaluate nLockTime; when a block is accepted, + // chainActive.Height() is set to the value of nHeight in the block. + // However, when IsFinalTx() is called within CBlock::AcceptBlock(), the + // height of the block *being* evaluated is what is used. Thus if we want + // to know if a transaction can be part of the *next* block, we need to + // call IsFinalTx() with one more than chainActive.Height(). + // + // Timestamps on the other hand don't get any special treatment, because we + // can't know what timestamp the next block will have, and there aren't + // timestamp applications where it matters. + if (!IsFinalTx(tx, chainActive.Height() + 1)) + return state.DoS(0, + error("AcceptToMemoryPool: non-final"), + REJECT_NONSTANDARD, "non-final"); + // is it already in the memory pool? uint256 hash = tx.GetHash(); if (pool.exists(hash)) @@ -974,7 +991,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa // are the actual inputs available? if (!view.HaveInputs(tx)) - return state.Invalid(error("AcceptToMemoryPool : inputs already spent"), + return state.Invalid(error("AcceptToMemoryPool: inputs already spent"), REJECT_DUPLICATE, "bad-txns-inputs-spent"); // Bring the best block into scope @@ -988,19 +1005,19 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa // Check for non-standard pay-to-script-hash in inputs if (Params().RequireStandard() && !AreInputsStandard(tx, view)) - return error("AcceptToMemoryPool: : nonstandard transaction input"); + return error("AcceptToMemoryPool: nonstandard transaction input"); // Check that the transaction doesn't have an excessive number of // sigops, making it impossible to mine. Since the coinbase transaction - // itself can contain sigops MAX_TX_SIGOPS is less than + // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than // merely non-standard transaction. 
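The comment above about IsFinalTx() and chainActive.Height() + 1 is the heart of the move: mempool acceptance now asks whether a transaction would be final in the *next* block. A simplified sketch of the nLockTime rule it describes (helper name invented; the real IsFinalTx() additionally treats a transaction whose inputs all use the maximum nSequence as final regardless of nLockTime, and LOCKTIME_THRESHOLD is the existing 500000000 cutoff between height- and time-based lock times):

    // Sketch only.
    bool WouldBeFinalInNextBlockSketch(const CTransaction& tx, int nTipHeight, int64_t nNextBlockTime)
    {
        if (tx.nLockTime == 0)
            return true;
        int64_t cutoff = ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD)
                             ? (int64_t)(nTipHeight + 1)   // evaluate against the *next* block's height
                             : nNextBlockTime;
        return (int64_t)tx.nLockTime < cutoff;
    }
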
unsigned int nSigOps = GetLegacySigOpCount(tx); nSigOps += GetP2SHSigOpCount(tx, view); - if (nSigOps > MAX_TX_SIGOPS) + if (nSigOps > MAX_STANDARD_TX_SIGOPS) return state.DoS(0, - error("AcceptToMemoryPool : too many sigops %s, %d > %d", - hash.ToString(), nSigOps, MAX_TX_SIGOPS), + error("AcceptToMemoryPool: too many sigops %s, %d > %d", + hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS), REJECT_NONSTANDARD, "bad-txns-too-many-sigops"); CAmount nValueOut = tx.GetValueOut(); @@ -1013,11 +1030,16 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa // Don't accept it if it can't get into a block CAmount txMinFee = GetMinRelayFee(tx, nSize, true); if (fLimitFree && nFees < txMinFee) - return state.DoS(0, error("AcceptToMemoryPool : not enough fees %s, %d < %d", + return state.DoS(0, error("AcceptToMemoryPool: not enough fees %s, %d < %d", hash.ToString(), nFees, txMinFee), REJECT_INSUFFICIENTFEE, "insufficient fee"); - // Continuously rate-limit free (really, very-low-fee)transactions + // Require that free transactions have sufficient priority to be mined in the next block. + if (GetBoolArg("-relaypriority", true) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) { + return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority"); + } + + // Continuously rate-limit free (really, very-low-fee) transactions // This mitigates 'penny-flooding' -- sending thousands of free transactions just to // be annoying or make others' transactions take longer to confirm. if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize)) @@ -1035,14 +1057,14 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa // -limitfreerelay unit is thousand-bytes-per-minute // At default rate it would take over a month to fill 1GB if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000) - return state.DoS(0, error("AcceptToMemoryPool : free transaction rejected by rate limiter"), - REJECT_INSUFFICIENTFEE, "insufficient priority"); + return state.DoS(0, error("AcceptToMemoryPool: free transaction rejected by rate limiter"), + REJECT_INSUFFICIENTFEE, "rate limited free transaction"); LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize); dFreeCount += nSize; } - if (fRejectInsaneFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000) - return error("AcceptToMemoryPool: : insane fees %s, %d > %d", + if (fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000) + return error("AcceptToMemoryPool: absurdly high fees %s, %d > %d", hash.ToString(), nFees, ::minRelayTxFee.GetFee(nSize) * 10000); @@ -1050,8 +1072,23 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa // This is done last to help prevent CPU exhaustion denial-of-service attacks. if (!CheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true)) { - return error("AcceptToMemoryPool: : ConnectInputs failed %s", hash.ToString()); + return error("AcceptToMemoryPool: ConnectInputs failed %s", hash.ToString()); } + + // Check again against just the consensus-critical mandatory script + // verification flags, in case of bugs in the standard flags that cause + // transactions to pass as valid when they're actually invalid. For + // instance the STRICTENC flag was incorrectly allowing certain + // CHECKSIG NOT scripts to pass, even though they were invalid. 
+ // + // There is a similar check in CreateNewBlock() to prevent creating + // invalid blocks, however allowing such transactions into the mempool + // can be exploited as a DoS attack. + if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true)) + { + return error("AcceptToMemoryPool: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString()); + } + // Store transaction in memory pool.addUnchecked(hash, entry); } @@ -1078,17 +1115,19 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock CDiskTxPos postx; if (pblocktree->ReadTxIndex(hash, postx)) { CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION); + if (file.IsNull()) + return error("%s: OpenBlockFile failed", __func__); CBlockHeader header; try { file >> header; fseek(file.Get(), postx.nTxOffset, SEEK_CUR); file >> txOut; - } catch (std::exception &e) { - return error("%s : Deserialize or I/O error - %s", __func__, e.what()); + } catch (const std::exception& e) { + return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } hashBlock = header.GetHash(); if (txOut.GetHash() != hash) - return error("%s : txid mismatch", __func__); + return error("%s: txid mismatch", __func__); return true; } } @@ -1137,7 +1176,7 @@ bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos) // Open history file to append CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) - return error("WriteBlockToDisk : OpenBlockFile failed"); + return error("WriteBlockToDisk: OpenBlockFile failed"); // Write index header unsigned int nSize = fileout.GetSerializeSize(block); @@ -1146,7 +1185,7 @@ bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos) // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) - return error("WriteBlockToDisk : ftell failed"); + return error("WriteBlockToDisk: ftell failed"); pos.nPos = (unsigned int)fileOutPos; fileout << block; @@ -1160,19 +1199,19 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos) // Open history file to read CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) - return error("ReadBlockFromDisk : OpenBlockFile failed"); + return error("ReadBlockFromDisk: OpenBlockFile failed"); // Read block try { filein >> block; } - catch (std::exception &e) { - return error("%s : Deserialize or I/O error - %s", __func__, e.what()); + catch (const std::exception& e) { + return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } // Check the header if (!CheckProofOfWork(block.GetHash(), block.nBits)) - return error("ReadBlockFromDisk : Errors in block header"); + return error("ReadBlockFromDisk: Errors in block header"); return true; } @@ -1182,13 +1221,13 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex) if (!ReadBlockFromDisk(block, pindex->GetBlockPos())) return false; if (block.GetHash() != pindex->GetBlockHash()) - return error("ReadBlockFromDisk(CBlock&, CBlockIndex*) : GetHash() doesn't match index"); + return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index"); return true; } CAmount GetBlockValue(int nHeight, const CAmount& nFees) { - int64_t nSubsidy = 50 * COIN; + CAmount nSubsidy = 50 * COIN; int halvings = nHeight / Params().SubsidyHalvingInterval(); // Force block reward to zero when right shift is undefined. 
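The GetBlockValue() hunk above only shows the int64_t-to-CAmount change; for context, the full subsidy calculation it belongs to looks roughly like this (a sketch of the existing function, with the halving interval passed in rather than read from Params()):

    // Sketch: the 50-coin subsidy halves every nHalvingInterval blocks, and is
    // forced to zero once the shift count reaches 64, where `>>` on a 64-bit
    // value would be undefined behaviour.
    CAmount GetBlockValueSketch(int nHeight, const CAmount& nFees, int nHalvingInterval)
    {
        CAmount nSubsidy = 50 * COIN;
        int halvings = nHeight / nHalvingInterval;
        if (halvings >= 64)
            return nFees;
        nSubsidy >>= halvings;      // cut the subsidy in half every halving interval
        return nSubsidy + nFees;
    }
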
@@ -1354,9 +1393,20 @@ void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCach if (!tx.IsCoinBase()) { txundo.vprevout.reserve(tx.vin.size()); BOOST_FOREACH(const CTxIn &txin, tx.vin) { - txundo.vprevout.push_back(CTxInUndo()); - bool ret = inputs.ModifyCoins(txin.prevout.hash)->Spend(txin.prevout, txundo.vprevout.back()); - assert(ret); + CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash); + unsigned nPos = txin.prevout.n; + + if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull()) + assert(false); + // mark an outpoint spent, and construct undo information + txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos])); + coins->Spend(nPos); + if (coins->vout.size() == 0) { + CTxInUndo& undo = txundo.vprevout.back(); + undo.nHeight = coins->nHeight; + undo.fCoinBase = coins->fCoinBase; + undo.nVersion = coins->nVersion; + } } } @@ -1364,10 +1414,16 @@ void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCach inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight); } +void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, int nHeight) +{ + CTxUndo txundo; + UpdateCoins(tx, state, inputs, txundo, nHeight); +} + bool CScriptCheck::operator()() { const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; - if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingSignatureChecker(*ptxTo, nIn, cacheStore), &error)) { - return ::error("CScriptCheck() : %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error)); + if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, cacheStore), &error)) { + return ::error("CScriptCheck(): %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error)); } return true; } @@ -1382,7 +1438,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi // This doesn't trigger the DoS code on purpose; if it did, it would make it easier // for an attacker to attempt to split the network. if (!inputs.HaveInputs(tx)) - return state.Invalid(error("CheckInputs() : %s inputs unavailable", tx.GetHash().ToString())); + return state.Invalid(error("CheckInputs(): %s inputs unavailable", tx.GetHash().ToString())); // While checking, GetBestBlock() refers to the parent block. // This is also true for mempool checks. 
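The rewritten spend loop in UpdateCoins() above does two jobs at once: it marks the outpoint spent and captures enough undo information to reverse the spend later. A condensed sketch of just that step, mirroring the added lines (helper name invented, CCoins passed in directly instead of going through CCoinsModifier):

    // Sketch: spend output nPos of `coins` and record undo data. When the spend
    // empties the coin entirely, the coin's metadata is stored too, so
    // DisconnectBlock() can recreate the whole CCoins entry later.
    void SpendWithUndoSketch(CCoins& coins, unsigned int nPos, CTxInUndo& undo)
    {
        assert(nPos < coins.vout.size() && !coins.vout[nPos].IsNull());
        undo = CTxInUndo(coins.vout[nPos]);    // always keep the spent txout itself
        coins.Spend(nPos);
        if (coins.vout.size() == 0) {          // last unspent output just went away
            undo.nHeight   = coins.nHeight;
            undo.fCoinBase = coins.fCoinBase;
            undo.nVersion  = coins.nVersion;
        }
    }
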
@@ -1400,31 +1456,31 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi if (coins->IsCoinBase()) { if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) return state.Invalid( - error("CheckInputs() : tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight), + error("CheckInputs(): tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight), REJECT_INVALID, "bad-txns-premature-spend-of-coinbase"); } // Check for negative or overflow input values nValueIn += coins->vout[prevout.n].nValue; if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn)) - return state.DoS(100, error("CheckInputs() : txin values out of range"), + return state.DoS(100, error("CheckInputs(): txin values out of range"), REJECT_INVALID, "bad-txns-inputvalues-outofrange"); } if (nValueIn < tx.GetValueOut()) - return state.DoS(100, error("CheckInputs() : %s value in (%s) < value out (%s)", + return state.DoS(100, error("CheckInputs(): %s value in (%s) < value out (%s)", tx.GetHash().ToString(), FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())), REJECT_INVALID, "bad-txns-in-belowout"); // Tally transaction fees CAmount nTxFee = nValueIn - tx.GetValueOut(); if (nTxFee < 0) - return state.DoS(100, error("CheckInputs() : %s nTxFee < 0", tx.GetHash().ToString()), + return state.DoS(100, error("CheckInputs(): %s nTxFee < 0", tx.GetHash().ToString()), REJECT_INVALID, "bad-txns-fee-negative"); nFees += nTxFee; if (!MoneyRange(nFees)) - return state.DoS(100, error("CheckInputs() : nFees out of range"), + return state.DoS(100, error("CheckInputs(): nFees out of range"), REJECT_INVALID, "bad-txns-fee-outofrange"); // The first loop above does all the inexpensive checks. @@ -1474,7 +1530,63 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi return true; } +namespace { + +bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock) +{ + // Open history file to append + CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); + if (fileout.IsNull()) + return error("%s: OpenUndoFile failed", __func__); + + // Write index header + unsigned int nSize = fileout.GetSerializeSize(blockundo); + fileout << FLATDATA(Params().MessageStart()) << nSize; + + // Write undo data + long fileOutPos = ftell(fileout.Get()); + if (fileOutPos < 0) + return error("%s: ftell failed", __func__); + pos.nPos = (unsigned int)fileOutPos; + fileout << blockundo; + + // calculate & write checksum + CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); + hasher << hashBlock; + hasher << blockundo; + fileout << hasher.GetHash(); + + return true; +} + +bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock) +{ + // Open history file to read + CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); + if (filein.IsNull()) + return error("%s: OpenBlockFile failed", __func__); + // Read block + uint256 hashChecksum; + try { + filein >> blockundo; + filein >> hashChecksum; + } + catch (const std::exception& e) { + return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + } + + // Verify checksum + CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); + hasher << hashBlock; + hasher << blockundo; + if (hashChecksum != hasher.GetHash()) + return error("%s: Checksum mismatch", __func__); + + return true; +} + +} // anon namespace bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean) { @@ -1488,12 
+1600,12 @@ bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex CBlockUndo blockUndo; CDiskBlockPos pos = pindex->GetUndoPos(); if (pos.IsNull()) - return error("DisconnectBlock() : no undo data available"); - if (!blockUndo.ReadFromDisk(pos, pindex->pprev->GetBlockHash())) - return error("DisconnectBlock() : failure reading undo data"); + return error("DisconnectBlock(): no undo data available"); + if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) + return error("DisconnectBlock(): failure reading undo data"); if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) - return error("DisconnectBlock() : block and undo data inconsistent"); + return error("DisconnectBlock(): block and undo data inconsistent"); // undo transactions in reverse order for (int i = block.vtx.size() - 1; i >= 0; i--) { @@ -1516,7 +1628,7 @@ bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex if (outsBlock.nVersion < 0) outs->nVersion = outsBlock.nVersion; if (*outs != outsBlock) - fClean = fClean && error("DisconnectBlock() : added transaction mismatch? database corrupted"); + fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted"); // remove outputs outs->Clear(); @@ -1526,7 +1638,7 @@ bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex if (i > 0) { // not coinbases const CTxUndo &txundo = blockUndo.vtxundo[i-1]; if (txundo.vprevout.size() != tx.vin.size()) - return error("DisconnectBlock() : transaction and undo data inconsistent"); + return error("DisconnectBlock(): transaction and undo data inconsistent"); for (unsigned int j = tx.vin.size(); j-- > 0;) { const COutPoint &out = tx.vin[j].prevout; const CTxInUndo &undo = txundo.vprevout[j]; @@ -1534,17 +1646,17 @@ bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex if (undo.nHeight != 0) { // undo data contains height: this is the last output of the prevout tx being spent if (!coins->IsPruned()) - fClean = fClean && error("DisconnectBlock() : undo data overwriting existing transaction"); + fClean = fClean && error("DisconnectBlock(): undo data overwriting existing transaction"); coins->Clear(); coins->fCoinBase = undo.fCoinBase; coins->nHeight = undo.nHeight; coins->nVersion = undo.nVersion; } else { if (coins->IsPruned()) - fClean = fClean && error("DisconnectBlock() : undo data adding output to missing transaction"); + fClean = fClean && error("DisconnectBlock(): undo data adding output to missing transaction"); } if (coins->IsAvailable(out.n)) - fClean = fClean && error("DisconnectBlock() : undo data overwriting existing output"); + fClean = fClean && error("DisconnectBlock(): undo data overwriting existing output"); if (coins->vout.size() < out.n+1) coins->vout.resize(out.n+1); coins->vout[out.n] = undo.txout; @@ -1609,13 +1721,14 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin return false; // verify that the view's current state corresponds to the previous block - uint256 hashPrevBlock = pindex->pprev == NULL ? uint256(0) : pindex->pprev->GetBlockHash(); + uint256 hashPrevBlock = pindex->pprev == NULL ? 
uint256() : pindex->pprev->GetBlockHash(); assert(hashPrevBlock == view.GetBestBlock()); // Special case for the genesis block, skipping connection of its transactions // (its coinbase is unspendable) if (block.GetHash() == Params().HashGenesisBlock()) { - view.SetBestBlock(pindex->GetBlockHash()); + if (!fJustCheck) + view.SetBestBlock(pindex->GetBlockHash()); return true; } @@ -1634,13 +1747,13 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin // two in the chain that violate it. This prevents exploiting the issue against nodes in their // initial block download. bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash. - !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) || - (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"))); + !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) || + (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"))); if (fEnforceBIP30) { BOOST_FOREACH(const CTransaction& tx, block.vtx) { const CCoins* coins = view.AccessCoins(tx.GetHash()); if (coins && !coins->IsPruned()) - return state.DoS(100, error("ConnectBlock() : tried to overwrite transaction"), + return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"), REJECT_INVALID, "bad-txns-BIP30"); } } @@ -1651,6 +1764,11 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE; + // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks, when 75% of the network has upgraded: + if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, Params().EnforceBlockUpgradeMajority())) { + flags |= SCRIPT_VERIFY_DERSIG; + } + CBlockUndo blockundo; CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL); @@ -1670,13 +1788,13 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin nInputs += tx.vin.size(); nSigOps += GetLegacySigOpCount(tx); if (nSigOps > MAX_BLOCK_SIGOPS) - return state.DoS(100, error("ConnectBlock() : too many sigops"), + return state.DoS(100, error("ConnectBlock(): too many sigops"), REJECT_INVALID, "bad-blk-sigops"); if (!tx.IsCoinBase()) { if (!view.HaveInputs(tx)) - return state.DoS(100, error("ConnectBlock() : inputs missing/spent"), + return state.DoS(100, error("ConnectBlock(): inputs missing/spent"), REJECT_INVALID, "bad-txns-inputs-missingorspent"); if (fStrictPayToScriptHash) @@ -1686,7 +1804,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin // an incredibly-expensive-to-validate block. 
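The new SCRIPT_VERIFY_DERSIG lines a little above are the BIP66 soft-fork switch: strict DER signature encoding becomes enforced once 75% of recent blocks advertise version 3. As a standalone sketch of the flag selection (helper name invented; IsSuperMajority is the static function declared near the top of this diff):

    // Sketch: choose the script verification flags ConnectBlock() will use.
    unsigned int BlockScriptFlagsSketch(const CBlock& block, const CBlockIndex* pindexPrev,
                                        bool fStrictPayToScriptHash)
    {
        unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;

        // BIP66: once the EnforceBlockUpgradeMajority() threshold of recent blocks
        // is version 3 or higher, require strict DER encoding for signatures.
        if (block.nVersion >= 3 &&
            IsSuperMajority(3, pindexPrev, Params().EnforceBlockUpgradeMajority()))
            flags |= SCRIPT_VERIFY_DERSIG;

        return flags;
    }
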
nSigOps += GetP2SHSigOpCount(tx, view); if (nSigOps > MAX_BLOCK_SIGOPS) - return state.DoS(100, error("ConnectBlock() : too many sigops"), + return state.DoS(100, error("ConnectBlock(): too many sigops"), REJECT_INVALID, "bad-blk-sigops"); } @@ -1712,7 +1830,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin if (block.vtx[0].GetValueOut() > GetBlockValue(pindex->nHeight, nFees)) return state.DoS(100, - error("ConnectBlock() : coinbase pays too much (actual=%d vs limit=%d)", + error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)", block.vtx[0].GetValueOut(), GetBlockValue(pindex->nHeight, nFees)), REJECT_INVALID, "bad-cb-amount"); @@ -1730,8 +1848,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin if (pindex->GetUndoPos().IsNull()) { CDiskBlockPos pos; if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40)) - return error("ConnectBlock() : FindUndoPos failed"); - if (!blockundo.WriteToDisk(pos, pindex->pprev->GetBlockHash())) + return error("ConnectBlock(): FindUndoPos failed"); + if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash())) return state.Abort("Failed to write undo data"); // update nUndoPos in block index @@ -1778,6 +1896,7 @@ enum FlushStateMode { bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { LOCK(cs_main); static int64_t nLastWrite = 0; + try { if ((mode == FLUSH_STATE_ALWAYS) || ((mode == FLUSH_STATE_PERIODIC || mode == FLUSH_STATE_IF_NEEDED) && pcoinsTip->GetCacheSize() > nCoinCacheSize) || (mode == FLUSH_STATE_PERIODIC && GetTimeMicros() > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000)) { @@ -1791,24 +1910,23 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { // First make sure all block and undo data is flushed to disk. FlushBlockFile(); // Then update all block file information (which may refer to block and undo files). - bool fileschanged = false; - for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { - if (!pblocktree->WriteBlockFileInfo(*it, vinfoBlockFile[*it])) { - return state.Abort("Failed to write to block index"); + { + std::vector<std::pair<int, const CBlockFileInfo*> > vFiles; + vFiles.reserve(setDirtyFileInfo.size()); + for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { + vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it])); + setDirtyFileInfo.erase(it++); + } + std::vector<const CBlockIndex*> vBlocks; + vBlocks.reserve(setDirtyBlockIndex.size()); + for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { + vBlocks.push_back(*it); + setDirtyBlockIndex.erase(it++); + } + if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { + return state.Abort("Files to write to block index database"); } - fileschanged = true; - setDirtyFileInfo.erase(it++); - } - if (fileschanged && !pblocktree->WriteLastBlockFile(nLastBlockFile)) { - return state.Abort("Failed to write to block index"); - } - for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { - if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(*it))) { - return state.Abort("Failed to write to block index"); - } - setDirtyBlockIndex.erase(it++); } - pblocktree->Sync(); // Finally flush the chainstate (which may refer to block index entries). 
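UndoWriteToDisk(), called in the ConnectBlock() hunk above and defined earlier in this diff, appends a checksum after the serialized undo data: a double-SHA256 (via CHashWriter) of the parent block hash concatenated with the undo record, which UndoReadFromDisk() recomputes and compares. A sketch of just that checksum (helper name invented):

    #include "hash.h"
    #include "undo.h"

    // Sketch: binding the parent block hash into the digest catches both plain
    // corruption and undo data that has been attached to the wrong block.
    uint256 UndoChecksumSketch(const uint256& hashBlock, const CBlockUndo& blockundo)
    {
        CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
        hasher << hashBlock;
        hasher << blockundo;
        return hasher.GetHash();
    }
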
if (!pcoinsTip->Flush()) return state.Abort("Failed to write to coin database"); @@ -1818,6 +1936,9 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { } nLastWrite = GetTimeMicros(); } + } catch (const std::runtime_error& e) { + return state.Abort(std::string("System error while flushing: ") + e.what()); + } return true; } @@ -1879,7 +2000,7 @@ bool static DisconnectTip(CValidationState &state) { { CCoinsViewCache view(pcoinsTip); if (!DisconnectBlock(block, state, pindexDelete, view)) - return error("DisconnectTip() : DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); + return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); assert(view.Flush()); } LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001); @@ -1891,10 +2012,10 @@ bool static DisconnectTip(CValidationState &state) { // ignore validation errors in resurrected transactions list<CTransaction> removed; CValidationState stateDummy; - if (!tx.IsCoinBase()) - if (!AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL)) - mempool.remove(tx, removed, true); + if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL)) + mempool.remove(tx, removed, true); } + mempool.removeCoinbaseSpends(pcoinsTip, pindexDelete->nHeight); mempool.check(pcoinsTip); // Update chainActive and related variables. UpdateTip(pindexDelete->pprev); @@ -1939,7 +2060,7 @@ bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock * if (!rv) { if (state.IsInvalid()) InvalidBlockFound(pindexNew, state); - return error("ConnectTip() : ConnectBlock %s failed", pindexNew->GetBlockHash().ToString()); + return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString()); } mapBlockSource.erase(inv.hash); nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; @@ -2376,12 +2497,12 @@ bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool f { // Check proof of work matches claimed amount if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits)) - return state.DoS(50, error("CheckBlockHeader() : proof of work failed"), + return state.DoS(50, error("CheckBlockHeader(): proof of work failed"), REJECT_INVALID, "high-hash"); // Check timestamp if (block.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60) - return state.Invalid(error("CheckBlockHeader() : block timestamp too far in the future"), + return state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"), REJECT_INVALID, "time-too-new"); return true; @@ -2401,14 +2522,14 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo bool mutated; uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated); if (block.hashMerkleRoot != hashMerkleRoot2) - return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"), + return state.DoS(100, error("CheckBlock(): hashMerkleRoot mismatch"), REJECT_INVALID, "bad-txnmrklroot", true); // Check for merkle tree malleability (CVE-2012-2459): repeating sequences // of transactions in a block without affecting the merkle root of a block, // while still invalidating it. 
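The malleability the comment above refers to comes from how the merkle root is computed: odd-width levels duplicate their last node, so repeating the final transaction(s) of a block can produce the same root as the honest block. A naive root computation makes the duplication rule visible (sketch only; the real code is CBlock::BuildMerkleTree(), which also sets the `mutated` flag checked just below):

    // Sketch: bottom-up merkle root over a list of txids. Note the duplication of
    // the last hash on odd-width levels -- the source of CVE-2012-2459.
    uint256 MerkleRootSketch(std::vector<uint256> hashes)
    {
        if (hashes.empty())
            return uint256();
        while (hashes.size() > 1) {
            if (hashes.size() & 1)
                hashes.push_back(hashes.back());       // duplicate the last hash
            std::vector<uint256> parents;
            for (size_t i = 0; i < hashes.size(); i += 2)
                parents.push_back(Hash(hashes[i].begin(),   hashes[i].end(),
                                       hashes[i+1].begin(), hashes[i+1].end()));
            hashes.swap(parents);
        }
        return hashes[0];
    }
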
if (mutated) - return state.DoS(100, error("CheckBlock() : duplicate transaction"), + return state.DoS(100, error("CheckBlock(): duplicate transaction"), REJECT_INVALID, "bad-txns-duplicate", true); } @@ -2418,22 +2539,22 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo // Size limits if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE) - return state.DoS(100, error("CheckBlock() : size limits failed"), + return state.DoS(100, error("CheckBlock(): size limits failed"), REJECT_INVALID, "bad-blk-length"); // First transaction must be coinbase, the rest must not be if (block.vtx.empty() || !block.vtx[0].IsCoinBase()) - return state.DoS(100, error("CheckBlock() : first tx is not coinbase"), + return state.DoS(100, error("CheckBlock(): first tx is not coinbase"), REJECT_INVALID, "bad-cb-missing"); for (unsigned int i = 1; i < block.vtx.size(); i++) if (block.vtx[i].IsCoinBase()) - return state.DoS(100, error("CheckBlock() : more than one coinbase"), + return state.DoS(100, error("CheckBlock(): more than one coinbase"), REJECT_INVALID, "bad-cb-multiple"); // Check transactions BOOST_FOREACH(const CTransaction& tx, block.vtx) if (!CheckTransaction(tx, state)) - return error("CheckBlock() : CheckTransaction failed"); + return error("CheckBlock(): CheckTransaction failed"); unsigned int nSigOps = 0; BOOST_FOREACH(const CTransaction& tx, block.vtx) @@ -2441,7 +2562,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo nSigOps += GetLegacySigOpCount(tx); } if (nSigOps > MAX_BLOCK_SIGOPS) - return state.DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"), + return state.DoS(100, error("CheckBlock(): out-of-bounds SigOpCount"), REJECT_INVALID, "bad-blk-sigops", true); return true; @@ -2460,29 +2581,35 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta // Check proof of work if ((!Params().SkipProofOfWorkCheck()) && (block.nBits != GetNextWorkRequired(pindexPrev, &block))) - return state.DoS(100, error("%s : incorrect proof of work", __func__), + return state.DoS(100, error("%s: incorrect proof of work", __func__), REJECT_INVALID, "bad-diffbits"); // Check timestamp against prev if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) - return state.Invalid(error("%s : block's timestamp is too early", __func__), + return state.Invalid(error("%s: block's timestamp is too early", __func__), REJECT_INVALID, "time-too-old"); // Check that the block chain matches the known block chain up to a checkpoint if (!Checkpoints::CheckBlock(nHeight, hash)) - return state.DoS(100, error("%s : rejected by checkpoint lock-in at %d", __func__, nHeight), + return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight), REJECT_CHECKPOINT, "checkpoint mismatch"); // Don't accept any forks from the main chain prior to last checkpoint CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(); if (pcheckpoint && nHeight < pcheckpoint->nHeight) - return state.DoS(100, error("%s : forked chain older than last checkpoint (height %d)", __func__, nHeight)); + return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight)); // Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded: - if (block.nVersion < 2 && - CBlockIndex::IsSuperMajority(2, pindexPrev, Params().RejectBlockOutdatedMajority())) + if (block.nVersion < 2 && IsSuperMajority(2, 
pindexPrev, Params().RejectBlockOutdatedMajority())) { - return state.Invalid(error("%s : rejected nVersion=1 block", __func__), + return state.Invalid(error("%s: rejected nVersion=1 block", __func__), + REJECT_OBSOLETE, "bad-version"); + } + + // Reject block.nVersion=2 blocks when 95% (75% on testnet) of the network has upgraded: + if (block.nVersion < 3 && IsSuperMajority(3, pindexPrev, Params().RejectBlockOutdatedMajority())) + { + return state.Invalid(error("%s : rejected nVersion=2 block", __func__), REJECT_OBSOLETE, "bad-version"); } @@ -2496,18 +2623,17 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIn // Check that all transactions are finalized BOOST_FOREACH(const CTransaction& tx, block.vtx) if (!IsFinalTx(tx, nHeight, block.GetBlockTime())) { - return state.DoS(10, error("%s : contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal"); + return state.DoS(10, error("%s: contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal"); } // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet): - if (block.nVersion >= 2 && - CBlockIndex::IsSuperMajority(2, pindexPrev, Params().EnforceBlockUpgradeMajority())) + if (block.nVersion >= 2 && IsSuperMajority(2, pindexPrev, Params().EnforceBlockUpgradeMajority())) { CScript expect = CScript() << nHeight; if (block.vtx[0].vin[0].scriptSig.size() < expect.size() || !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) { - return state.DoS(100, error("%s : block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height"); + return state.DoS(100, error("%s: block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height"); } } @@ -2527,7 +2653,7 @@ bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBloc if (ppindex) *ppindex = pindex; if (pindex->nStatus & BLOCK_FAILED_MASK) - return state.Invalid(error("%s : block is marked invalid", __func__), 0, "duplicate"); + return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate"); return true; } @@ -2539,8 +2665,10 @@ bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBloc if (hash != Params().HashGenesisBlock()) { BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock); if (mi == mapBlockIndex.end()) - return state.DoS(10, error("%s : prev block not found", __func__), 0, "bad-prevblk"); + return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk"); pindexPrev = (*mi).second; + if (pindexPrev->nStatus & BLOCK_FAILED_MASK) + return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); } if (!ContextualCheckBlockHeader(block, state, pindexPrev)) @@ -2566,7 +2694,7 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, if (pindex->nStatus & BLOCK_HAVE_DATA) { // TODO: deal better with duplicate blocks. 
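The BIP34 check in ContextualCheckBlock() above builds the expected prefix with `CScript() << nHeight` and only compares that prefix, so miners may still append arbitrary data after the height. As a standalone predicate it looks roughly like this (sketch, helper name invented):

    #include "main.h"
    #include <algorithm>

    // Sketch: does the coinbase scriptSig start with the serialized block height,
    // as BIP34 requires once the v2 supermajority threshold is met?
    bool CoinbaseCommitsToHeightSketch(const CBlock& block, int nHeight)
    {
        CScript expect = CScript() << nHeight;
        const CScript& scriptSig = block.vtx[0].vin[0].scriptSig;
        return scriptSig.size() >= expect.size() &&
               std::equal(expect.begin(), expect.end(), scriptSig.begin());
    }
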
- // return state.DoS(20, error("AcceptBlock() : already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate"); + // return state.DoS(20, error("AcceptBlock(): already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate"); return true; } @@ -2587,20 +2715,20 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, if (dbp != NULL) blockPos = *dbp; if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL)) - return error("AcceptBlock() : FindBlockPos failed"); + return error("AcceptBlock(): FindBlockPos failed"); if (dbp == NULL) if (!WriteBlockToDisk(block, blockPos)) return state.Abort("Failed to write block"); if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) - return error("AcceptBlock() : ReceivedBlockTransactions failed"); - } catch(std::runtime_error &e) { + return error("AcceptBlock(): ReceivedBlockTransactions failed"); + } catch (const std::runtime_error& e) { return state.Abort(std::string("System error: ") + e.what()); } return true; } -bool CBlockIndex::IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned int nRequired) +static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned int nRequired) { unsigned int nToCheck = Params().ToCheckBlockUpgradeMajority(); unsigned int nFound = 0; @@ -2613,54 +2741,6 @@ bool CBlockIndex::IsSuperMajority(int minVersion, const CBlockIndex* pstart, uns return (nFound >= nRequired); } -/** Turn the lowest '1' bit in the binary representation of a number into a '0'. */ -int static inline InvertLowestOne(int n) { return n & (n - 1); } - -/** Compute what height to jump back to with the CBlockIndex::pskip pointer. */ -int static inline GetSkipHeight(int height) { - if (height < 2) - return 0; - - // Determine which height to jump back to. Any number strictly lower than height is acceptable, - // but the following expression seems to perform well in simulations (max 110 steps to go back - // up to 2**18 blocks). - return (height & 1) ? InvertLowestOne(InvertLowestOne(height - 1)) + 1 : InvertLowestOne(height); -} - -CBlockIndex* CBlockIndex::GetAncestor(int height) -{ - if (height > nHeight || height < 0) - return NULL; - - CBlockIndex* pindexWalk = this; - int heightWalk = nHeight; - while (heightWalk > height) { - int heightSkip = GetSkipHeight(heightWalk); - int heightSkipPrev = GetSkipHeight(heightWalk - 1); - if (heightSkip == height || - (heightSkip > height && !(heightSkipPrev < heightSkip - 2 && - heightSkipPrev >= height))) { - // Only follow pskip if pprev->pskip isn't better than pskip->pprev. 
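The GetAncestor()/BuildSkip() machinery being removed in this region implements a deterministic skip list (this hunk only shows the removal side from main.cpp): each CBlockIndex stores one extra pointer, pskip, to the ancestor at GetSkipHeight(nHeight), which lets GetAncestor() reach any target height in roughly logarithmic steps instead of walking pprev one block at a time. A small sketch of the height function the removed comments describe (names suffixed "Sketch" are not the patch's code):

    // InvertLowestOne clears the lowest set bit; GetSkipHeight picks a strictly
    // lower height such that chains of pskip pointers cover any target height in
    // O(log n) hops.
    static inline int InvertLowestOneSketch(int n) { return n & (n - 1); }

    static inline int GetSkipHeightSketch(int height)
    {
        if (height < 2)
            return 0;
        return (height & 1) ? InvertLowestOneSketch(InvertLowestOneSketch(height - 1)) + 1
                            : InvertLowestOneSketch(height);
    }

    // Each index's skip pointer is then simply its ancestor at that height:
    //     pskip = pprev->GetAncestor(GetSkipHeight(nHeight));
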
- pindexWalk = pindexWalk->pskip; - heightWalk = heightSkip; - } else { - pindexWalk = pindexWalk->pprev; - heightWalk--; - } - } - return pindexWalk; -} - -const CBlockIndex* CBlockIndex::GetAncestor(int height) const -{ - return const_cast<CBlockIndex*>(this)->GetAncestor(height); -} - -void CBlockIndex::BuildSkip() -{ - if (pprev) - pskip = pprev->GetAncestor(GetSkipHeight(nHeight)); -} bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp) { @@ -2671,7 +2751,7 @@ bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDis LOCK(cs_main); MarkBlockAsReceived(pblock->GetHash()); if (!checked) { - return error("%s : CheckBlock FAILED", __func__); + return error("%s: CheckBlock FAILED", __func__); } // Store to disk @@ -2681,11 +2761,11 @@ bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDis mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId(); } if (!ret) - return error("%s : AcceptBlock FAILED", __func__); + return error("%s: AcceptBlock FAILED", __func__); } if (!ActivateBestChain(state, pblock)) - return error("%s : ActivateBestChain failed", __func__); + return error("%s: ActivateBestChain failed", __func__); return true; } @@ -2721,159 +2801,6 @@ bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex -CMerkleBlock::CMerkleBlock(const CBlock& block, CBloomFilter& filter) -{ - header = block.GetBlockHeader(); - - vector<bool> vMatch; - vector<uint256> vHashes; - - vMatch.reserve(block.vtx.size()); - vHashes.reserve(block.vtx.size()); - - for (unsigned int i = 0; i < block.vtx.size(); i++) - { - const uint256& hash = block.vtx[i].GetHash(); - if (filter.IsRelevantAndUpdate(block.vtx[i])) - { - vMatch.push_back(true); - vMatchedTxn.push_back(make_pair(i, hash)); - } - else - vMatch.push_back(false); - vHashes.push_back(hash); - } - - txn = CPartialMerkleTree(vHashes, vMatch); -} - - - - - - - - -uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::vector<uint256> &vTxid) { - if (height == 0) { - // hash at height 0 is the txids themself - return vTxid[pos]; - } else { - // calculate left hash - uint256 left = CalcHash(height-1, pos*2, vTxid), right; - // calculate right hash if not beyond the end of the array - copy left hash otherwise1 - if (pos*2+1 < CalcTreeWidth(height-1)) - right = CalcHash(height-1, pos*2+1, vTxid); - else - right = left; - // combine subhashes - return Hash(BEGIN(left), END(left), BEGIN(right), END(right)); - } -} - -void CPartialMerkleTree::TraverseAndBuild(int height, unsigned int pos, const std::vector<uint256> &vTxid, const std::vector<bool> &vMatch) { - // determine whether this node is the parent of at least one matched txid - bool fParentOfMatch = false; - for (unsigned int p = pos << height; p < (pos+1) << height && p < nTransactions; p++) - fParentOfMatch |= vMatch[p]; - // store as flag bit - vBits.push_back(fParentOfMatch); - if (height==0 || !fParentOfMatch) { - // if at height 0, or nothing interesting below, store hash and stop - vHash.push_back(CalcHash(height, pos, vTxid)); - } else { - // otherwise, don't store any hash, but descend into the subtrees - TraverseAndBuild(height-1, pos*2, vTxid, vMatch); - if (pos*2+1 < CalcTreeWidth(height-1)) - TraverseAndBuild(height-1, pos*2+1, vTxid, vMatch); - } -} - -uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, unsigned int &nBitsUsed, unsigned int &nHashUsed, std::vector<uint256> &vMatch) { - if (nBitsUsed >= vBits.size()) { - // 
overflowed the bits array - failure - fBad = true; - return 0; - } - bool fParentOfMatch = vBits[nBitsUsed++]; - if (height==0 || !fParentOfMatch) { - // if at height 0, or nothing interesting below, use stored hash and do not descend - if (nHashUsed >= vHash.size()) { - // overflowed the hash array - failure - fBad = true; - return 0; - } - const uint256 &hash = vHash[nHashUsed++]; - if (height==0 && fParentOfMatch) // in case of height 0, we have a matched txid - vMatch.push_back(hash); - return hash; - } else { - // otherwise, descend into the subtrees to extract matched txids and hashes - uint256 left = TraverseAndExtract(height-1, pos*2, nBitsUsed, nHashUsed, vMatch), right; - if (pos*2+1 < CalcTreeWidth(height-1)) - right = TraverseAndExtract(height-1, pos*2+1, nBitsUsed, nHashUsed, vMatch); - else - right = left; - // and combine them before returning - return Hash(BEGIN(left), END(left), BEGIN(right), END(right)); - } -} - -CPartialMerkleTree::CPartialMerkleTree(const std::vector<uint256> &vTxid, const std::vector<bool> &vMatch) : nTransactions(vTxid.size()), fBad(false) { - // reset state - vBits.clear(); - vHash.clear(); - - // calculate height of tree - int nHeight = 0; - while (CalcTreeWidth(nHeight) > 1) - nHeight++; - - // traverse the partial tree - TraverseAndBuild(nHeight, 0, vTxid, vMatch); -} - -CPartialMerkleTree::CPartialMerkleTree() : nTransactions(0), fBad(true) {} - -uint256 CPartialMerkleTree::ExtractMatches(std::vector<uint256> &vMatch) { - vMatch.clear(); - // An empty set will not work - if (nTransactions == 0) - return 0; - // check for excessively high numbers of transactions - if (nTransactions > MAX_BLOCK_SIZE / 60) // 60 is the lower bound for the size of a serialized CTransaction - return 0; - // there can never be more hashes provided than one for every txid - if (vHash.size() > nTransactions) - return 0; - // there must be at least one bit per node in the partial tree, and at least one node per hash - if (vBits.size() < vHash.size()) - return 0; - // calculate height of tree - int nHeight = 0; - while (CalcTreeWidth(nHeight) > 1) - nHeight++; - // traverse the partial tree - unsigned int nBitsUsed = 0, nHashUsed = 0; - uint256 hashMerkleRoot = TraverseAndExtract(nHeight, 0, nBitsUsed, nHashUsed, vMatch); - // verify that no problems occured during the tree traversal - if (fBad) - return 0; - // verify that all bits were consumed (except for the padding caused by serializing it as a byte sequence) - if ((nBitsUsed+7)/8 != (vBits.size()+7)/8) - return 0; - // verify that all hashes were consumed - if (nHashUsed != vHash.size()) - return 0; - return hashMerkleRoot; -} - - - - - - - bool AbortNode(const std::string &strMessage, const std::string &userMessage) { strMiscWarning = strMessage; LogPrintf("*** %s\n", strMessage); @@ -2886,7 +2813,7 @@ bool AbortNode(const std::string &strMessage, const std::string &userMessage) { bool CheckDiskSpace(uint64_t nAdditionalBytes) { - uint64_t nFreeBytesAvailable = filesystem::space(GetDataDir()).available; + uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available; // Check for nMinDiskSpace bytes (currently 50MB) if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) @@ -2933,7 +2860,7 @@ boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char CBlockIndex * InsertBlockIndex(uint256 hash) { - if (hash == 0) + if (hash.IsNull()) return NULL; // Return existing @@ -2944,7 +2871,7 @@ CBlockIndex * InsertBlockIndex(uint256 hash) // Create new CBlockIndex* pindexNew = 
new CBlockIndex(); if (!pindexNew) - throw runtime_error("LoadBlockIndex() : new CBlockIndex failed"); + throw runtime_error("LoadBlockIndex(): new CBlockIndex failed"); mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); @@ -3090,24 +3017,24 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth CBlock block; // check level 0: read from disk if (!ReadBlockFromDisk(block, pindex)) - return error("VerifyDB() : *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(block, state)) - return error("VerifyDB() : *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; CDiskBlockPos pos = pindex->GetUndoPos(); if (!pos.IsNull()) { - if (!undo.ReadFromDisk(pos, pindex->pprev->GetBlockHash())) - return error("VerifyDB() : *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash())) + return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); } } // check level 3: check for inconsistencies during memory-only disconnect of tip blocks if (nCheckLevel >= 3 && pindex == pindexState && (coins.GetCacheSize() + pcoinsTip->GetCacheSize()) <= nCoinCacheSize) { bool fClean = true; if (!DisconnectBlock(block, state, pindex, coins, &fClean)) - return error("VerifyDB() : *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); pindexState = pindex->pprev; if (!fClean) { nGoodTransactions = 0; @@ -3115,9 +3042,11 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth } else nGoodTransactions += block.vtx.size(); } + if (ShutdownRequested()) + return true; } if (pindexFailure) - return error("VerifyDB() : *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions); + return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions); // check level 4: try reconnecting blocks if (nCheckLevel >= 4) { @@ -3128,9 +3057,9 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex)) - return error("VerifyDB() : *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); if (!ConnectBlock(block, state, pindex, coins)) - return error("VerifyDB() : *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + return error("VerifyDB(): *** 
@@ -3128,9 +3057,9 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth
pindex = chainActive.Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex))
- return error("VerifyDB() : *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
if (!ConnectBlock(block, state, pindex, coins))
- return error("VerifyDB() : *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
}
}
@@ -3176,18 +3105,18 @@ bool InitBlockIndex() {
CDiskBlockPos blockPos;
CValidationState state;
if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
- return error("LoadBlockIndex() : FindBlockPos failed");
+ return error("LoadBlockIndex(): FindBlockPos failed");
if (!WriteBlockToDisk(block, blockPos))
- return error("LoadBlockIndex() : writing genesis block to disk failed");
+ return error("LoadBlockIndex(): writing genesis block to disk failed");
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
- return error("LoadBlockIndex() : genesis block not accepted");
+ return error("LoadBlockIndex(): genesis block not accepted");
if (!ActivateBestChain(state, &block))
- return error("LoadBlockIndex() : genesis block cannot be activated");
- // Force a chainstate write so that when we VerifyDB in a moment, it doesnt check stale data
+ return error("LoadBlockIndex(): genesis block cannot be activated");
+ // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
- } catch(std::runtime_error &e) {
- return error("LoadBlockIndex() : failed to initialize block database: %s", e.what());
+ } catch (const std::runtime_error& e) {
+ return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
}
}
@@ -3226,7 +3155,7 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
blkdat >> nSize;
if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
continue;
- } catch (const std::exception &) {
+ } catch (const std::exception&) {
// no valid block header found; don't complain
break;
}
@@ -3286,11 +3215,11 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
mapBlocksUnknownParent.erase(it);
}
}
- } catch (std::exception &e) {
- LogPrintf("%s : Deserialize or I/O error - %s", __func__, e.what());
+ } catch (const std::exception& e) {
+ LogPrintf("%s: Deserialize or I/O error - %s", __func__, e.what());
}
}
- } catch(std::runtime_error &e) {
+ } catch (const std::runtime_error& e) {
AbortNode(std::string("System error: ") + e.what());
}
if (nLoaded > 0)
@@ -3351,7 +3280,7 @@ string GetWarnings(string strFor)
return strStatusBar;
else if (strFor == "rpc")
return strRPC;
- assert(!"GetWarnings() : invalid parameter");
+ assert(!"GetWarnings(): invalid parameter");
return "error";
}
@@ -3465,7 +3394,7 @@ void static ProcessGetData(CNode* pfrom)
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
pfrom->PushMessage("inv", vInv);
- pfrom->hashContinue = 0;
+ pfrom->hashContinue.SetNull();
}
}
}
@@ -3646,7 +3575,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
remoteAddr);
- AddTimeData(pfrom->addr, nTime);
+ int64_t nTimeOffset = nTime - GetTime();
+ pfrom->nTimeOffset = nTimeOffset;
+ AddTimeData(pfrom->addr, nTimeOffset);
}
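The last hunk above stops feeding the peer's raw "version" timestamp to AddTimeData and instead records the peer's clock offset on the node before passing that offset along. A small sketch of the bookkeeping, with hypothetical names (PeerClockSketch and RecordPeerOffsetSketch are not part of main.cpp):

#include <cstdint>
#include <ctime>

struct PeerClockSketch {
    int64_t nTimeOffset = 0; // peer clock minus our clock, in seconds
};

// nPeerTime is the nTime field from the peer's "version" message.
void RecordPeerOffsetSketch(PeerClockSketch& peer, int64_t nPeerTime)
{
    const int64_t nNow = static_cast<int64_t>(std::time(nullptr)); // stand-in for GetTime()
    peer.nTimeOffset = nPeerTime - nNow; // positive means the peer's clock runs ahead of ours
    // The real code then calls AddTimeData(addr, nTimeOffset), which keeps a
    // median of peer offsets for network-adjusted time.
}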
@@ -3698,10 +3629,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the setAddrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
- if (hashSalt == 0)
+ if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint64_t hashAddr = addr.GetHash();
- uint256 hashRand = hashSalt ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60));
+ uint256 hashRand = ArithToUint256(UintToArith256(hashSalt) ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60)));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
multimap<uint256, CNode*> mapMix;
BOOST_FOREACH(CNode* pnode, vNodes)
@@ -3710,7 +3641,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
continue;
unsigned int nPointer;
memcpy(&nPointer, &pnode, sizeof(nPointer));
- uint256 hashKey = hashRand ^ nPointer;
+ uint256 hashKey = ArithToUint256(UintToArith256(hashRand) ^ nPointer);
hashKey = Hash(BEGIN(hashKey), END(hashKey));
mapMix.insert(make_pair(hashKey, pnode));
}
@@ -3770,7 +3701,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// doing this will result in the received block being rejected as an orphan in case it is
// not a direct successor.
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
- if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - Params().TargetSpacing() * 20) {
+ CNodeState *nodestate = State(pfrom->GetId());
+ if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - Params().TargetSpacing() * 20 &&
+ nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vToFetch.push_back(inv);
// Mark block as in flight already, even though the actual "getdata" message only goes out
// later (within the same cs_main lock, though).
@@ -3830,7 +3763,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
- LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop==uint256(0) ? "end" : hashStop.ToString(), nLimit, pfrom->id);
+ LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
@@ -3914,7 +3847,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
vWorkQueue.push_back(inv.hash);
vEraseQueue.push_back(inv.hash);
- LogPrint("mempool", "AcceptToMemoryPool: peer=%d %s : accepted %s (poolsz %u)\n",
+ LogPrint("mempool", "AcceptToMemoryPool: peer=%d %s: accepted %s (poolsz %u)\n",
pfrom->id, pfrom->cleanSubVer,
tx.GetHash().ToString(),
mempool.mapTx.size());
@@ -4046,7 +3979,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
- pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256());
+ pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256());
}
}
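Several hunks above follow the same pattern: uint256 is now treated as an opaque 256-bit blob (compared with IsNull() or a default-constructed uint256() instead of 0), and any arithmetic or bitwise mixing goes through arith_uint256 via the UintToArith256/ArithToUint256 helpers from arith_uint256.h. A minimal sketch of that round trip; MixWithSaltSketch is an illustrative helper, not code from this file:

#include "arith_uint256.h"
#include "uint256.h"

// XOR a value into a salt the way the relay code above does: convert to the
// arithmetic type, operate, convert back to the opaque blob type.
uint256 MixWithSaltSketch(const uint256& hashSalt, uint64_t nValue)
{
    arith_uint256 v = UintToArith256(hashSalt);
    v ^= arith_uint256(nValue);   // bitwise/arithmetic operators live on arith_uint256
    return ArithToUint256(v);     // back to uint256 for hashing, maps, and the wire
}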
LogPrint("net", "Unparseable reject message received\n"); } @@ -4376,7 +4309,7 @@ bool ProcessMessages(CNode* pfrom) memcpy(&nChecksum, &hash, sizeof(nChecksum)); if (nChecksum != hdr.nChecksum) { - LogPrintf("ProcessMessages(%s, %u bytes) : CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", + LogPrintf("ProcessMessages(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", strCommand, nMessageSize, nChecksum, hdr.nChecksum); continue; } @@ -4388,28 +4321,28 @@ bool ProcessMessages(CNode* pfrom) fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime); boost::this_thread::interruption_point(); } - catch (std::ios_base::failure& e) + catch (const std::ios_base::failure& e) { pfrom->PushMessage("reject", strCommand, REJECT_MALFORMED, string("error parsing message")); if (strstr(e.what(), "end of data")) { // Allow exceptions from under-length message on vRecv - LogPrintf("ProcessMessages(%s, %u bytes) : Exception '%s' caught, normally caused by a message being shorter than its stated length\n", strCommand, nMessageSize, e.what()); + LogPrintf("ProcessMessages(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", strCommand, nMessageSize, e.what()); } else if (strstr(e.what(), "size too large")) { // Allow exceptions from over-long size - LogPrintf("ProcessMessages(%s, %u bytes) : Exception '%s' caught\n", strCommand, nMessageSize, e.what()); + LogPrintf("ProcessMessages(%s, %u bytes): Exception '%s' caught\n", strCommand, nMessageSize, e.what()); } else { PrintExceptionContinue(&e, "ProcessMessages()"); } } - catch (boost::thread_interrupted) { + catch (const boost::thread_interrupted&) { throw; } - catch (std::exception& e) { + catch (const std::exception& e) { PrintExceptionContinue(&e, "ProcessMessages()"); } catch (...) { PrintExceptionContinue(NULL, "ProcessMessages()"); @@ -4544,7 +4477,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle) nSyncStarted++; CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader; LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight); - pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256(0)); + pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256()); } } @@ -4575,11 +4508,11 @@ bool SendMessages(CNode* pto, bool fSendTrickle) { // 1/4 of tx invs blast to all immediately static uint256 hashSalt; - if (hashSalt == 0) + if (hashSalt.IsNull()) hashSalt = GetRandHash(); - uint256 hashRand = inv.hash ^ hashSalt; + uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt)); hashRand = Hash(BEGIN(hashRand), END(hashRand)); - bool fTrickleWait = ((hashRand & 3) != 0); + bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0); if (fTrickleWait) { @@ -4613,6 +4546,15 @@ bool SendMessages(CNode* pto, bool fSendTrickle) LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id); pto->fDisconnect = true; } + // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval + // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to + // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link + // being saturated. We only count validated in-flight blocks so peers can't advertize nonexisting block hashes + // to unreasonably increase our timeout. 
@@ -4613,6 +4546,15 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
pto->fDisconnect = true;
}
+ // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
+ // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
+ // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
+ // being saturated. We only count validated in-flight blocks so peers can't advertize nonexisting block hashes
+ // to unreasonably increase our timeout.
+ if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * Params().TargetSpacing() * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) {
+ LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", state.vBlocksInFlight.front().hash.ToString(), pto->id);
+ pto->fDisconnect = true;
+ }
//
// Message: getdata (blocks)
@@ -4662,61 +4604,6 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
return true;
}
-
-bool CBlockUndo::WriteToDisk(CDiskBlockPos &pos, const uint256 &hashBlock)
-{
- // Open history file to append
- CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
- if (fileout.IsNull())
- return error("CBlockUndo::WriteToDisk : OpenUndoFile failed");
-
- // Write index header
- unsigned int nSize = fileout.GetSerializeSize(*this);
- fileout << FLATDATA(Params().MessageStart()) << nSize;
-
- // Write undo data
- long fileOutPos = ftell(fileout.Get());
- if (fileOutPos < 0)
- return error("CBlockUndo::WriteToDisk : ftell failed");
- pos.nPos = (unsigned int)fileOutPos;
- fileout << *this;
-
- // calculate & write checksum
- CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
- hasher << hashBlock;
- hasher << *this;
- fileout << hasher.GetHash();
-
- return true;
-}
-
-bool CBlockUndo::ReadFromDisk(const CDiskBlockPos &pos, const uint256 &hashBlock)
-{
- // Open history file to read
- CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
- if (filein.IsNull())
- return error("CBlockUndo::ReadFromDisk : OpenBlockFile failed");
-
- // Read block
- uint256 hashChecksum;
- try {
- filein >> *this;
- filein >> hashChecksum;
- }
- catch (std::exception &e) {
- return error("%s : Deserialize or I/O error - %s", __func__, e.what());
- }
-
- // Verify checksum
- CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
- hasher << hashBlock;
- hasher << *this;
- if (hashChecksum != hasher.GetHash())
- return error("CBlockUndo::ReadFromDisk : Checksum mismatch");
-
- return true;
-}
-
std::string CBlockFileInfo::ToString() const {
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}
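The CBlockUndo::WriteToDisk/ReadFromDisk methods removed above survive outside this class: the VerifyDB hunk earlier now calls a standalone UndoReadFromDisk helper (with a matching write-side counterpart presumably living alongside it, not shown in this diff), and CBlockUndo itself moves behind the new undo.h include. The checksum scheme is unchanged and visible in the deleted code: a hash over the previous block's hash plus the serialized undo data. A small sketch of just that computation:

#include "hash.h"
#include "uint256.h"
#include "undo.h"
#include "version.h"

// Checksum written after the undo record and re-derived on load; binding in
// hashBlock ties the undo data to the block it belongs to.
uint256 UndoChecksumSketch(const uint256& hashBlock, const CBlockUndo& blockundo)
{
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;  // previous block's hash
    hasher << blockundo;  // serialized undo data itself
    return hasher.GetHash();
}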