author    Pieter Wuille <pieter.wuille@gmail.com>  2017-11-03 15:32:02 -0700
committer Pieter Wuille <pieter.wuille@gmail.com>  2017-11-03 15:42:50 -0700
commit    f518d9ae6aa525c4cd360a16f07a9272cbef558d (patch)
tree      335ee53ff19753f10dfd40b42570e0419701e183
parent    f224cbc3d8f4478d30e121126658c2ab22c6fb90 (diff)
parent    8195cb0d7fc4f8699b35aff7a43ed2fb3013608c (diff)
download  bitcoin-0.15.1rc1.tar.xz
Merge #11592: 0.15: Backports (v0.15.1rc1)

8195cb0d7 rpc: further constrain the libevent workaround (Cory Fields)
34153a7e4 rpc: work-around an upstream libevent bug (Cory Fields)
fc308a6cd Add unit test for stale tip checking (Suhas Daftuar)
2ed0647ac Add CConnmanTest to mutate g_connman in tests (João Barbosa)
a607a95d8 Connect to an extra outbound peer if our tip is stale (Suhas Daftuar)
459f2db42 Track tip update time and last new block announcement from each peer (Suhas Daftuar)
49bf09018 net: Allow connecting to extra outbound peers (Suhas Daftuar)
bb83fe190 Add release notes describing blockmaxweight deprecation (Matt Corallo)
4c82cea99 Use a sensible default for blockmaxweight (Matt Corallo)
7871a7d3b Deprecate confusing blockmaxsize, fix getmininginfo output (Matt Corallo)
6baa317b5 Fix minchainwork test for 0.15 backport (Suhas Daftuar)
55b7abfa8 Make p2p-acceptblock not an extended test (Matt Corallo)
5bec7744d [qa] test that invalid blocks on an invalid chain get a disconnect (Matt Corallo)
92d6105c4 Reject headers building on invalid chains by tracking invalidity (Matt Corallo)
51001d684 Accept unrequested blocks with work equal to our tip (Matt Corallo)
c6e4d0ce8 Stop always storing blocks from whitelisted peers (Matt Corallo)
e976c36dd Rewrite p2p-acceptblock in preparation for slight behavior changes (Matt Corallo)
ec8dedff4 net: Add missing lock in ProcessHeadersMessage(...) (practicalswift)
59b210d9a Disconnect outbound peers relaying invalid headers (Suhas Daftuar)
fc966bbd2 moveonly: factor out headers processing into separate function (Suhas Daftuar)
e3272242e Add unit test for outbound peer eviction (Suhas Daftuar)
9961abf9e Permit disconnection of outbound peers on bad/slow chains (Suhas Daftuar)
bf191a718 Disconnecting from bad outbound peers in IBD (Suhas Daftuar)
d570aa429 Fix uninitialized g_connman crash in Shutdown() (MeshCollider)
0a5477c7e net: stop both net/net_processing before destroying them (Cory Fields)
b4136f21c net: drop unused connman param (Cory Fields)
dc897e53d net: use an interface class rather than signals for message processing (Cory Fields)
8aee55af3 net: pass CConnman via pointer rather than reference (Cory Fields)
6f279652b Rename fAddnode to a more-descriptive "manual_connection" (Matt Corallo)
ffb6ea4e5 Add comment explaining forced processing of compact blocks (Suhas Daftuar)
2df65eeb9 qa: add test for minchainwork use in acceptblock (Suhas Daftuar)
3acec3878 Don't process unrequested, low-work blocks (Suhas Daftuar)
0e9d04bf0 [qa] Test nMinimumChainWork (Suhas Daftuar)
da4908c3a Allow setting nMinimumChainWork on command line (Suhas Daftuar)
41088795d qa: Remove never used return value of sync_with_ping (MarcoFalke)
f3457d0e8 qa: Make tmpdir option an absolute path (MarcoFalke)
9c8006dc3 Avoid opening copied wallet databases simultaneously (Russell Yanofsky)
de7053f11 [wallet] Fix leak in CDB constructor (João Barbosa)
fd79ed6b2 Ensure backupwallet fails when attempting to backup to source file (Tomas van der Wansem)
d94fc336c scripted-diff: rename assert_raises_jsonrpc to assert_raises_rpc_error (John Newbery)
623de0acb [tests] do not allow assert_raises_message to be called with JSONRPCException (John Newbery)
5b728c8e9 [tests] remove direct testing on JSONRPCException from individual test cases (John Newbery)

Pull request description:

Tree-SHA512: 9fdb5c47844a899271023d8d445f7fc728e3ad71916490cd9783464684967594b07cda05dd644b722bfcea9fade74d06cfc501e1a68abf118d6d03fbbf7d7707
-rw-r--r--  doc/release-notes.md  |  16
-rw-r--r--  src/httpserver.cpp  |  32
-rw-r--r--  src/init.cpp  |  37
-rw-r--r--  src/miner.cpp  |  38
-rw-r--r--  src/miner.h  |  4
-rw-r--r--  src/net.cpp  |  67
-rw-r--r--  src/net.h  |  51
-rw-r--r--  src/net_processing.cpp  |  892
-rw-r--r--  src/net_processing.h  |  57
-rw-r--r--  src/policy/policy.h  |  4
-rw-r--r--  src/rpc/mining.cpp  |  2
-rw-r--r--  src/rpc/net.cpp  |  2
-rw-r--r--  src/test/DoS_tests.cpp  |  162
-rw-r--r--  src/test/test_bitcoin.cpp  |  18
-rw-r--r--  src/test/test_bitcoin.h  |  8
-rw-r--r--  src/test/util_tests.cpp  |  25
-rw-r--r--  src/utilstrencodings.cpp  |  13
-rw-r--r--  src/utilstrencodings.h  |  6
-rw-r--r--  src/validation.cpp  |  99
-rw-r--r--  src/validation.h  |  7
-rw-r--r--  src/wallet/db.cpp  |  74
-rw-r--r--  src/wallet/db.h  |  2
-rwxr-xr-x  test/functional/bip68-sequence.py  |  10
-rwxr-xr-x  test/functional/blockchain.py  |  4
-rwxr-xr-x  test/functional/bumpfee.py  |  14
-rwxr-xr-x  test/functional/disablewallet.py  |  4
-rwxr-xr-x  test/functional/disconnect_ban.py  |  12
-rwxr-xr-x  test/functional/fundrawtransaction.py  |  12
-rwxr-xr-x  test/functional/import-rescan.py  |  30
-rwxr-xr-x  test/functional/importmulti.py  |  4
-rwxr-xr-x  test/functional/importprunedfunds.py  |  4
-rwxr-xr-x  test/functional/keypool.py  |  8
-rwxr-xr-x  test/functional/mempool_packages.py  |  4
-rwxr-xr-x  test/functional/mempool_reorg.py  |  4
-rwxr-xr-x  test/functional/mempool_spendcoinbase.py  |  2
-rwxr-xr-x  test/functional/merkle_blocks.py  |  8
-rwxr-xr-x  test/functional/minchainwork.py  |  93
-rwxr-xr-x  test/functional/mining.py  |  11
-rwxr-xr-x  test/functional/multiwallet.py  |  12
-rwxr-xr-x  test/functional/net.py  |  4
-rwxr-xr-x  test/functional/nulldummy.py  |  6
-rwxr-xr-x  test/functional/p2p-acceptblock.py  |  302
-rwxr-xr-x  test/functional/prioritise_transaction.py  |  2
-rwxr-xr-x  test/functional/pruning.py  |  8
-rwxr-xr-x  test/functional/rawtransactions.py  |  12
-rwxr-xr-x  test/functional/replace-by-fee.py  |  30
-rwxr-xr-x  test/functional/resendwallettransactions.py  |  4
-rwxr-xr-x  test/functional/rpcbind_test.py  |  2
-rwxr-xr-x  test/functional/rpcnamedargs.py  |  4
-rwxr-xr-x  test/functional/segwit.py  |  14
-rwxr-xr-x  test/functional/signrawtransactions.py  |  2
-rwxr-xr-x  test/functional/test_framework/mininode.py  |  1
-rwxr-xr-x  test/functional/test_framework/test_framework.py  |  3
-rw-r--r--  test/functional/test_framework/util.py  |  14
-rwxr-xr-x  test/functional/test_runner.py  |  3
-rwxr-xr-x  test/functional/wallet-dump.py  |  4
-rwxr-xr-x  test/functional/wallet-encryption.py  |  12
-rwxr-xr-x  test/functional/wallet.py  |  8
-rwxr-xr-x  test/functional/walletbackup.py  |  10
-rwxr-xr-x  test/functional/zapwallettxes.py  |  4
60 files changed, 1617 insertions, 684 deletions
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 1bb02e3761..6e4bd03cd5 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -72,6 +72,22 @@ Notable changes
value is passed, instead of returning a list of all wallet transactions since
the genesis block.
+Miner block size limiting deprecated
+------------------------------------
+
+Though blockmaxweight has been preferred for limiting the size of blocks returned by
+getblocktemplate since 0.13.0, blockmaxsize remained as an option for those who wished
+to limit their block size directly. Using this option resulted in a few UI issues as
+well as non-optimal fee selection and ever-so-slightly worse performance, and has thus
+now been deprecated. Further, the blockmaxsize option is now used only to calculate an
+implied blockmaxweight, instead of limiting block size directly. Any miners who wish
+to limit their blocks by size, instead of by weight, will have to do so manually by
+removing transactions from their block template directly.
+
+Low-level RPC changes
+----------------------
+- The "currentblocksize" value in getmininginfo has been removed.
+
Credits
=======
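As a minimal sketch of the parameter interaction the release note above describes (hypothetical helper name; assumes the WITNESS_SCALE_FACTOR of 4 from consensus/consensus.h), a legacy -blockmaxsize value now only seeds an implied -blockmaxweight rather than capping serialized size directly — the actual wiring is in the InitParameterInteraction() hunk further down:

// Sketch only: how a legacy -blockmaxsize value maps to the implied -blockmaxweight.
static const unsigned int WITNESS_SCALE_FACTOR = 4; // as defined in consensus/consensus.h

unsigned int ImpliedBlockMaxWeight(unsigned int block_max_size)
{
    // e.g. -blockmaxsize=750000 becomes -blockmaxweight=3000000 weight units
    return block_max_size * WITNESS_SCALE_FACTOR;
}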
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 86b37f79bb..dc7016b85b 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -24,6 +24,7 @@
#include <event2/thread.h>
#include <event2/buffer.h>
+#include <event2/bufferevent.h>
#include <event2/util.h>
#include <event2/keyvalq_struct.h>
@@ -239,6 +240,16 @@ static std::string RequestMethodString(HTTPRequest::RequestMethod m)
/** HTTP request callback */
static void http_request_cb(struct evhttp_request* req, void* arg)
{
+ // Disable reading to work around a libevent bug, fixed in 2.2.0.
+ if (event_get_version_number() >= 0x02010600 && event_get_version_number() < 0x02020001) {
+ evhttp_connection* conn = evhttp_request_get_connection(req);
+ if (conn) {
+ bufferevent* bev = evhttp_connection_get_bufferevent(conn);
+ if (bev) {
+ bufferevent_disable(bev, EV_READ);
+ }
+ }
+ }
std::unique_ptr<HTTPRequest> hreq(new HTTPRequest(req));
LogPrint(BCLog::HTTP, "Received a %s request for %s from %s\n",
@@ -599,11 +610,24 @@ void HTTPRequest::WriteReply(int nStatus, const std::string& strReply)
struct evbuffer* evb = evhttp_request_get_output_buffer(req);
assert(evb);
evbuffer_add(evb, strReply.data(), strReply.size());
- HTTPEvent* ev = new HTTPEvent(eventBase, true,
- std::bind(evhttp_send_reply, req, nStatus, (const char*)nullptr, (struct evbuffer *)nullptr));
- ev->trigger(0);
+ auto req_copy = req;
+ HTTPEvent* ev = new HTTPEvent(eventBase, true, [req_copy, nStatus]{
+ evhttp_send_reply(req_copy, nStatus, nullptr, nullptr);
+ // Re-enable reading from the socket. This is the second part of the libevent
+ // workaround above.
+ if (event_get_version_number() >= 0x02010600 && event_get_version_number() < 0x02020001) {
+ evhttp_connection* conn = evhttp_request_get_connection(req_copy);
+ if (conn) {
+ bufferevent* bev = evhttp_connection_get_bufferevent(conn);
+ if (bev) {
+ bufferevent_enable(bev, EV_READ | EV_WRITE);
+ }
+ }
+ }
+ });
+ ev->trigger(nullptr);
replySent = true;
- req = 0; // transferred back to main thread
+ req = nullptr; // transferred back to main thread
}
CService HTTPRequest::GetPeer()
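The workaround above is gated to libevent builds in the range [0x02010600, 0x02020001). A small sketch, assuming libevent's usual packed encoding of major/minor/patch in the top three bytes (so 0x02010600 corresponds to 2.1.6 and 0x02020001 to a 2.2.0 development version), of what those constants decode to:

// Sketch: decoding the value returned by event_get_version_number(),
// assuming the 0xMMmmpp00 layout used by LIBEVENT_VERSION_NUMBER.
#include <event2/event.h>
#include <cstdio>

void PrintLibeventVersion()
{
    ev_uint32_t v = event_get_version_number();
    std::printf("running against libevent %u.%u.%u\n",
                (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff);
}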
diff --git a/src/init.cpp b/src/init.cpp
index a708295f43..5196eee953 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -193,12 +193,15 @@ void Shutdown()
}
#endif
MapPort(false);
+
+ // Because these depend on each-other, we make sure that neither can be
+ // using the other before destroying them.
UnregisterValidationInterface(peerLogic.get());
+ if(g_connman) g_connman->Stop();
peerLogic.reset();
g_connman.reset();
StopTorControl();
- UnregisterNodeSignals(GetNodeSignals());
if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
DumpMempool();
}
@@ -362,6 +365,9 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-maxorphantx=<n>", strprintf(_("Keep at most <n> unconnectable transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS));
strUsage += HelpMessageOpt("-maxmempool=<n>", strprintf(_("Keep the transaction memory pool below <n> megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE));
strUsage += HelpMessageOpt("-mempoolexpiry=<n>", strprintf(_("Do not keep transactions in the mempool longer than <n> hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY));
+ if (showDebug) {
+ strUsage += HelpMessageOpt("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()));
+ }
strUsage += HelpMessageOpt("-persistmempool", strprintf(_("Whether to save the mempool on shutdown and load on restart (default: %u)"), DEFAULT_PERSIST_MEMPOOL));
strUsage += HelpMessageOpt("-blockreconstructionextratxn=<n>", strprintf(_("Extra transactions to keep in memory for compact block reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN));
strUsage += HelpMessageOpt("-par=<n>", strprintf(_("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)"),
@@ -494,7 +500,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageGroup(_("Block creation options:"));
strUsage += HelpMessageOpt("-blockmaxweight=<n>", strprintf(_("Set maximum BIP141 block weight (default: %d)"), DEFAULT_BLOCK_MAX_WEIGHT));
- strUsage += HelpMessageOpt("-blockmaxsize=<n>", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_BLOCK_MAX_SIZE));
+ strUsage += HelpMessageOpt("-blockmaxsize=<n>", _("Set maximum BIP141 block weight to this * 4. Deprecated, use blockmaxweight"));
strUsage += HelpMessageOpt("-blockmintxfee=<amt>", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)));
if (showDebug)
strUsage += HelpMessageOpt("-blockversion=<n>", "Override block version to test forking scenarios");
@@ -796,6 +802,15 @@ void InitParameterInteraction()
if (gArgs.SoftSetBoolArg("-whitelistrelay", true))
LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__);
}
+
+ if (gArgs.IsArgSet("-blockmaxsize")) {
+ unsigned int max_size = gArgs.GetArg("-blockmaxsize", 0);
+ if (gArgs.SoftSetArg("blockmaxweight", strprintf("%d", max_size * WITNESS_SCALE_FACTOR))) {
+ LogPrintf("%s: parameter interaction: -blockmaxsize=%d -> setting -blockmaxweight=%d (-blockmaxsize is deprecated!)\n", __func__, max_size, max_size * WITNESS_SCALE_FACTOR);
+ } else {
+ LogPrintf("%s: Ignoring blockmaxsize setting which is overridden by blockmaxweight", __func__);
+ }
+ }
}
static std::string ResolveErrMsg(const char * const optname, const std::string& strBind)
@@ -979,6 +994,20 @@ bool AppInitParameterInteraction()
else
LogPrintf("Validating signatures for all blocks.\n");
+ if (gArgs.IsArgSet("-minimumchainwork")) {
+ const std::string minChainWorkStr = gArgs.GetArg("-minimumchainwork", "");
+ if (!IsHexNumber(minChainWorkStr)) {
+ return InitError(strprintf("Invalid non-hex (%s) minimum chain work value specified", minChainWorkStr));
+ }
+ nMinimumChainWork = UintToArith256(uint256S(minChainWorkStr));
+ } else {
+ nMinimumChainWork = UintToArith256(chainparams.GetConsensus().nMinimumChainWork);
+ }
+ LogPrintf("Setting nMinimumChainWork=%s\n", nMinimumChainWork.GetHex());
+ if (nMinimumChainWork < UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) {
+ LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainparams.GetConsensus().nMinimumChainWork.GetHex());
+ }
+
// mempool limits
int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
int64_t nMempoolSizeMin = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40;
@@ -1258,9 +1287,8 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
g_connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max())));
CConnman& connman = *g_connman;
- peerLogic.reset(new PeerLogicValidation(&connman));
+ peerLogic.reset(new PeerLogicValidation(&connman, scheduler));
RegisterValidationInterface(peerLogic.get());
- RegisterNodeSignals(GetNodeSignals());
// sanitize comments per BIP-0014, format user agent and check total size
std::vector<std::string> uacomments;
@@ -1651,6 +1679,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
connOptions.nMaxFeeler = 1;
connOptions.nBestHeight = chainActive.Height();
connOptions.uiInterface = &uiInterface;
+ connOptions.m_msgproc = peerLogic.get();
connOptions.nSendBufferMaxSize = 1000*gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
connOptions.nReceiveFloodSize = 1000*gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
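The -minimumchainwork handling above relies on IsHexNumber(), added in utilstrencodings per the diffstat but not shown in this excerpt. A rough stand-alone approximation (hypothetical function name; assumes an optional "0x" prefix is accepted) of the check being performed before the string is converted with uint256S()/UintToArith256():

// Rough approximation of the hex validation applied to -minimumchainwork.
#include <string>
#include <cctype>

bool LooksLikeHexChainWork(const std::string& str)
{
    size_t start = (str.rfind("0x", 0) == 0) ? 2 : 0;
    if (start == str.size()) return false; // empty string, or a bare "0x"
    for (size_t i = start; i < str.size(); ++i) {
        if (!std::isxdigit(static_cast<unsigned char>(str[i]))) return false;
    }
    return true;
}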
diff --git a/src/miner.cpp b/src/miner.cpp
index 403d3d4e4a..9d571a2eb0 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -43,7 +43,6 @@
// its ancestors.
uint64_t nLastBlockTx = 0;
-uint64_t nLastBlockSize = 0;
uint64_t nLastBlockWeight = 0;
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
@@ -64,7 +63,6 @@ int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParam
BlockAssembler::Options::Options() {
blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE);
nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
- nBlockMaxSize = DEFAULT_BLOCK_MAX_SIZE;
}
BlockAssembler::BlockAssembler(const CChainParams& params, const Options& options) : chainparams(params)
@@ -72,10 +70,6 @@ BlockAssembler::BlockAssembler(const CChainParams& params, const Options& option
blockMinFeeRate = options.blockMinFeeRate;
// Limit weight to between 4K and MAX_BLOCK_WEIGHT-4K for sanity:
nBlockMaxWeight = std::max<size_t>(4000, std::min<size_t>(MAX_BLOCK_WEIGHT - 4000, options.nBlockMaxWeight));
- // Limit size to between 1K and MAX_BLOCK_SERIALIZED_SIZE-1K for sanity:
- nBlockMaxSize = std::max<size_t>(1000, std::min<size_t>(MAX_BLOCK_SERIALIZED_SIZE - 1000, options.nBlockMaxSize));
- // Whether we need to account for byte usage (in addition to weight usage)
- fNeedSizeAccounting = (nBlockMaxSize < MAX_BLOCK_SERIALIZED_SIZE - 1000);
}
static BlockAssembler::Options DefaultOptions(const CChainParams& params)
@@ -85,20 +79,7 @@ static BlockAssembler::Options DefaultOptions(const CChainParams& params)
// If only one is given, only restrict the specified resource.
// If both are given, restrict both.
BlockAssembler::Options options;
- options.nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
- options.nBlockMaxSize = DEFAULT_BLOCK_MAX_SIZE;
- bool fWeightSet = false;
- if (gArgs.IsArgSet("-blockmaxweight")) {
- options.nBlockMaxWeight = gArgs.GetArg("-blockmaxweight", DEFAULT_BLOCK_MAX_WEIGHT);
- options.nBlockMaxSize = MAX_BLOCK_SERIALIZED_SIZE;
- fWeightSet = true;
- }
- if (gArgs.IsArgSet("-blockmaxsize")) {
- options.nBlockMaxSize = gArgs.GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
- if (!fWeightSet) {
- options.nBlockMaxWeight = options.nBlockMaxSize * WITNESS_SCALE_FACTOR;
- }
- }
+ options.nBlockMaxWeight = gArgs.GetArg("-blockmaxweight", DEFAULT_BLOCK_MAX_WEIGHT);
if (gArgs.IsArgSet("-blockmintxfee")) {
CAmount n = 0;
ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n);
@@ -116,7 +97,6 @@ void BlockAssembler::resetBlock()
inBlock.clear();
// Reserve space for coinbase tx
- nBlockSize = 1000;
nBlockWeight = 4000;
nBlockSigOpsCost = 400;
fIncludeWitness = false;
@@ -175,7 +155,6 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
int64_t nTime1 = GetTimeMicros();
nLastBlockTx = nBlockTx;
- nLastBlockSize = nBlockSize;
nLastBlockWeight = nBlockWeight;
// Create coinbase transaction.
@@ -190,8 +169,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblocktemplate->vchCoinbaseCommitment = GenerateCoinbaseCommitment(*pblock, pindexPrev, chainparams.GetConsensus());
pblocktemplate->vTxFees[0] = -nFees;
- uint64_t nSerializeSize = GetSerializeSize(*pblock, SER_NETWORK, PROTOCOL_VERSION);
- LogPrintf("CreateNewBlock(): total size: %u block weight: %u txs: %u fees: %ld sigops %d\n", nSerializeSize, GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost);
+ LogPrintf("CreateNewBlock(): block weight: %u txs: %u fees: %ld sigops %d\n", GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost);
// Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
@@ -238,22 +216,13 @@ bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost
// - transaction finality (locktime)
// - premature witness (in case segwit transactions are added to mempool before
// segwit activation)
-// - serialized size (in case -blockmaxsize is in use)
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package)
{
- uint64_t nPotentialBlockSize = nBlockSize; // only used with fNeedSizeAccounting
for (const CTxMemPool::txiter it : package) {
if (!IsFinalTx(it->GetTx(), nHeight, nLockTimeCutoff))
return false;
if (!fIncludeWitness && it->GetTx().HasWitness())
return false;
- if (fNeedSizeAccounting) {
- uint64_t nTxSize = ::GetSerializeSize(it->GetTx(), SER_NETWORK, PROTOCOL_VERSION);
- if (nPotentialBlockSize + nTxSize >= nBlockMaxSize) {
- return false;
- }
- nPotentialBlockSize += nTxSize;
- }
}
return true;
}
@@ -263,9 +232,6 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
pblock->vtx.emplace_back(iter->GetSharedTx());
pblocktemplate->vTxFees.push_back(iter->GetFee());
pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
- if (fNeedSizeAccounting) {
- nBlockSize += ::GetSerializeSize(iter->GetTx(), SER_NETWORK, PROTOCOL_VERSION);
- }
nBlockWeight += iter->GetTxWeight();
++nBlockTx;
nBlockSigOpsCost += iter->GetSigOpCost();
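With nBlockSize and fNeedSizeAccounting removed above, block assembly is governed by weight alone. A simplified sketch of the remaining admission test (hypothetical helper, not the actual BlockAssembler interface):

// Simplified sketch of the weight-only check left after removing size accounting.
#include <cstdint>

bool PackageFitsWeightLimit(uint64_t block_weight, uint64_t package_weight,
                            uint64_t block_max_weight)
{
    // block_weight starts at 4000 WU, the coinbase reservation made in resetBlock().
    return block_weight + package_weight <= block_max_weight;
}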
diff --git a/src/miner.h b/src/miner.h
index 5c9cfd78f0..f68f77c0ba 100644
--- a/src/miner.h
+++ b/src/miner.h
@@ -139,13 +139,11 @@ private:
// Configuration parameters for the block size
bool fIncludeWitness;
- unsigned int nBlockMaxWeight, nBlockMaxSize;
- bool fNeedSizeAccounting;
+ unsigned int nBlockMaxWeight;
CFeeRate blockMinFeeRate;
// Information on the current status of the block
uint64_t nBlockWeight;
- uint64_t nBlockSize;
uint64_t nBlockTx;
uint64_t nBlockSigOpsCost;
CAmount nFees;
diff --git a/src/net.cpp b/src/net.cpp
index 599a6128fa..b3bc8292fc 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -89,10 +89,6 @@ std::string strSubVersion;
limitedmap<uint256, int64_t> mapAlreadyAskedFor(MAX_INV_SZ);
-// Signals for message handling
-static CNodeSignals g_signals;
-CNodeSignals& GetNodeSignals() { return g_signals; }
-
void CConnman::AddOneShot(const std::string& strDest)
{
LOCK(cs_vOneShots);
@@ -665,7 +661,7 @@ void CNode::copyStats(CNodeStats &stats)
X(cleanSubVer);
}
X(fInbound);
- X(fAddnode);
+ X(m_manual_connection);
X(nStartingHeight);
{
LOCK(cs_vSend);
@@ -1114,7 +1110,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", true);
pnode->AddRef();
pnode->fWhitelisted = whitelisted;
- GetNodeSignals().InitializeNode(pnode, *this);
+ m_msgproc->InitializeNode(pnode);
LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString());
@@ -1674,6 +1670,37 @@ void CConnman::ProcessOneShot()
}
}
+bool CConnman::GetTryNewOutboundPeer()
+{
+ return m_try_another_outbound_peer;
+}
+
+void CConnman::SetTryNewOutboundPeer(bool flag)
+{
+ m_try_another_outbound_peer = flag;
+ LogPrint(BCLog::NET, "net: setting try another outbound peer=%s\n", flag ? "true" : "false");
+}
+
+// Return the number of peers we have over our outbound connection limit
+// Exclude peers that are marked for disconnect, or are going to be
+// disconnected soon (eg one-shots and feelers)
+// Also exclude peers that haven't finished initial connection handshake yet
+// (so that we don't decide we're over our desired connection limit, and then
+// evict some peer that has finished the handshake)
+int CConnman::GetExtraOutboundCount()
+{
+ int nOutbound = 0;
+ {
+ LOCK(cs_vNodes);
+ for (CNode* pnode : vNodes) {
+ if (!pnode->fInbound && !pnode->m_manual_connection && !pnode->fFeeler && !pnode->fDisconnect && !pnode->fOneShot && pnode->fSuccessfullyConnected) {
+ ++nOutbound;
+ }
+ }
+ }
+ return std::max(nOutbound - nMaxOutbound, 0);
+}
+
void CConnman::ThreadOpenConnections()
{
// Connect to specific addresses
@@ -1738,7 +1765,7 @@ void CConnman::ThreadOpenConnections()
{
LOCK(cs_vNodes);
for (CNode* pnode : vNodes) {
- if (!pnode->fInbound && !pnode->fAddnode) {
+ if (!pnode->fInbound && !pnode->m_manual_connection) {
// Count the peers that have all relevant services
if (pnode->fSuccessfullyConnected && !pnode->fFeeler && ((pnode->nServices & nRelevantServices) == nRelevantServices)) {
@@ -1768,7 +1795,8 @@ void CConnman::ThreadOpenConnections()
// * Only make a feeler connection once every few minutes.
//
bool fFeeler = false;
- if (nOutbound >= nMaxOutbound) {
+
+ if (nOutbound >= nMaxOutbound && !GetTryNewOutboundPeer()) {
int64_t nTime = GetTimeMicros(); // The current time right now (in microseconds).
if (nTime > nNextFeeler) {
nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL);
@@ -1934,7 +1962,7 @@ void CConnman::ThreadOpenAddedConnections()
}
// if successful, this moves the passed grant to the constructed node
-bool CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool fAddnode)
+bool CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool manual_connection)
{
//
// Initiate outbound network connection
@@ -1963,10 +1991,10 @@ bool CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai
pnode->fOneShot = true;
if (fFeeler)
pnode->fFeeler = true;
- if (fAddnode)
- pnode->fAddnode = true;
+ if (manual_connection)
+ pnode->m_manual_connection = true;
- GetNodeSignals().InitializeNode(pnode, *this);
+ m_msgproc->InitializeNode(pnode);
{
LOCK(cs_vNodes);
vNodes.push_back(pnode);
@@ -1996,16 +2024,16 @@ void CConnman::ThreadMessageHandler()
continue;
// Receive messages
- bool fMoreNodeWork = GetNodeSignals().ProcessMessages(pnode, *this, flagInterruptMsgProc);
+ bool fMoreNodeWork = m_msgproc->ProcessMessages(pnode, flagInterruptMsgProc);
fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend);
if (flagInterruptMsgProc)
return;
-
// Send messages
{
LOCK(pnode->cs_sendProcessing);
- GetNodeSignals().SendMessages(pnode, *this, flagInterruptMsgProc);
+ m_msgproc->SendMessages(pnode, flagInterruptMsgProc);
}
+
if (flagInterruptMsgProc)
return;
}
@@ -2211,6 +2239,7 @@ CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In) : nSeed0(nSeed0In), nSe
semOutbound = nullptr;
semAddnode = nullptr;
flagInterruptMsgProc = false;
+ SetTryNewOutboundPeer(false);
Options connOptions;
Init(connOptions);
@@ -2324,6 +2353,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
//
// Start threads
//
+ assert(m_msgproc);
InterruptSocks5(false);
interruptNet.reset();
flagInterruptMsgProc = false;
@@ -2443,9 +2473,10 @@ void CConnman::DeleteNode(CNode* pnode)
{
assert(pnode);
bool fUpdateConnectionTime = false;
- GetNodeSignals().FinalizeNode(pnode->GetId(), fUpdateConnectionTime);
- if(fUpdateConnectionTime)
+ m_msgproc->FinalizeNode(pnode->GetId(), fUpdateConnectionTime);
+ if(fUpdateConnectionTime) {
addrman.Connected(pnode->addr);
+ }
delete pnode;
}
@@ -2704,7 +2735,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
strSubVer = "";
fWhitelisted = false;
fOneShot = false;
- fAddnode = false;
+ m_manual_connection = false;
fClient = false; // set by version message
fFeeler = false;
fSuccessfullyConnected = false;
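The extra-outbound machinery added to CConnman above is driven from net_processing, per the backported commits "Connect to an extra outbound peer if our tip is stale" and "Permit disconnection of outbound peers on bad/slow chains". A hypothetical caller sketch of how the hooks are meant to fit together (the real driver is PeerLogicValidation::CheckForStaleTipAndEvictPeers, which is only referenced in this excerpt):

// Hypothetical caller sketch, not the actual net_processing code.
void MaybeTryExtraOutboundPeer(CConnman* connman, bool tip_may_be_stale)
{
    if (tip_may_be_stale) {
        // Allow one connection beyond nMaxOutbound in the hope of finding a
        // peer with a better chain; ThreadOpenConnections() honors this flag.
        connman->SetTryNewOutboundPeer(true);
    } else if (connman->GetTryNewOutboundPeer()) {
        connman->SetTryNewOutboundPeer(false);
    }
    // Any surplus reported by GetExtraOutboundCount() is then a candidate for
    // eviction, e.g. the outbound peer that least recently announced a new block.
}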
diff --git a/src/net.h b/src/net.h
index a32736aa97..6b57d5cf79 100644
--- a/src/net.h
+++ b/src/net.h
@@ -33,7 +33,6 @@
#include <arpa/inet.h>
#endif
-#include <boost/signals2/signal.hpp>
class CScheduler;
class CNode;
@@ -116,7 +115,7 @@ struct CSerializedNetMsg
std::string command;
};
-
+class NetEventsInterface;
class CConnman
{
public:
@@ -138,6 +137,7 @@ public:
int nMaxFeeler = 0;
int nBestHeight = 0;
CClientUIInterface* uiInterface = nullptr;
+ NetEventsInterface* m_msgproc = nullptr;
unsigned int nSendBufferMaxSize = 0;
unsigned int nReceiveFloodSize = 0;
uint64_t nMaxOutboundTimeframe = 0;
@@ -156,6 +156,7 @@ public:
nMaxFeeler = connOptions.nMaxFeeler;
nBestHeight = connOptions.nBestHeight;
clientInterface = connOptions.uiInterface;
+ m_msgproc = connOptions.m_msgproc;
nSendBufferMaxSize = connOptions.nSendBufferMaxSize;
nReceiveFloodSize = connOptions.nReceiveFloodSize;
nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe;
@@ -170,7 +171,7 @@ public:
void Interrupt();
bool GetNetworkActive() const { return fNetworkActive; };
void SetNetworkActive(bool active);
- bool OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool fAddnode = false);
+ bool OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool manual_connection = false);
bool CheckIncomingNonce(uint64_t nonce);
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
@@ -250,6 +251,19 @@ public:
void GetBanned(banmap_t &banmap);
void SetBanned(const banmap_t &banmap);
+ // This allows temporarily exceeding nMaxOutbound, with the goal of finding
+ // a peer that is better than all our current peers.
+ void SetTryNewOutboundPeer(bool flag);
+ bool GetTryNewOutboundPeer();
+
+ // Return the number of outbound peers we have in excess of our target (eg,
+ // if we previously called SetTryNewOutboundPeer(true), and have since set
+ // to false, we may have extra peers that we wish to disconnect). This may
+ // return a value less than (num_outbound_connections - num_outbound_slots)
+ // in cases where some outbound connections are not yet fully connected, or
+ // not yet fully disconnected.
+ int GetExtraOutboundCount();
+
bool AddNode(const std::string& node);
bool RemoveAddedNode(const std::string& node);
std::vector<AddedNodeInfo> GetAddedNodeInfo();
@@ -396,6 +410,7 @@ private:
int nMaxFeeler;
std::atomic<int> nBestHeight;
CClientUIInterface* clientInterface;
+ NetEventsInterface* m_msgproc;
/** SipHasher seeds for deterministic randomness */
const uint64_t nSeed0, nSeed1;
@@ -414,6 +429,13 @@ private:
std::thread threadOpenAddedConnections;
std::thread threadOpenConnections;
std::thread threadMessageHandler;
+
+ /** flag for deciding to connect to an extra outbound peer,
+ * in excess of nMaxOutbound
+ * This takes the place of a feeler connection */
+ std::atomic_bool m_try_another_outbound_peer;
+
+ friend struct CConnmanTest;
};
extern std::unique_ptr<CConnman> g_connman;
void Discover(boost::thread_group& threadGroup);
@@ -436,19 +458,18 @@ struct CombinerAll
}
};
-// Signals for message handling
-struct CNodeSignals
+/**
+ * Interface for message handling
+ */
+class NetEventsInterface
{
- boost::signals2::signal<bool (CNode*, CConnman&, std::atomic<bool>&), CombinerAll> ProcessMessages;
- boost::signals2::signal<bool (CNode*, CConnman&, std::atomic<bool>&), CombinerAll> SendMessages;
- boost::signals2::signal<void (CNode*, CConnman&)> InitializeNode;
- boost::signals2::signal<void (NodeId, bool&)> FinalizeNode;
+public:
+ virtual bool ProcessMessages(CNode* pnode, std::atomic<bool>& interrupt) = 0;
+ virtual bool SendMessages(CNode* pnode, std::atomic<bool>& interrupt) = 0;
+ virtual void InitializeNode(CNode* pnode) = 0;
+ virtual void FinalizeNode(NodeId id, bool& update_connection_time) = 0;
};
-
-CNodeSignals& GetNodeSignals();
-
-
enum
{
LOCAL_NONE, // unknown
@@ -508,7 +529,7 @@ public:
int nVersion;
std::string cleanSubVer;
bool fInbound;
- bool fAddnode;
+ bool m_manual_connection;
int nStartingHeight;
uint64_t nSendBytes;
mapMsgCmdSize mapSendBytesPerMsgCmd;
@@ -618,7 +639,7 @@ public:
bool fWhitelisted; // This peer can bypass DoS banning.
bool fFeeler; // If true this node is being used as a short lived feeler.
bool fOneShot;
- bool fAddnode;
+ bool m_manual_connection;
bool fClient;
const bool fInbound;
std::atomic_bool fSuccessfullyConnected;
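With CNodeSignals gone, CConnman now calls into message handling through the pure-virtual NetEventsInterface declared above; PeerLogicValidation is the real implementation wired up in init.cpp. A minimal stub sketch (hypothetical class name, e.g. for a unit test) showing the four methods a handler must provide:

// Minimal stub implementing NetEventsInterface (hypothetical test helper).
class StubNetEvents : public NetEventsInterface
{
public:
    void InitializeNode(CNode* pnode) override {}
    void FinalizeNode(NodeId id, bool& update_connection_time) override
    {
        update_connection_time = false;
    }
    bool ProcessMessages(CNode* pnode, std::atomic<bool>& interrupt) override
    {
        return false; // no more work pending for this peer
    }
    bool SendMessages(CNode* pnode, std::atomic<bool>& interrupt) override
    {
        return true;
    }
};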
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index e09b56a631..dea4b042e6 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -23,6 +23,7 @@
#include "primitives/transaction.h"
#include "random.h"
#include "reverse_iterator.h"
+#include "scheduler.h"
#include "tinyformat.h"
#include "txmempool.h"
#include "ui_interface.h"
@@ -116,6 +117,12 @@ namespace {
/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads = 0;
+ /** Number of outbound peers with m_chain_sync.m_protect. */
+ int g_outbound_peers_with_protect_from_disconnect = 0;
+
+ /** When our tip was last updated. */
+ int64_t g_last_tip_update = 0;
+
/** Relay map, protected by cs_main. */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay;
@@ -123,11 +130,6 @@ namespace {
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
} // namespace
-//////////////////////////////////////////////////////////////////////////////
-//
-// Registration of network node signals.
-//
-
namespace {
struct CBlockReject {
@@ -198,6 +200,36 @@ struct CNodeState {
*/
bool fSupportsDesiredCmpctVersion;
+ /** State used to enforce CHAIN_SYNC_TIMEOUT
+ * Only in effect for outbound, non-manual connections, with
+ * m_protect == false
+ * Algorithm: if a peer's best known block has less work than our tip,
+ * set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
+ * - If at timeout their best known block now has more work than our tip
+ * when the timeout was set, then either reset the timeout or clear it
+ * (after comparing against our current tip's work)
+ * - If at timeout their best known block still has less work than our
+ * tip did when the timeout was set, then send a getheaders message,
+ * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future.
+ * If their best known block is still behind when that new timeout is
+ * reached, disconnect.
+ */
+ struct ChainSyncTimeoutState {
+ //! A timeout used for checking whether our peer has sufficiently synced
+ int64_t m_timeout;
+ //! A header with the work we require on our peer's chain
+ const CBlockIndex * m_work_header;
+ //! After timeout is reached, set to true after sending getheaders
+ bool m_sent_getheaders;
+ //! Whether this peer is protected from disconnection due to a bad/slow chain
+ bool m_protect;
+ };
+
+ ChainSyncTimeoutState m_chain_sync;
+
+ //! Time of last new block announcement
+ int64_t m_last_block_announcement;
+
CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
fCurrentlyConnected = false;
nMisbehavior = 0;
@@ -220,6 +252,8 @@ struct CNodeState {
fHaveWitness = false;
fWantsCmpctWitness = false;
fSupportsDesiredCmpctVersion = false;
+ m_chain_sync = { 0, nullptr, false, false };
+ m_last_block_announcement = 0;
}
};
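A compressed sketch of the m_chain_sync algorithm documented in the comment above. This is not the actual eviction code (which lives in ConsiderEviction(), outside this excerpt); it only restates the described state machine, assuming CHAIN_SYNC_TIMEOUT and HEADERS_RESPONSE_TIME are defined as the comment says and omitting the getheaders send itself:

// Sketch of the CHAIN_SYNC_TIMEOUT state machine, per the comment above.
void ChainSyncTimeoutSketch(CNode& node, CNodeState& state, const CBlockIndex* tip, int64_t now)
{
    auto& sync = state.m_chain_sync;
    if (sync.m_protect) return; // protected peers are exempt

    if (state.pindexBestKnownBlock != nullptr &&
        state.pindexBestKnownBlock->nChainWork >= tip->nChainWork) {
        // Peer's best known block has at least as much work as our tip: clear any timeout.
        sync = { 0, nullptr, false, sync.m_protect };
    } else if (sync.m_timeout == 0 ||
               (sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr &&
                state.pindexBestKnownBlock->nChainWork >= sync.m_work_header->nChainWork)) {
        // Behind our tip, noticed either for the first time or after catching up
        // to the previously recorded work target: (re)start the clock against the current tip.
        sync = { now + CHAIN_SYNC_TIMEOUT, tip, false, sync.m_protect };
    } else if (now > sync.m_timeout) {
        if (!sync.m_sent_getheaders) {
            // First expiry: send getheaders (omitted here) and allow a short grace period.
            sync.m_sent_getheaders = true;
            sync.m_timeout = now + HEADERS_RESPONSE_TIME;
        } else {
            // Still behind when the grace period expires: disconnect.
            node.fDisconnect = true;
        }
    }
}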
@@ -244,7 +278,7 @@ void UpdatePreferredDownload(CNode* node, CNodeState* state)
nPreferredDownload += state->fPreferredDownload;
}
-void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
+void PushNodeVersion(CNode *pnode, CConnman* connman, int64_t nTime)
{
ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
uint64_t nonce = pnode->GetLocalNonce();
@@ -255,7 +289,7 @@ void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
CAddress addrMe = CAddress(CService(), nLocalNodeServices);
- connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
+ connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes));
if (fLogIPs) {
@@ -265,49 +299,6 @@ void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
}
}
-void InitializeNode(CNode *pnode, CConnman& connman) {
- CAddress addr = pnode->addr;
- std::string addrName = pnode->GetAddrName();
- NodeId nodeid = pnode->GetId();
- {
- LOCK(cs_main);
- mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
- }
- if(!pnode->fInbound)
- PushNodeVersion(pnode, connman, GetTime());
-}
-
-void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
- fUpdateConnectionTime = false;
- LOCK(cs_main);
- CNodeState *state = State(nodeid);
-
- if (state->fSyncStarted)
- nSyncStarted--;
-
- if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
- fUpdateConnectionTime = true;
- }
-
- for (const QueuedBlock& entry : state->vBlocksInFlight) {
- mapBlocksInFlight.erase(entry.hash);
- }
- EraseOrphansFor(nodeid);
- nPreferredDownload -= state->fPreferredDownload;
- nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
- assert(nPeersWithValidatedDownloads >= 0);
-
- mapNodeState.erase(nodeid);
-
- if (mapNodeState.empty()) {
- // Do a consistency check after the last peer is removed.
- assert(mapBlocksInFlight.empty());
- assert(nPreferredDownload == 0);
- assert(nPeersWithValidatedDownloads == 0);
- }
- LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
-}
-
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
@@ -402,7 +393,7 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
}
}
-void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) {
+void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) {
AssertLockHeld(cs_main);
CNodeState* nodestate = State(nodeid);
if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
@@ -417,26 +408,35 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) {
return;
}
}
- connman.ForNode(nodeid, [&connman](CNode* pfrom){
+ connman->ForNode(nodeid, [connman](CNode* pfrom){
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
// As per BIP152, we only get 3 of our peers to announce
// blocks using compact encodings.
- connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
- connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
+ connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
return true;
});
lNodesAnnouncingHeaderAndIDs.pop_front();
}
fAnnounceUsingCMPCTBLOCK = true;
- connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
return true;
});
}
}
+bool TipMayBeStale(const Consensus::Params &consensusParams)
+{
+ AssertLockHeld(cs_main);
+ if (g_last_tip_update == 0) {
+ g_last_tip_update = GetTime();
+ }
+ return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
+}
+
// Requires cs_main
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
@@ -466,7 +466,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<con
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability(nodeid);
- if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < UintToArith256(consensusParams.nMinimumChainWork)) {
+ if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
// This peer has nothing interesting.
return;
}
@@ -543,6 +543,69 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<con
} // namespace
+// This function is used for testing the stale tip eviction logic, see
+// DoS_tests.cpp
+void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
+{
+ LOCK(cs_main);
+ CNodeState *state = State(node);
+ if (state) state->m_last_block_announcement = time_in_seconds;
+}
+
+// Returns true for outbound peers, excluding manual connections, feelers, and
+// one-shots
+bool IsOutboundDisconnectionCandidate(const CNode *node)
+{
+ return !(node->fInbound || node->m_manual_connection || node->fFeeler || node->fOneShot);
+}
+
+void PeerLogicValidation::InitializeNode(CNode *pnode) {
+ CAddress addr = pnode->addr;
+ std::string addrName = pnode->GetAddrName();
+ NodeId nodeid = pnode->GetId();
+ {
+ LOCK(cs_main);
+ mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
+ }
+ if(!pnode->fInbound)
+ PushNodeVersion(pnode, connman, GetTime());
+}
+
+void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
+ fUpdateConnectionTime = false;
+ LOCK(cs_main);
+ CNodeState *state = State(nodeid);
+ assert(state != nullptr);
+
+ if (state->fSyncStarted)
+ nSyncStarted--;
+
+ if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
+ fUpdateConnectionTime = true;
+ }
+
+ for (const QueuedBlock& entry : state->vBlocksInFlight) {
+ mapBlocksInFlight.erase(entry.hash);
+ }
+ EraseOrphansFor(nodeid);
+ nPreferredDownload -= state->fPreferredDownload;
+ nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
+ assert(nPeersWithValidatedDownloads >= 0);
+ g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
+ assert(g_outbound_peers_with_protect_from_disconnect >= 0);
+
+ mapNodeState.erase(nodeid);
+
+ if (mapNodeState.empty()) {
+ // Do a consistency check after the last peer is removed.
+ assert(mapBlocksInFlight.empty());
+ assert(nPreferredDownload == 0);
+ assert(nPeersWithValidatedDownloads == 0);
+ assert(g_outbound_peers_with_protect_from_disconnect == 0);
+ }
+ LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
+}
+
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
@@ -558,22 +621,6 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
return true;
}
-void RegisterNodeSignals(CNodeSignals& nodeSignals)
-{
- nodeSignals.ProcessMessages.connect(&ProcessMessages);
- nodeSignals.SendMessages.connect(&SendMessages);
- nodeSignals.InitializeNode.connect(&InitializeNode);
- nodeSignals.FinalizeNode.connect(&FinalizeNode);
-}
-
-void UnregisterNodeSignals(CNodeSignals& nodeSignals)
-{
- nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
- nodeSignals.SendMessages.disconnect(&SendMessages);
- nodeSignals.InitializeNode.disconnect(&InitializeNode);
- nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
-}
-
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
@@ -725,9 +772,17 @@ void Misbehaving(NodeId pnode, int howmuch)
// blockchain -> download logic notification
//
-PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
+PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &scheduler) : connman(connmanIn), m_stale_tip_check_time(0) {
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
+
+ const Consensus::Params& consensusParams = Params().GetConsensus();
+ // Stale tip checking and peer eviction are on two different timers, but we
+ // don't want them to get out of sync due to drift in the scheduler, so we
+ // combine them in one function and schedule at the quicker (peer-eviction)
+ // timer.
+ static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
+ scheduler.scheduleEvery(std::bind(&PeerLogicValidation::CheckForStaleTipAndEvictPeers, this, consensusParams), EXTRA_PEER_CHECK_INTERVAL * 1000);
}
void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
@@ -758,6 +813,8 @@ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pb
}
LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
}
+
+ g_last_tip_update = GetTime();
}
// All of the following cache a recent block, and are protected by cs_most_recent_block
@@ -865,7 +922,7 @@ void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationSta
!IsInitialBlockDownload() &&
mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
if (it != mapBlockSource.end()) {
- MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, *connman);
+ MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
}
}
if (it != mapBlockSource.end())
@@ -910,16 +967,16 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
return true;
}
-static void RelayTransaction(const CTransaction& tx, CConnman& connman)
+static void RelayTransaction(const CTransaction& tx, CConnman* connman)
{
CInv inv(MSG_TX, tx.GetHash());
- connman.ForEachNode([&inv](CNode* pnode)
+ connman->ForEachNode([&inv](CNode* pnode)
{
pnode->PushInventory(inv);
});
}
-static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
+static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connman)
{
unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
@@ -927,7 +984,7 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connma
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the addrKnowns of the chosen nodes prevent repeats
uint64_t hashAddr = addr.GetHash();
- const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
+ const CSipHasher hasher = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
FastRandomContext insecure_rand;
std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
@@ -952,10 +1009,10 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connma
}
};
- connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
+ connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
-void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
+void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
std::vector<CInv> vNotFound;
@@ -1017,7 +1074,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
- if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
+ if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
@@ -1040,9 +1097,9 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
pblock = pblockRead;
}
if (inv.type == MSG_BLOCK)
- connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
+ connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
else if (inv.type == MSG_WITNESS_BLOCK)
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
else if (inv.type == MSG_FILTERED_BLOCK)
{
bool sendMerkleBlock = false;
@@ -1055,7 +1112,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
}
}
if (sendMerkleBlock) {
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
@@ -1064,7 +1121,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
for (PairType& pair : merkleBlock.vMatchedTxn)
- connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
+ connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
}
// else
// no response
@@ -1079,13 +1136,13 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
- connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
} else {
CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
- connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
} else {
- connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
}
}
@@ -1097,7 +1154,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// wait for other stuff first.
std::vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
pfrom->hashContinue.SetNull();
}
}
@@ -1109,14 +1166,14 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
auto mi = mapRelay.find(inv.hash);
int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
if (mi != mapRelay.end()) {
- connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
push = true;
} else if (pfrom->timeLastMempoolReq) {
auto txinfo = mempool.info(inv.hash);
// To protect privacy, do not answer getdata using the mempool when
// that TX couldn't have been INVed in reply to a MEMPOOL request.
if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
- connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
push = true;
}
}
@@ -1143,7 +1200,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
}
}
@@ -1155,7 +1212,7 @@ uint32_t GetFetchFlags(CNode* pfrom) {
return nFetchFlags;
}
-inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman& connman) {
+inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman* connman) {
BlockTransactions resp(req);
for (size_t i = 0; i < req.indexes.size(); i++) {
if (req.indexes[i] >= block.vtx.size()) {
@@ -1169,10 +1226,229 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
LOCK(cs_main);
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
- connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}
-bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
+bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool punish_duplicate_invalid)
+{
+ const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+ size_t nCount = headers.size();
+
+ if (nCount == 0) {
+ // Nothing interesting. Stop asking this peer for more headers.
+ return true;
+ }
+
+ bool received_new_header = false;
+ const CBlockIndex *pindexLast = nullptr;
+ {
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom->GetId());
+
+ // If this looks like it could be a block announcement (nCount <
+ // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
+ // don't connect:
+ // - Send a getheaders message in response to try to connect the chain.
+ // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+ // don't connect before giving DoS points
+ // - Once a headers message is received that is valid and does connect,
+ // nUnconnectingHeaders gets reset back to 0.
+ if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
+ nodestate->nUnconnectingHeaders++;
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
+ LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+ headers[0].GetHash().ToString(),
+ headers[0].hashPrevBlock.ToString(),
+ pindexBestHeader->nHeight,
+ pfrom->GetId(), nodestate->nUnconnectingHeaders);
+ // Set hashLastUnknownBlock for this peer, so that if we
+ // eventually get the headers - even from a different peer -
+ // we can use this peer to download.
+ UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
+
+ if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
+ Misbehaving(pfrom->GetId(), 20);
+ }
+ return true;
+ }
+
+ uint256 hashLastBlock;
+ for (const CBlockHeader& header : headers) {
+ if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
+ Misbehaving(pfrom->GetId(), 20);
+ return error("non-continuous headers sequence");
+ }
+ hashLastBlock = header.GetHash();
+ }
+
+ // If we don't have the last header, then they'll have given us
+ // something new (if these headers are valid).
+ if (mapBlockIndex.find(hashLastBlock) == mapBlockIndex.end()) {
+ received_new_header = true;
+ }
+ }
+
+ CValidationState state;
+ CBlockHeader first_invalid_header;
+ if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast, &first_invalid_header)) {
+ int nDoS;
+ if (state.IsInvalid(nDoS)) {
+ LOCK(cs_main);
+ if (nDoS > 0) {
+ Misbehaving(pfrom->GetId(), nDoS);
+ }
+ if (punish_duplicate_invalid && mapBlockIndex.find(first_invalid_header.GetHash()) != mapBlockIndex.end()) {
+ // Goal: don't allow outbound peers to use up our outbound
+ // connection slots if they are on incompatible chains.
+ //
+ // We ask the caller to set punish_invalid appropriately based
+ // on the peer and the method of header delivery (compact
+ // blocks are allowed to be invalid in some circumstances,
+ // under BIP 152).
+ // Here, we try to detect the narrow situation that we have a
+ // valid block header (ie it was valid at the time the header
+ // was received, and hence stored in mapBlockIndex) but know the
+ // block is invalid, and that a peer has announced that same
+ // block as being on its active chain.
+ // Disconnect the peer in such a situation.
+ //
+ // Note: if the header that is invalid was not accepted to our
+ // mapBlockIndex at all, that may also be grounds for
+ // disconnecting the peer, as the chain they are on is likely
+ // to be incompatible. However, there is a circumstance where
+ // that does not hold: if the header's timestamp is more than
+ // 2 hours ahead of our current time. In that case, the header
+ // may become valid in the future, and we don't want to
+ // disconnect a peer merely for serving us one too-far-ahead
+ // block header, to prevent an attacker from splitting the
+ // network by mining a block right at the 2 hour boundary.
+ //
+ // TODO: update the DoS logic (or, rather, rewrite the
+ // DoS-interface between validation and net_processing) so that
+ // the interface is cleaner, and so that we disconnect on all the
+ // reasons that a peer's headers chain is incompatible
+ // with ours (eg block->nVersion softforks, MTP violations,
+ // etc), and not just the duplicate-invalid case.
+ pfrom->fDisconnect = true;
+ }
+ return error("invalid header received");
+ }
+ }
+
+ {
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom->GetId());
+ if (nodestate->nUnconnectingHeaders > 0) {
+ LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
+ }
+ nodestate->nUnconnectingHeaders = 0;
+
+ assert(pindexLast);
+ UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
+
+ // From here, pindexBestKnownBlock should be guaranteed to be non-null,
+ // because it is set in UpdateBlockAvailability. Some nullptr checks
+ // are still present, however, as belt-and-suspenders.
+
+ if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) {
+ nodestate->m_last_block_announcement = GetTime();
+ }
+
+ if (nCount == MAX_HEADERS_RESULTS) {
+ // Headers message had its maximum size; the peer may have more headers.
+ // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
+ // from there instead.
+ LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
+ }
+
+ bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
+ // If this set of headers is valid and ends in a block with at least as
+ // much work as our tip, download as much as possible.
+ if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
+ std::vector<const CBlockIndex*> vToFetch;
+ const CBlockIndex *pindexWalk = pindexLast;
+ // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+ while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
+ !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
+ (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
+ // We don't have this block, and it's not yet in flight.
+ vToFetch.push_back(pindexWalk);
+ }
+ pindexWalk = pindexWalk->pprev;
+ }
+ // If pindexWalk still isn't on our main chain, we're looking at a
+ // very large reorg at a time we think we're close to caught up to
+ // the main chain -- this shouldn't really happen. Bail out on the
+ // direct fetch and rely on parallel download instead.
+ if (!chainActive.Contains(pindexWalk)) {
+ LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
+ pindexLast->GetBlockHash().ToString(),
+ pindexLast->nHeight);
+ } else {
+ std::vector<CInv> vGetData;
+ // Download as much as possible, from earliest to latest.
+ for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
+ if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ // Can't download any more from this peer
+ break;
+ }
+ uint32_t nFetchFlags = GetFetchFlags(pfrom);
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
+ LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+ pindex->GetBlockHash().ToString(), pfrom->GetId());
+ }
+ if (vGetData.size() > 1) {
+ LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
+ pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+ }
+ if (vGetData.size() > 0) {
+ if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+ // In any case, we want to download using a compact block, not a regular one
+ vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+ }
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ }
+ }
+ }
+ // If we're in IBD, we want outbound peers that will serve us a useful
+ // chain. Disconnect peers that are on chains with insufficient work.
+ if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
+ // When nCount < MAX_HEADERS_RESULTS, we know we have no more
+ // headers to fetch from this peer.
+ if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+ // This peer has too little work on their headers chain to help
+ // us sync -- disconnect if using an outbound slot (unless
+ // whitelisted or addnode).
+ // Note: We compare their tip to nMinimumChainWork (rather than
+ // chainActive.Tip()) because we won't start block download
+ // until we have a headers chain that has at least
+ // nMinimumChainWork, even if a peer has a chain past our tip,
+ // as an anti-DoS measure.
+ if (IsOutboundDisconnectionCandidate(pfrom)) {
+ LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
+ pfrom->fDisconnect = true;
+ }
+ }
+ }
+
+ if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
+ // If this is an outbound peer, check to see if we should protect
+ // it from the bad/lagging chain logic.
+ if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+ LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId());
+ nodestate->m_chain_sync.m_protect = true;
+ ++g_outbound_peers_with_protect_from_disconnect;
+ }
+ }
+ }
+
+ return true;
+}
+
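
A brief editorial aside: the continuity loop above only checks that each header in the message builds directly on the previous one; connecting the batch to our own block index is handled separately. A minimal, self-contained sketch of that invariant (simplified header type and illustrative names, not part of the patch):

// Editorial sketch, not part of the patch: the headers-continuity rule from
// ProcessHeadersMessage, expressed over a simplified header type.
#include <string>
#include <vector>

struct SimpleHeader {
    std::string hash;      // stand-in for CBlockHeader::GetHash()
    std::string hashPrev;  // stand-in for CBlockHeader::hashPrevBlock
};

// True iff every header builds directly on the one before it, mirroring the
// hashLastBlock loop above (an empty vector is trivially continuous).
bool HeadersAreContinuous(const std::vector<SimpleHeader>& headers)
{
    std::string last;
    for (const SimpleHeader& h : headers) {
        if (!last.empty() && h.hashPrev != last) return false;  // gap detected
        last = h.hash;
    }
    return true;
}

In the real handler a failed check costs the peer 20 misbehavior points and aborts processing of the message.
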
+bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
@@ -1225,7 +1501,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
- connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message")));
+ connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message")));
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 1);
return false;
@@ -1249,12 +1525,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
nServices = ServiceFlags(nServiceInt);
if (!pfrom->fInbound)
{
- connman.SetServices(pfrom->addr, nServices);
+ connman->SetServices(pfrom->addr, nServices);
}
if (pfrom->nServicesExpected & ~nServices)
{
LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, pfrom->nServicesExpected);
- connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
+ connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
pfrom->fDisconnect = true;
return false;
@@ -1275,7 +1551,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
{
// disconnect from peers older than this proto version
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->GetId(), nVersion);
- connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
+ connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
pfrom->fDisconnect = true;
return false;
@@ -1295,7 +1571,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (!vRecv.empty())
vRecv >> fRelay;
// Disconnect if we connected to ourself
- if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
+ if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce))
{
LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
pfrom->fDisconnect = true;
@@ -1311,7 +1587,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (pfrom->fInbound)
PushNodeVersion(pfrom, connman, GetAdjustedTime());
- connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
+ connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
pfrom->nServices = nServices;
pfrom->SetAddrLocal(addrMe);
@@ -1362,12 +1638,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
// Get recent addresses
- if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
+ if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman->GetAddressCount() < 1000)
{
- connman.PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
+ connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
pfrom->fGetAddr = true;
}
- connman.MarkAddressGood(pfrom->addr);
+ connman->MarkAddressGood(pfrom->addr);
}
std::string remoteAddr;
@@ -1386,7 +1662,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// If the peer is old enough to have the old alert system, send it the final alert.
if (pfrom->nVersion <= 70012) {
CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
- connman.PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
+ connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
}
// Feeler connections exist only to verify if address is online.
@@ -1424,7 +1700,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
}
if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
// Tell our peer we are willing to provide version 1 or 2 cmpctblocks
@@ -1435,9 +1711,9 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 2;
if (pfrom->GetLocalServices() & NODE_WITNESS)
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
nCMPCTBLOCKVersion = 1;
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
}
pfrom->fSuccessfullyConnected = true;
}
@@ -1456,7 +1732,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
- if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000)
+ if (pfrom->nVersion < CADDR_TIME_VERSION && connman->GetAddressCount() > 1000)
return true;
if (vAddr.size() > 1000)
{
@@ -1490,7 +1766,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (fReachable)
vAddrOk.push_back(addr);
}
- connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
+ connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
if (pfrom->fOneShot)
@@ -1568,7 +1844,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// fell back to inv we probably have a reorg which we should get the headers for first,
// we now only provide a getheaders response here. When we receive the headers, we will
// then ask for the blocks we need.
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
}
}
@@ -1774,7 +2050,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// will re-announce the new block via headers (or compact blocks again)
// in the SendMessages logic.
nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
}
@@ -1955,7 +2231,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
pfrom->GetId(),
FormatStateMessage(state));
if (state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
if (nDoS > 0) {
Misbehaving(pfrom->GetId(), nDoS);
@@ -1969,15 +2245,21 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
CBlockHeaderAndShortTxIDs cmpctblock;
vRecv >> cmpctblock;
+ bool received_new_header = false;
+
{
LOCK(cs_main);
if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
if (!IsInitialBlockDownload())
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
return true;
}
+
+ if (mapBlockIndex.find(cmpctblock.header.GetHash()) == mapBlockIndex.end()) {
+ received_new_header = true;
+ }
}
const CBlockIndex *pindex = nullptr;
@@ -2004,7 +2286,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// If we end up treating this as a plain headers message, call that as well
// without cs_main.
bool fRevertToHeaderProcessing = false;
- CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
// Keep a CBlock for "optimistic" compactblock reconstructions (see
// below)
@@ -2017,6 +2298,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
assert(pindex);
UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
+ CNodeState *nodestate = State(pfrom->GetId());
+
+ // If this was a new header with more work than our tip, update the
+ // peer's last block announcement time
+ if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) {
+ nodestate->m_last_block_announcement = GetTime();
+ }
+
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
@@ -2030,7 +2319,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// so we just grab the block via normal getdata
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
}
return true;
}
@@ -2039,8 +2328,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
return true;
- CNodeState *nodestate = State(pfrom->GetId());
-
if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
// Don't bother trying to process compact blocks from v1 peers
// after segwit activates.
@@ -2074,7 +2361,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Duplicate txindexes, the block is now in-flight, so just request it
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return true;
}
@@ -2091,7 +2378,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
fProcessBLOCKTXN = true;
} else {
req.blockhash = pindex->GetBlockHash();
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
}
} else {
// This block is either already in flight from a different
@@ -2117,14 +2404,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// mempool will probably be useless - request the block normally
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return true;
} else {
// If this was an announce-cmpctblock, we want the same treatment as a header message
- // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions)
- std::vector<CBlock> headers;
- headers.push_back(cmpctblock.header);
- vHeadersMsg << headers;
fRevertToHeaderProcessing = true;
}
}
@@ -2133,8 +2416,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (fProcessBLOCKTXN)
return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
- if (fRevertToHeaderProcessing)
- return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
+ if (fRevertToHeaderProcessing) {
+ // Headers received from HB compact block peers are permitted to be
+ // relayed before full validation (see BIP 152), so we don't want to disconnect
+ // the peer if the header turns out to be for an invalid block.
+ // Note that if a peer tries to build on an invalid chain, that
+ // will be detected and the peer will be banned.
+ return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false);
+ }
if (fBlockReconstructed) {
// If we got here, we were able to optimistically reconstruct a
@@ -2144,7 +2433,16 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false));
}
bool fNewBlock = false;
- ProcessNewBlock(chainparams, pblock, true, &fNewBlock);
+ // Setting fForceProcessing to true means that we bypass some of
+ // our anti-DoS protections in AcceptBlock, which filters
+ // unrequested blocks that might be trying to waste our resources
+ // (eg disk space). Because we only try to reconstruct blocks when
+ // we're close to caught up (via the CanDirectFetch() requirement
+ // above, combined with the behavior of not requesting blocks until
+ // we have a chain with at least nMinimumChainWork), and we ignore
+ // compact blocks with less work than our tip, it is safe to treat
+ // reconstructed compact blocks as having been requested.
+ ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
@@ -2191,7 +2489,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Might have collided, fall back to getdata now :(
std::vector<CInv> invs;
invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
} else {
// Block is either okay, or possibly we received
// READ_STATUS_CHECKBLOCK_FAILED.
@@ -2224,7 +2522,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
bool fNewBlock = false;
// Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
// even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
- ProcessNewBlock(chainparams, pblock, true, &fNewBlock);
+ // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
+ // disk-space attacks), but this should be safe due to the
+ // protections in the compact block handler -- see related comment
+ // in compact block optimistic reconstruction handling.
+ ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
@@ -2252,136 +2554,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
- if (nCount == 0) {
- // Nothing interesting. Stop asking this peers for more headers.
- return true;
- }
-
- const CBlockIndex *pindexLast = nullptr;
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom->GetId());
-
- // If this looks like it could be a block announcement (nCount <
- // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
- // don't connect:
- // - Send a getheaders message in response to try to connect the chain.
- // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
- // don't connect before giving DoS points
- // - Once a headers message is received that is valid and does connect,
- // nUnconnectingHeaders gets reset back to 0.
- if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
- nodestate->nUnconnectingHeaders++;
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
- LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
- headers[0].GetHash().ToString(),
- headers[0].hashPrevBlock.ToString(),
- pindexBestHeader->nHeight,
- pfrom->GetId(), nodestate->nUnconnectingHeaders);
- // Set hashLastUnknownBlock for this peer, so that if we
- // eventually get the headers - even from a different peer -
- // we can use this peer to download.
- UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
-
- if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
- Misbehaving(pfrom->GetId(), 20);
- }
- return true;
- }
-
- uint256 hashLastBlock;
- for (const CBlockHeader& header : headers) {
- if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
- Misbehaving(pfrom->GetId(), 20);
- return error("non-continuous headers sequence");
- }
- hashLastBlock = header.GetHash();
- }
- }
-
- CValidationState state;
- if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
- int nDoS;
- if (state.IsInvalid(nDoS)) {
- if (nDoS > 0) {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), nDoS);
- }
- return error("invalid header received");
- }
- }
-
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom->GetId());
- if (nodestate->nUnconnectingHeaders > 0) {
- LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
- }
- nodestate->nUnconnectingHeaders = 0;
-
- assert(pindexLast);
- UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
-
- if (nCount == MAX_HEADERS_RESULTS) {
- // Headers message had its maximum size; the peer may have more headers.
- // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
- // from there instead.
- LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
- }
-
- bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
- // If this set of headers is valid and ends in a block with at least as
- // much work as our tip, download as much as possible.
- if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
- std::vector<const CBlockIndex*> vToFetch;
- const CBlockIndex *pindexWalk = pindexLast;
- // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
- while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
- !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
- (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
- // We don't have this block, and it's not yet in flight.
- vToFetch.push_back(pindexWalk);
- }
- pindexWalk = pindexWalk->pprev;
- }
- // If pindexWalk still isn't on our main chain, we're looking at a
- // very large reorg at a time we think we're close to caught up to
- // the main chain -- this shouldn't really happen. Bail out on the
- // direct fetch and rely on parallel download instead.
- if (!chainActive.Contains(pindexWalk)) {
- LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
- pindexLast->GetBlockHash().ToString(),
- pindexLast->nHeight);
- } else {
- std::vector<CInv> vGetData;
- // Download as much as possible, from earliest to latest.
- for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
- if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- // Can't download any more from this peer
- break;
- }
- uint32_t nFetchFlags = GetFetchFlags(pfrom);
- vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
- LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
- pindex->GetBlockHash().ToString(), pfrom->GetId());
- }
- if (vGetData.size() > 1) {
- LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
- pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
- }
- if (vGetData.size() > 0) {
- if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
- // In any case, we want to download using a compact block, not a regular one
- vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
- }
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
- }
- }
- }
- }
+ // Headers received via a HEADERS message should be valid, and reflect
+ // the chain the peer is on. If we receive a known-invalid header,
+ // disconnect the peer if it is using one of our outbound connection
+ // slots.
+ bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection;
+ return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish);
}
else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
@@ -2391,11 +2569,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId());
- // Process all blocks from whitelisted peers, even if not requested,
- // unless we're still syncing with the network.
- // Such an unrequested block may still be processed, subject to the
- // conditions in AcceptBlock().
- bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
+ bool forceProcessing = false;
const uint256 hash(pblock->GetHash());
{
LOCK(cs_main);
@@ -2438,7 +2612,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
pfrom->fSentAddr = true;
pfrom->vAddrToSend.clear();
- std::vector<CAddress> vAddr = connman.GetAddresses();
+ std::vector<CAddress> vAddr = connman->GetAddresses();
FastRandomContext insecure_rand;
for (const CAddress &addr : vAddr)
pfrom->PushAddress(addr, insecure_rand);
@@ -2454,7 +2628,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return true;
}
- if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
+ if (connman->OutboundTargetReached(false) && !pfrom->fWhitelisted)
{
LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
pfrom->fDisconnect = true;
@@ -2483,7 +2657,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
}
}
@@ -2629,13 +2803,13 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return true;
}
-static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman& connman)
+static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman* connman)
{
AssertLockHeld(cs_main);
CNodeState &state = *State(pnode->GetId());
for (const CBlockReject& reject : state.rejects) {
- connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
+ connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
}
state.rejects.clear();
@@ -2643,7 +2817,7 @@ static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman& connman)
state.fShouldBan = false;
if (pnode->fWhitelisted)
LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->addr.ToString());
- else if (pnode->fAddnode)
+ else if (pnode->m_manual_connection)
LogPrintf("Warning: not punishing addnoded peer %s!\n", pnode->addr.ToString());
else {
pnode->fDisconnect = true;
@@ -2651,7 +2825,7 @@ static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman& connman)
LogPrintf("Warning: not banning local peer %s!\n", pnode->addr.ToString());
else
{
- connman.Ban(pnode->addr, BanReasonNodeMisbehaving);
+ connman->Ban(pnode->addr, BanReasonNodeMisbehaving);
}
}
return true;
@@ -2659,7 +2833,7 @@ static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman& connman)
return false;
}
-bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
+bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
const CChainParams& chainparams = Params();
//
@@ -2693,7 +2867,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& i
// Just take one message
msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
- pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman.GetReceiveFloodSize();
+ pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
fMoreWork = !pfrom->vProcessMsg.empty();
}
CNetMessage& msg(msgs.front());
@@ -2742,7 +2916,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& i
}
catch (const std::ios_base::failure& e)
{
- connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, std::string("error parsing message")));
+ connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, std::string("error parsing message")));
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from under-length message on vRecv
@@ -2779,6 +2953,135 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& i
return fMoreWork;
}
+void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
+{
+ AssertLockHeld(cs_main);
+
+ CNodeState &state = *State(pto->GetId());
+ const CNetMsgMaker msgMaker(pto->GetSendVersion());
+
+ if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
+ // This is an outbound peer subject to disconnection if they don't
+ // announce a block with as much work as the current tip within
+ // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
+ // their chain has more work than ours, we should sync to it,
+ // unless it's invalid, in which case we should find that out and
+ // disconnect from them elsewhere).
+ if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork) {
+ if (state.m_chain_sync.m_timeout != 0) {
+ state.m_chain_sync.m_timeout = 0;
+ state.m_chain_sync.m_work_header = nullptr;
+ state.m_chain_sync.m_sent_getheaders = false;
+ }
+ } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
+ // Our best block known by this peer is behind our tip, and we're either noticing
+ // that for the first time, OR this peer was able to catch up to some earlier point
+ // where we checked against our tip.
+ // Either way, set a new timeout based on current tip.
+ state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
+ state.m_chain_sync.m_work_header = chainActive.Tip();
+ state.m_chain_sync.m_sent_getheaders = false;
+ } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
+ // No evidence yet that our peer has synced to a chain with work equal to that
+ // of our tip, when we first detected it was behind. Send a single getheaders
+ // message to give the peer a chance to update us.
+ if (state.m_chain_sync.m_sent_getheaders) {
+ // They've run out of time to catch up!
+ LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
+ pto->fDisconnect = true;
+ } else {
+ LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
+ state.m_chain_sync.m_sent_getheaders = true;
+ constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
+ // Bump the timeout to allow a response, which could clear the timeout
+ // (if the response shows the peer has synced), reset the timeout (if
+ // the peer syncs to the required work but not to our tip), or result
+ // in disconnect (if we advance to the timeout and pindexBestKnownBlock
+ // has not sufficiently progressed)
+ state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
+ }
+ }
+ }
+}
+
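
For readers skimming the patch, the timeout handling above is effectively a four-way decision: clear all state once the peer's best known block has at least our tip's work, (re)arm a CHAIN_SYNC_TIMEOUT deadline against the current tip when the peer is behind, send a single getheaders and extend the deadline by HEADERS_RESPONSE_TIME when it passes, and disconnect if it passes again. A hedged, self-contained sketch of just those timing decisions (simplified fields and parameters, not part of the patch):

// Editorial sketch, not part of the patch: the ConsiderEviction timing logic
// reduced to pure data. Work values and constants are simplified stand-ins.
#include <cstdint>

struct ChainSyncState {
    int64_t timeout = 0;          // 0 = no deadline armed
    bool sent_getheaders = false;
    int64_t benchmark_work = 0;   // work of the tip we measured against
};

enum class Action { NONE, ARM_TIMEOUT, SEND_GETHEADERS, DISCONNECT };

Action Consider(ChainSyncState& s, int64_t now, int64_t peer_best_work,
                int64_t our_tip_work, int64_t chain_sync_timeout,
                int64_t headers_response_time)
{
    if (peer_best_work >= our_tip_work) {          // peer has caught up
        s = ChainSyncState{};
        return Action::NONE;
    }
    if (s.timeout == 0 || peer_best_work >= s.benchmark_work) {
        s.timeout = now + chain_sync_timeout;      // (re)arm against current tip
        s.benchmark_work = our_tip_work;
        s.sent_getheaders = false;
        return Action::ARM_TIMEOUT;
    }
    if (now > s.timeout) {
        if (s.sent_getheaders) return Action::DISCONNECT;  // ran out of time
        s.sent_getheaders = true;
        s.timeout = now + headers_response_time;           // one last chance
        return Action::SEND_GETHEADERS;
    }
    return Action::NONE;
}

With CHAIN_SYNC_TIMEOUT at 20 minutes and HEADERS_RESPONSE_TIME at 2 minutes, this lines up with the 21- and 24-minute mocktime jumps in the outbound_slow_chain_eviction test added below.
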
+void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
+{
+ // Check whether we have too many outbound peers
+ int extra_peers = connman->GetExtraOutboundCount();
+ if (extra_peers > 0) {
+ // If we have more outbound peers than we target, disconnect one.
+ // Pick the outbound peer that least recently announced a new
+ // block to us, with ties broken by choosing the more recent
+ // connection (higher node id)
+ NodeId worst_peer = -1;
+ int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
+
+ LOCK(cs_main);
+
+ connman->ForEachNode([&](CNode* pnode) {
+ // Ignore non-outbound peers, or nodes marked for disconnect already
+ if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) return;
+ CNodeState *state = State(pnode->GetId());
+ if (state == nullptr) return; // shouldn't be possible, but just in case
+ // Don't evict our protected peers
+ if (state->m_chain_sync.m_protect) return;
+ if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
+ worst_peer = pnode->GetId();
+ oldest_block_announcement = state->m_last_block_announcement;
+ }
+ });
+ if (worst_peer != -1) {
+ bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
+ // Only disconnect a peer that has been connected to us for
+ // some reasonable fraction of our check-frequency, to give
+ // it time for new information to have arrived.
+ // Also don't disconnect any peer we're trying to download a
+ // block from.
+ CNodeState &state = *State(pnode->GetId());
+ if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
+ LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
+ pnode->fDisconnect = true;
+ return true;
+ } else {
+ LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
+ return false;
+ }
+ });
+ if (disconnected) {
+ // If we disconnected an extra peer, that means we successfully
+ // connected to at least one peer after the last time we
+ // detected a stale tip. Don't try any more extra peers until
+ // we next detect a stale tip, to limit the load we put on the
+ // network from these extra connections.
+ connman->SetTryNewOutboundPeer(false);
+ }
+ }
+ }
+}
+
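
The eviction candidate above is simply the outbound peer with the oldest m_last_block_announcement, with ties broken toward the higher (newer) node id. A small illustrative reduction of that selection (simplified types, not part of the patch):

// Editorial sketch, not part of the patch: how the "worst" extra outbound
// peer is chosen -- oldest last block announcement, ties go to the newer id.
#include <cstdint>
#include <utility>
#include <vector>

using PeerInfo = std::pair<int64_t, int64_t>;  // {last_block_announcement, node_id}

int64_t PickWorstPeer(const std::vector<PeerInfo>& peers)
{
    int64_t worst_id = -1;
    int64_t oldest = INT64_MAX;
    for (const auto& [announced, id] : peers) {
        if (announced < oldest || (announced == oldest && id > worst_id)) {
            worst_id = id;
            oldest = announced;
        }
    }
    return worst_id;  // -1 if peers is empty
}

The chosen peer is then only actually disconnected if it has been connected longer than MINIMUM_CONNECT_TIME and has no blocks in flight.
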
+void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
+{
+ if (connman == nullptr) return;
+
+ int64_t time_in_seconds = GetTime();
+
+ EvictExtraOutboundPeers(time_in_seconds);
+
+ if (time_in_seconds > m_stale_tip_check_time) {
+ LOCK(cs_main);
+ // Check whether our tip is stale, and if so, allow using an extra
+ // outbound peer
+ if (TipMayBeStale(consensusParams)) {
+ LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
+ connman->SetTryNewOutboundPeer(true);
+ } else if (connman->GetTryNewOutboundPeer()) {
+ connman->SetTryNewOutboundPeer(false);
+ }
+ m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
+ }
+}
+
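
One more editorial note: the method above throttles itself with m_stale_tip_check_time, so the staleness check runs at most once per STALE_CHECK_INTERVAL. A minimal sketch of that throttle (TipMayBeStale is replaced by a boolean input here; not part of the patch):

// Editorial sketch, not part of the patch: the stale-tip check throttle.
#include <cstdint>

struct StaleTipChecker {
    int64_t next_check_time = 0;
    int64_t check_interval = 10 * 60;  // mirrors STALE_CHECK_INTERVAL (seconds)

    // Returns true if the check ran and the tip looked stale, i.e. the caller
    // would then allow trying an extra outbound peer.
    bool MaybeCheck(int64_t now, bool tip_is_stale)
    {
        if (now <= next_check_time) return false;  // checked recently; skip
        next_check_time = now + check_interval;
        return tip_is_stale;
    }
};

The stale_tip_peer_management test added below trips this path by advancing mocktime past 3 * nPowTargetSpacing before re-running the check.
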
class CompareInvMempoolOrder
{
CTxMemPool *mp;
@@ -2796,7 +3099,7 @@ public:
}
};
-bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
+bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptMsgProc)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
{
@@ -2828,11 +3131,11 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
pto->nPingUsecStart = GetTimeMicros();
if (pto->nVersion > BIP0031_VERSION) {
pto->nPingNonceSent = nonce;
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
} else {
// Peer is too old to support ping command with nonce, pong will never arrive.
pto->nPingNonceSent = 0;
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
}
}
@@ -2867,14 +3170,14 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
{
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
// we only send the big addr message once
if (pto->vAddrToSend.capacity() > 40)
pto->vAddrToSend.shrink_to_fit();
@@ -2901,7 +3204,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
}
}
@@ -2910,7 +3213,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
// transactions become unconfirmed and spams other nodes.
if (!fReindex && !fImporting && !IsInitialBlockDownload())
{
- GetMainSignals().Broadcast(nTimeBestReceived, &connman);
+ GetMainSignals().Broadcast(nTimeBestReceived, connman);
}
//
@@ -2994,10 +3297,10 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
LOCK(cs_most_recent_block);
if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
- connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
+ connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
else {
CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
- connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
fGotBlockFromCache = true;
}
@@ -3007,7 +3310,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
assert(ret);
CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
- connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
state.pindexBestHeaderSent = pBestIndex;
} else if (state.fPreferHeaders) {
@@ -3020,7 +3323,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->GetId());
}
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
state.pindexBestHeaderSent = pBestIndex;
} else
fRevertToInv = true;
@@ -3066,7 +3369,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
for (const uint256& hash : pto->vInventoryBlockToSend) {
vInv.push_back(CInv(MSG_BLOCK, hash));
if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
}
@@ -3112,7 +3415,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
pto->filterInventoryKnown.insert(hash);
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
}
@@ -3178,7 +3481,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
}
}
if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
pto->filterInventoryKnown.insert(hash);
@@ -3186,7 +3489,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
}
}
if (!vInv.empty())
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
// Detect whether we're stalling
nNow = GetTimeMicros();
@@ -3245,6 +3548,9 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
}
}
+ // Check that outbound peers have reasonable chains
+ // GetTime() is used by this anti-DoS logic so we can test this using mocktime
+ ConsiderEviction(pto, GetTime());
//
// Message: getdata (blocks)
@@ -3281,7 +3587,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
vGetData.clear();
}
} else {
@@ -3291,7 +3597,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty())
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
//
// Message: feefilter
@@ -3308,7 +3614,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
// We always have a fee filter of at least minRelayTxFee
filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
if (filterToSend != pto->lastSentFeeFilter) {
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
+ connman->PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
pto->lastSentFeeFilter = filterToSend;
}
pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
diff --git a/src/net_processing.h b/src/net_processing.h
index db6d81e6b6..0a49972eed 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -8,6 +8,7 @@
#include "net.h"
#include "validationinterface.h"
+#include "consensus/params.h"
/** Default for -maxorphantx, maximum number of orphan transactions kept in memory */
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
@@ -21,23 +22,51 @@ static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100;
* Timeout = base + per_header * (expected number of headers) */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
+/** Protect at least this many outbound peers from disconnection due to a slow
+ * or lagging headers chain.
+ */
+static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
+/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */
+static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
+/** How frequently to check for stale tips, in seconds */
+static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes
+/** How frequently to check for extra outbound peers and disconnect, in seconds */
+static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
+/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict, in seconds */
+static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
-/** Register with a network node to receive its signals */
-void RegisterNodeSignals(CNodeSignals& nodeSignals);
-/** Unregister a network node */
-void UnregisterNodeSignals(CNodeSignals& nodeSignals);
-
-class PeerLogicValidation : public CValidationInterface {
+class PeerLogicValidation : public CValidationInterface, public NetEventsInterface {
private:
- CConnman* connman;
+ CConnman* const connman;
public:
- PeerLogicValidation(CConnman* connmanIn);
+ explicit PeerLogicValidation(CConnman* connman, CScheduler &scheduler);
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected, const std::vector<CTransactionRef>& vtxConflicted) override;
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override;
void BlockChecked(const CBlock& block, const CValidationState& state) override;
void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override;
+
+
+ void InitializeNode(CNode* pnode) override;
+ void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) override;
+ /** Process protocol messages received from a given node */
+ bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override;
+ /**
+ * Send queued protocol messages to be sent to a give node.
+ *
+ * @param[in] pto The node which we are sending messages to.
+ * @param[in] interrupt Interrupt condition for processing threads
+ * @return True if there is more work to be done
+ */
+ bool SendMessages(CNode* pto, std::atomic<bool>& interrupt) override;
+
+ void ConsiderEviction(CNode *pto, int64_t time_in_seconds);
+ void CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams);
+ void EvictExtraOutboundPeers(int64_t time_in_seconds);
+
+private:
+ int64_t m_stale_tip_check_time; //! Next time to check for stale tip
};
struct CNodeStateStats {
@@ -52,16 +81,4 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats);
/** Increase a node's misbehavior score. */
void Misbehaving(NodeId nodeid, int howmuch);
-/** Process protocol messages received from a given node */
-bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& interrupt);
-/**
- * Send queued protocol messages to be sent to a give node.
- *
- * @param[in] pto The node which we are sending messages to.
- * @param[in] connman The connection manager for that node.
- * @param[in] interrupt Interrupt condition for processing threads
- * @return True if there is more work to be done
- */
-bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interrupt);
-
#endif // BITCOIN_NET_PROCESSING_H
diff --git a/src/policy/policy.h b/src/policy/policy.h
index c06820f84e..ef71dd73bc 100644
--- a/src/policy/policy.h
+++ b/src/policy/policy.h
@@ -16,10 +16,8 @@
class CCoinsViewCache;
class CTxOut;
-/** Default for -blockmaxsize, which controls the maximum size of block the mining code will create **/
-static const unsigned int DEFAULT_BLOCK_MAX_SIZE = 750000;
/** Default for -blockmaxweight, which controls the range of block weights the mining code will create **/
-static const unsigned int DEFAULT_BLOCK_MAX_WEIGHT = 3000000;
+static const unsigned int DEFAULT_BLOCK_MAX_WEIGHT = MAX_BLOCK_WEIGHT - 4000;
/** Default for -blockmintxfee, which sets the minimum feerate for a transaction in blocks created by mining code **/
static const unsigned int DEFAULT_BLOCK_MIN_TX_FEE = 1000;
/** The maximum weight for transactions we're willing to relay/mine */
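
As a worked example of the new default (assuming the usual consensus constant MAX_BLOCK_WEIGHT = 4,000,000), the mining code now targets nearly full blocks with a small safety margin:

// Editorial sketch, not part of the patch: headroom left by the new default,
// assuming the consensus constant MAX_BLOCK_WEIGHT = 4,000,000 weight units.
static const unsigned int EXAMPLE_MAX_BLOCK_WEIGHT = 4000000;
static const unsigned int EXAMPLE_DEFAULT_BLOCK_MAX_WEIGHT =
    EXAMPLE_MAX_BLOCK_WEIGHT - 4000;  // = 3,996,000 weight units
// Since weight = base size * 3 + total size, a block with no witness data and
// this weight would be 3,996,000 / 4 = 999,000 bytes.
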
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index f498b5c8ea..441cd1e8f4 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -195,7 +195,6 @@ UniValue getmininginfo(const JSONRPCRequest& request)
"\nResult:\n"
"{\n"
" \"blocks\": nnn, (numeric) The current block\n"
- " \"currentblocksize\": nnn, (numeric) The last block size\n"
" \"currentblockweight\": nnn, (numeric) The last block weight\n"
" \"currentblocktx\": nnn, (numeric) The last block transaction\n"
" \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n"
@@ -214,7 +213,6 @@ UniValue getmininginfo(const JSONRPCRequest& request)
UniValue obj(UniValue::VOBJ);
obj.push_back(Pair("blocks", (int)chainActive.Height()));
- obj.push_back(Pair("currentblocksize", (uint64_t)nLastBlockSize));
obj.push_back(Pair("currentblockweight", (uint64_t)nLastBlockWeight));
obj.push_back(Pair("currentblocktx", (uint64_t)nLastBlockTx));
obj.push_back(Pair("difficulty", (double)GetDifficulty()));
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index e463a4eda4..2916bdc02f 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -156,7 +156,7 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
// their ver message.
obj.push_back(Pair("subver", stats.cleanSubVer));
obj.push_back(Pair("inbound", stats.fInbound));
- obj.push_back(Pair("addnode", stats.fAddnode));
+ obj.push_back(Pair("addnode", stats.m_manual_connection));
obj.push_back(Pair("startingheight", stats.nStartingHeight));
if (fStateStats) {
obj.push_back(Pair("banscore", statestats.nMisbehavior));
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index ffbeeb7d91..d1f9e63ecf 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -40,8 +40,138 @@ CService ip(uint32_t i)
static NodeId id = 0;
+void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds);
+
BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup)
+// Test eviction of an outbound peer whose chain never advances
+// Mock a node connection, and use mocktime to simulate a peer
+// which never sends any headers messages. PeerLogic should
+// decide to evict that outbound peer, after the appropriate timeouts.
+// Note that we protect 4 outbound nodes from being subject to
+// this logic; this test takes advantage of that protection only
+// being applied to nodes which send headers with sufficient
+// work.
+BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
+{
+ std::atomic<bool> interruptDummy(false);
+
+ // Mock an outbound peer
+ CAddress addr1(ip(0xa0b0c001), NODE_NONE);
+ CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", /*fInboundIn=*/ false);
+ dummyNode1.SetSendVersion(PROTOCOL_VERSION);
+
+ peerLogic->InitializeNode(&dummyNode1);
+ dummyNode1.nVersion = 1;
+ dummyNode1.fSuccessfullyConnected = true;
+
+ // This test requires that we have a chain with non-zero work.
+ BOOST_CHECK(chainActive.Tip() != nullptr);
+ BOOST_CHECK(chainActive.Tip()->nChainWork > 0);
+
+ // Test starts here
+ peerLogic->SendMessages(&dummyNode1, interruptDummy); // should result in getheaders
+ BOOST_CHECK(dummyNode1.vSendMsg.size() > 0);
+ dummyNode1.vSendMsg.clear();
+
+ int64_t nStartTime = GetTime();
+ // Wait 21 minutes
+ SetMockTime(nStartTime+21*60);
+ peerLogic->SendMessages(&dummyNode1, interruptDummy); // should result in getheaders
+ BOOST_CHECK(dummyNode1.vSendMsg.size() > 0);
+ // Wait 3 more minutes
+ SetMockTime(nStartTime+24*60);
+ peerLogic->SendMessages(&dummyNode1, interruptDummy); // should result in disconnect
+ BOOST_CHECK(dummyNode1.fDisconnect == true);
+ SetMockTime(0);
+
+ bool dummy;
+ peerLogic->FinalizeNode(dummyNode1.GetId(), dummy);
+}
+
+void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidation &peerLogic)
+{
+ CAddress addr(ip(GetRandInt(0xffffffff)), NODE_NONE);
+ vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", /*fInboundIn=*/ false));
+ CNode &node = *vNodes.back();
+ node.SetSendVersion(PROTOCOL_VERSION);
+
+ peerLogic.InitializeNode(&node);
+ node.nVersion = 1;
+ node.fSuccessfullyConnected = true;
+
+ CConnmanTest::AddNode(node);
+}
+
+BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
+{
+ const Consensus::Params& consensusParams = Params().GetConsensus();
+ constexpr int nMaxOutbound = 8;
+ CConnman::Options options;
+ options.nMaxConnections = 125;
+ options.nMaxOutbound = nMaxOutbound;
+ options.nMaxFeeler = 1;
+
+ connman->Init(options);
+ std::vector<CNode *> vNodes;
+
+ // Mock some outbound peers
+ for (int i=0; i<nMaxOutbound; ++i) {
+ AddRandomOutboundPeer(vNodes, *peerLogic);
+ }
+
+ peerLogic->CheckForStaleTipAndEvictPeers(consensusParams);
+
+ // No nodes should be marked for disconnection while we have no extra peers
+ for (const CNode *node : vNodes) {
+ BOOST_CHECK(node->fDisconnect == false);
+ }
+
+ SetMockTime(GetTime() + 3*consensusParams.nPowTargetSpacing + 1);
+
+ // Now tip should definitely be stale, and we should look for an extra
+ // outbound peer
+ peerLogic->CheckForStaleTipAndEvictPeers(consensusParams);
+ BOOST_CHECK(connman->GetTryNewOutboundPeer());
+
+ // Still no peers should be marked for disconnection
+ for (const CNode *node : vNodes) {
+ BOOST_CHECK(node->fDisconnect == false);
+ }
+
+ // If we add one more peer, something should get marked for eviction
+ // on the next check (since we're mocking the time to be in the future, the
+ // required time connected check should be satisfied).
+ AddRandomOutboundPeer(vNodes, *peerLogic);
+
+ peerLogic->CheckForStaleTipAndEvictPeers(consensusParams);
+ for (int i=0; i<nMaxOutbound; ++i) {
+ BOOST_CHECK(vNodes[i]->fDisconnect == false);
+ }
+ // Last added node should get marked for eviction
+ BOOST_CHECK(vNodes.back()->fDisconnect == true);
+
+ vNodes.back()->fDisconnect = false;
+
+ // Update the last announced block time for the last
+ // peer, and check that the next newest node gets evicted.
+ UpdateLastBlockAnnounceTime(vNodes.back()->GetId(), GetTime());
+
+ peerLogic->CheckForStaleTipAndEvictPeers(consensusParams);
+ for (int i=0; i<nMaxOutbound-1; ++i) {
+ BOOST_CHECK(vNodes[i]->fDisconnect == false);
+ }
+ BOOST_CHECK(vNodes[nMaxOutbound-1]->fDisconnect == true);
+ BOOST_CHECK(vNodes.back()->fDisconnect == false);
+
+ bool dummy;
+ for (const CNode *node : vNodes) {
+ peerLogic->FinalizeNode(node->GetId(), dummy);
+ }
+
+ CConnmanTest::ClearNodes();
+}
+
BOOST_AUTO_TEST_CASE(DoS_banning)
{
std::atomic<bool> interruptDummy(false);
@@ -50,27 +180,31 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", true);
dummyNode1.SetSendVersion(PROTOCOL_VERSION);
- GetNodeSignals().InitializeNode(&dummyNode1, *connman);
+ peerLogic->InitializeNode(&dummyNode1);
dummyNode1.nVersion = 1;
dummyNode1.fSuccessfullyConnected = true;
Misbehaving(dummyNode1.GetId(), 100); // Should get banned
- SendMessages(&dummyNode1, *connman, interruptDummy);
+ peerLogic->SendMessages(&dummyNode1, interruptDummy);
BOOST_CHECK(connman->IsBanned(addr1));
BOOST_CHECK(!connman->IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned
CAddress addr2(ip(0xa0b0c002), NODE_NONE);
CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", true);
dummyNode2.SetSendVersion(PROTOCOL_VERSION);
- GetNodeSignals().InitializeNode(&dummyNode2, *connman);
+ peerLogic->InitializeNode(&dummyNode2);
dummyNode2.nVersion = 1;
dummyNode2.fSuccessfullyConnected = true;
Misbehaving(dummyNode2.GetId(), 50);
- SendMessages(&dummyNode2, *connman, interruptDummy);
+ peerLogic->SendMessages(&dummyNode2, interruptDummy);
BOOST_CHECK(!connman->IsBanned(addr2)); // 2 not banned yet...
BOOST_CHECK(connman->IsBanned(addr1)); // ... but 1 still should be
Misbehaving(dummyNode2.GetId(), 50);
- SendMessages(&dummyNode2, *connman, interruptDummy);
+ peerLogic->SendMessages(&dummyNode2, interruptDummy);
BOOST_CHECK(connman->IsBanned(addr2));
+
+ bool dummy;
+ peerLogic->FinalizeNode(dummyNode1.GetId(), dummy);
+ peerLogic->FinalizeNode(dummyNode2.GetId(), dummy);
}
BOOST_AUTO_TEST_CASE(DoS_banscore)
@@ -82,19 +216,22 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, 1, CAddress(), "", true);
dummyNode1.SetSendVersion(PROTOCOL_VERSION);
- GetNodeSignals().InitializeNode(&dummyNode1, *connman);
+ peerLogic->InitializeNode(&dummyNode1);
dummyNode1.nVersion = 1;
dummyNode1.fSuccessfullyConnected = true;
Misbehaving(dummyNode1.GetId(), 100);
- SendMessages(&dummyNode1, *connman, interruptDummy);
+ peerLogic->SendMessages(&dummyNode1, interruptDummy);
BOOST_CHECK(!connman->IsBanned(addr1));
Misbehaving(dummyNode1.GetId(), 10);
- SendMessages(&dummyNode1, *connman, interruptDummy);
+ peerLogic->SendMessages(&dummyNode1, interruptDummy);
BOOST_CHECK(!connman->IsBanned(addr1));
Misbehaving(dummyNode1.GetId(), 1);
- SendMessages(&dummyNode1, *connman, interruptDummy);
+ peerLogic->SendMessages(&dummyNode1, interruptDummy);
BOOST_CHECK(connman->IsBanned(addr1));
gArgs.ForceSetArg("-banscore", std::to_string(DEFAULT_BANSCORE_THRESHOLD));
+
+ bool dummy;
+ peerLogic->FinalizeNode(dummyNode1.GetId(), dummy);
}
BOOST_AUTO_TEST_CASE(DoS_bantime)
@@ -108,12 +245,12 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
CAddress addr(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", true);
dummyNode.SetSendVersion(PROTOCOL_VERSION);
- GetNodeSignals().InitializeNode(&dummyNode, *connman);
+ peerLogic->InitializeNode(&dummyNode);
dummyNode.nVersion = 1;
dummyNode.fSuccessfullyConnected = true;
Misbehaving(dummyNode.GetId(), 100);
- SendMessages(&dummyNode, *connman, interruptDummy);
+ peerLogic->SendMessages(&dummyNode, interruptDummy);
BOOST_CHECK(connman->IsBanned(addr));
SetMockTime(nStartTime+60*60);
@@ -121,6 +258,9 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
SetMockTime(nStartTime+60*60*24+1);
BOOST_CHECK(!connman->IsBanned(addr));
+
+ bool dummy;
+ peerLogic->FinalizeNode(dummyNode.GetId(), dummy);
}
CTransactionRef RandomOrphan()
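A note on the behaviour the two new DoS_tests cases above exercise: the stale_tip_peer_management test mocks time forward by just over three block intervals (3 * nPowTargetSpacing) so the tip counts as stale, and once an extra outbound connection is allowed, the eviction candidate is the outbound peer whose last new-block announcement is oldest, with ties broken towards the most recently connected peer (which is why the last mock peer added is marked first, and the next-newest one after its announcement time is refreshed). The following standalone sketch uses a made-up Peer struct rather than the real CNodeState, purely to illustrate that selection rule.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for per-peer state; not the real CNodeState.
struct Peer {
    int id;
    int64_t last_block_announcement; // 0 = never announced a new block to us
    int64_t connect_order;           // larger = connected more recently
};

// Pick the extra outbound peer to evict: oldest last_block_announcement,
// ties broken towards the most recent connection, mirroring what the
// stale_tip_peer_management test checks for.
Peer* PickEvictionCandidate(std::vector<Peer>& peers)
{
    Peer* worst = nullptr;
    for (Peer& p : peers) {
        if (worst == nullptr ||
            p.last_block_announcement < worst->last_block_announcement ||
            (p.last_block_announcement == worst->last_block_announcement &&
             p.connect_order > worst->connect_order)) {
            worst = &p;
        }
    }
    return worst;
}

int main()
{
    std::vector<Peer> peers;
    for (int i = 0; i < 9; ++i) peers.push_back({i, 0, i});

    // With no block announcements at all, the newest connection (id 8) is chosen.
    std::cout << "first candidate: " << PickEvictionCandidate(peers)->id << '\n';

    // After peer 8 announces a block, the next-newest connection (id 7) is chosen.
    peers[8].last_block_announcement = 1000;
    std::cout << "second candidate: " << PickEvictionCandidate(peers)->id << '\n';
}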
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 194f62ca11..01e2a555e6 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -24,6 +24,18 @@
#include <memory>
+void CConnmanTest::AddNode(CNode& node)
+{
+ LOCK(g_connman->cs_vNodes);
+ g_connman->vNodes.push_back(&node);
+}
+
+void CConnmanTest::ClearNodes()
+{
+ LOCK(g_connman->cs_vNodes);
+ g_connman->vNodes.clear();
+}
+
uint256 insecure_rand_seed = GetRandHash();
FastRandomContext insecure_rand_ctx(insecure_rand_seed);
@@ -48,7 +60,6 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName)
BasicTestingSetup::~BasicTestingSetup()
{
ECC_Stop();
- g_connman.reset();
}
TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(chainName)
@@ -86,16 +97,17 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
threadGroup.create_thread(&ThreadScriptCheck);
g_connman = std::unique_ptr<CConnman>(new CConnman(0x1337, 0x1337)); // Deterministic randomness for tests.
connman = g_connman.get();
- RegisterNodeSignals(GetNodeSignals());
+ peerLogic.reset(new PeerLogicValidation(connman, scheduler));
}
TestingSetup::~TestingSetup()
{
- UnregisterNodeSignals(GetNodeSignals());
threadGroup.interrupt_all();
threadGroup.join_all();
GetMainSignals().FlushBackgroundCallbacks();
GetMainSignals().UnregisterBackgroundSignalScheduler();
+ g_connman.reset();
+ peerLogic.reset();
UnloadBlockIndex();
delete pcoinsTip;
delete pcoinsdbview;
diff --git a/src/test/test_bitcoin.h b/src/test/test_bitcoin.h
index dd3b13c8c8..642be476a6 100644
--- a/src/test/test_bitcoin.h
+++ b/src/test/test_bitcoin.h
@@ -49,12 +49,20 @@ struct BasicTestingSetup {
* Included are data directory, coins database, script check threads setup.
*/
class CConnman;
+class CNode;
+struct CConnmanTest {
+ static void AddNode(CNode& node);
+ static void ClearNodes();
+};
+
+class PeerLogicValidation;
struct TestingSetup: public BasicTestingSetup {
CCoinsViewDB *pcoinsdbview;
fs::path pathTemp;
boost::thread_group threadGroup;
CConnman* connman;
CScheduler scheduler;
+ std::unique_ptr<PeerLogicValidation> peerLogic;
TestingSetup(const std::string& chainName = CBaseChainParams::MAIN);
~TestingSetup();
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 5679086969..6ec544290d 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -253,6 +253,31 @@ BOOST_AUTO_TEST_CASE(util_IsHex)
BOOST_CHECK(!IsHex("0x0000"));
}
+BOOST_AUTO_TEST_CASE(util_IsHexNumber)
+{
+ BOOST_CHECK(IsHexNumber("0x0"));
+ BOOST_CHECK(IsHexNumber("0"));
+ BOOST_CHECK(IsHexNumber("0x10"));
+ BOOST_CHECK(IsHexNumber("10"));
+ BOOST_CHECK(IsHexNumber("0xff"));
+ BOOST_CHECK(IsHexNumber("ff"));
+ BOOST_CHECK(IsHexNumber("0xFfa"));
+ BOOST_CHECK(IsHexNumber("Ffa"));
+ BOOST_CHECK(IsHexNumber("0x00112233445566778899aabbccddeeffAABBCCDDEEFF"));
+ BOOST_CHECK(IsHexNumber("00112233445566778899aabbccddeeffAABBCCDDEEFF"));
+
+ BOOST_CHECK(!IsHexNumber("")); // empty string not allowed
+ BOOST_CHECK(!IsHexNumber("0x")); // empty string after prefix not allowed
+ BOOST_CHECK(!IsHexNumber("0x0 ")); // no spaces at end,
+ BOOST_CHECK(!IsHexNumber(" 0x0")); // or beginning,
+ BOOST_CHECK(!IsHexNumber("0x 0")); // or middle,
+ BOOST_CHECK(!IsHexNumber(" ")); // etc.
+ BOOST_CHECK(!IsHexNumber("0x0ga")); // invalid character
+ BOOST_CHECK(!IsHexNumber("x0")); // broken prefix
+ BOOST_CHECK(!IsHexNumber("0x0x00")); // two prefixes not allowed
+
+}
+
BOOST_AUTO_TEST_CASE(util_seed_insecure_rand)
{
SeedInsecureRand(true);
diff --git a/src/utilstrencodings.cpp b/src/utilstrencodings.cpp
index fd233f6757..741680e93f 100644
--- a/src/utilstrencodings.cpp
+++ b/src/utilstrencodings.cpp
@@ -65,6 +65,19 @@ bool IsHex(const std::string& str)
return (str.size() > 0) && (str.size()%2 == 0);
}
+bool IsHexNumber(const std::string& str)
+{
+ size_t starting_location = 0;
+ if (str.size() > 2 && *str.begin() == '0' && *(str.begin()+1) == 'x') {
+ starting_location = 2;
+ }
+ for (auto c : str.substr(starting_location)) {
+ if (HexDigit(c) < 0) return false;
+ }
+ // Return false for empty string or "0x".
+ return (str.size() > starting_location);
+}
+
std::vector<unsigned char> ParseHex(const char* psz)
{
// convert hex dump to vector
diff --git a/src/utilstrencodings.h b/src/utilstrencodings.h
index 53da60e8f1..192f33fb29 100644
--- a/src/utilstrencodings.h
+++ b/src/utilstrencodings.h
@@ -38,7 +38,13 @@ std::string SanitizeString(const std::string& str, int rule = SAFE_CHARS_DEFAULT
std::vector<unsigned char> ParseHex(const char* psz);
std::vector<unsigned char> ParseHex(const std::string& str);
signed char HexDigit(char c);
+/* Returns true if every character in str is a hex character, and the string
+ * has an even number of hex digits. */
bool IsHex(const std::string& str);
+/**
+* Return true if the string is a hex number, optionally prefixed with "0x"
+*/
+bool IsHexNumber(const std::string& str);
std::vector<unsigned char> DecodeBase64(const char* p, bool* pfInvalid = nullptr);
std::string DecodeBase64(const std::string& str);
std::string EncodeBase64(const unsigned char* pch, size_t len);
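To make the IsHexNumber() contract above concrete (an optional "0x" prefix followed by at least one hex digit, with no whitespace or other characters), here is a self-contained sketch that mirrors the new utilstrencodings.cpp logic but uses std::isxdigit instead of HexDigit(); it is only an illustration for experimenting outside the tree, not the in-tree function.

#include <cctype>
#include <iostream>
#include <string>

// Standalone sketch of the IsHexNumber() rules introduced above.
bool IsHexNumberSketch(const std::string& str)
{
    size_t start = 0;
    if (str.size() > 2 && str[0] == '0' && str[1] == 'x') start = 2;
    for (size_t i = start; i < str.size(); ++i) {
        if (!std::isxdigit(static_cast<unsigned char>(str[i]))) return false;
    }
    // Reject the empty string and a bare "0x".
    return str.size() > start;
}

int main()
{
    for (const char* s : {"0x65", "ff", "0x", "", " 0x0", "0x0ga"}) {
        std::cout << '"' << s << "\" -> " << (IsHexNumberSketch(s) ? "hex" : "not hex") << '\n';
    }
}

A bare "0x" is rejected by the same mechanism as in the in-tree version: the prefix is only stripped when at least one character follows it, so the 'x' then fails the digit check.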
diff --git a/src/validation.cpp b/src/validation.cpp
index 8bd23a0f1d..8a92992dba 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -80,6 +80,7 @@ int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;
uint256 hashAssumeValid;
+arith_uint256 nMinimumChainWork;
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
@@ -152,6 +153,26 @@ namespace {
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;
+ /** In order to efficiently track invalidity of headers, we keep the set of
+ * blocks which we tried to connect and found to be invalid here (ie which
+ * were set to BLOCK_FAILED_VALID since the last restart). We can then
+ * walk this set and check if a new header is a descendant of something in
+ * this set, preventing us from having to walk mapBlockIndex when we try
+ * to connect a bad block and fail.
+ *
+ * While this is more complicated than marking everything which descends
+ * from an invalid block as invalid at the time we discover it to be
+ * invalid, doing so would require walking all of mapBlockIndex to find all
+ * descendants. Since this case should be very rare, keeping track of all
+ * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
+ * well.
+ *
+ * Because we already walk mapBlockIndex in height-order at startup, we go
+ * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
+ * instead of putting things in this set.
+ */
+ std::set<CBlockIndex*> g_failed_blocks;
+
/** Dirty block index entries. */
std::set<CBlockIndex*> setDirtyBlockIndex;
@@ -1032,8 +1053,6 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
bool IsInitialBlockDownload()
{
- const CChainParams& chainParams = Params();
-
// Once this function has returned false, it must remain false.
static std::atomic<bool> latchToFalse{false};
// Optimization: pre-test latch before taking the lock.
@@ -1047,7 +1066,7 @@ bool IsInitialBlockDownload()
return true;
if (chainActive.Tip() == nullptr)
return true;
- if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork))
+ if (chainActive.Tip()->nChainWork < nMinimumChainWork)
return true;
if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
return true;
@@ -1169,6 +1188,7 @@ void static InvalidChainFound(CBlockIndex* pindexNew)
void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
if (!state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
+ g_failed_blocks.insert(pindex);
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
@@ -1664,7 +1684,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
if (it != mapBlockIndex.end()) {
if (it->second->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
- pindexBestHeader->nChainWork >= UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) {
+ pindexBestHeader->nChainWork >= nMinimumChainWork) {
// This block is a member of the assumed verified chain and an ancestor of the best header.
// The equivalent time check discourages hash power from extorting the network via DOS attack
// into accepting an invalid block through telling users they must manually set assumevalid.
@@ -2518,17 +2538,18 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C
{
AssertLockHeld(cs_main);
- // Mark the block itself as invalid.
- pindex->nStatus |= BLOCK_FAILED_VALID;
- setDirtyBlockIndex.insert(pindex);
- setBlockIndexCandidates.erase(pindex);
+ // We first disconnect backwards and then mark the blocks as invalid.
+ // This prevents a case where pruned nodes may fail to invalidateblock
+ // and be left unable to start as they have no tip candidates (as there
+ // are no blocks that meet the "have data and are not invalid per
+ // nStatus" criteria for inclusion in setBlockIndexCandidates).
+
+ bool pindex_was_in_chain = false;
+ CBlockIndex *invalid_walk_tip = chainActive.Tip();
DisconnectedBlockTransactions disconnectpool;
while (chainActive.Contains(pindex)) {
- CBlockIndex *pindexWalk = chainActive.Tip();
- pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
- setDirtyBlockIndex.insert(pindexWalk);
- setBlockIndexCandidates.erase(pindexWalk);
+ pindex_was_in_chain = true;
// ActivateBestChain considers blocks already in chainActive
// unconditionally valid already, so force disconnect away from it.
if (!DisconnectTip(state, chainparams, &disconnectpool)) {
@@ -2539,6 +2560,21 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C
}
}
+ // Now mark the blocks we just disconnected as invalid descendants
+ // (note this may not be all descendants).
+ while (pindex_was_in_chain && invalid_walk_tip != pindex) {
+ invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(invalid_walk_tip);
+ setBlockIndexCandidates.erase(invalid_walk_tip);
+ invalid_walk_tip = invalid_walk_tip->pprev;
+ }
+
+ // Mark the block itself as invalid.
+ pindex->nStatus |= BLOCK_FAILED_VALID;
+ setDirtyBlockIndex.insert(pindex);
+ setBlockIndexCandidates.erase(pindex);
+ g_failed_blocks.insert(pindex);
+
// DisconnectTip will add transactions to disconnectpool; try to add these
// back to the mempool.
UpdateMempoolForReorg(disconnectpool, true);
@@ -2576,6 +2612,7 @@ bool ResetBlockFailureFlags(CBlockIndex *pindex) {
// Reset invalid block marker if it was pointing to one of those.
pindexBestInvalid = nullptr;
}
+ g_failed_blocks.erase(it->second);
}
it++;
}
@@ -3052,6 +3089,21 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
+
+ if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
+ for (const CBlockIndex* failedit : g_failed_blocks) {
+ if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
+ assert(failedit->nStatus & BLOCK_FAILED_VALID);
+ CBlockIndex* invalid_walk = pindexPrev;
+ while (invalid_walk != failedit) {
+ invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(invalid_walk);
+ invalid_walk = invalid_walk->pprev;
+ }
+ return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ }
+ }
+ }
}
if (pindex == nullptr)
pindex = AddToBlockIndex(block);
@@ -3065,13 +3117,15 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
}
// Exposed wrapper for AcceptBlockHeader
-bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
+bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex, CBlockHeader *first_invalid)
{
+ if (first_invalid != nullptr) first_invalid->SetNull();
{
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
if (!AcceptBlockHeader(header, state, chainparams, &pindex)) {
+ if (first_invalid) *first_invalid = header;
return false;
}
if (ppindex) {
@@ -3101,7 +3155,7 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
- bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
+ bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
@@ -3118,9 +3172,15 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
// and unrequested blocks.
if (fAlreadyHave) return true;
if (!fRequested) { // If we didn't ask for it:
- if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
- if (!fHasMoreWork) return true; // Don't process less-work chains
- if (fTooFarAhead) return true; // Block height is too high
+ if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
+ if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
+ if (fTooFarAhead) return true; // Block height is too high
+
+ // Protect against DoS attacks from low-work chains.
+ // If our tip is behind, a peer could try to send us
+ // low-work blocks on a fake chain that we would never
+ // request; don't process these.
+ if (pindex->nChainWork < nMinimumChainWork) return true;
}
if (fNewBlock) *fNewBlock = true;
@@ -3470,6 +3530,10 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
pindex->nChainTx = pindex->nTx;
}
}
+ if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
+ pindex->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(pindex);
+ }
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr))
setBlockIndexCandidates.insert(pindex);
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
@@ -3860,6 +3924,7 @@ void UnloadBlockIndex()
nLastBlockFile = 0;
nBlockSequenceId = 1;
setDirtyBlockIndex.clear();
+ g_failed_blocks.clear();
setDirtyFileInfo.clear();
versionbitscache.Clear();
for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
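The g_failed_blocks comment and the new walk in AcceptBlockHeader above amount to this: when a header's parent is not already known to be fully valid, it is checked against the set of blocks we previously failed to connect, and if the parent descends from one of them, the header is rejected and everything between the parent and that failed ancestor is marked BLOCK_FAILED_CHILD along the way. A minimal standalone sketch of that check is below, using a toy BlockIndex and a linear-walk GetAncestor (the real CBlockIndex uses a skip list); none of these names are the in-tree ones.

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

// Toy stand-ins for CBlockIndex and its nStatus flags.
enum : uint32_t { FAILED_VALID = 1, FAILED_CHILD = 2 };

struct BlockIndex {
    int height = 0;
    uint32_t status = 0;
    BlockIndex* pprev = nullptr;
};

// Linear-walk ancestor lookup; the real CBlockIndex::GetAncestor uses a skip list.
BlockIndex* GetAncestor(BlockIndex* index, int height)
{
    while (index && index->height > height) index = index->pprev;
    return index;
}

// If prev descends from a block we previously failed to connect, mark the path
// down to (but not including) that block as FAILED_CHILD and report failure.
bool PrevChainIsKnownInvalid(BlockIndex* prev, const std::set<BlockIndex*>& failed_blocks)
{
    for (BlockIndex* failed : failed_blocks) {
        if (GetAncestor(prev, failed->height) == failed) {
            for (BlockIndex* walk = prev; walk != failed; walk = walk->pprev) {
                walk->status |= FAILED_CHILD;
            }
            return true;
        }
    }
    return false;
}

int main()
{
    // Build a short chain b0 <- b1 <- b2 <- b3 and record b1 as failed.
    std::vector<BlockIndex> chain(4);
    for (int i = 0; i < 4; ++i) {
        chain[i].height = i;
        chain[i].pprev = (i > 0) ? &chain[i - 1] : nullptr;
    }
    chain[1].status |= FAILED_VALID;
    std::set<BlockIndex*> failed_blocks{&chain[1]};

    // A header whose parent is b3 is rejected, and b2/b3 pick up FAILED_CHILD.
    std::cout << "reject: " << PrevChainIsKnownInvalid(&chain[3], failed_blocks) << '\n';
    std::cout << "b2 FAILED_CHILD: " << ((chain[2].status & FAILED_CHILD) != 0) << '\n';
}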
diff --git a/src/validation.h b/src/validation.h
index edb8eab894..804fb644d9 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -161,7 +161,6 @@ extern CTxMemPool mempool;
typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
extern BlockMap mapBlockIndex;
extern uint64_t nLastBlockTx;
-extern uint64_t nLastBlockSize;
extern uint64_t nLastBlockWeight;
extern const std::string strMessageMagic;
extern CWaitableCriticalSection csBestBlock;
@@ -186,6 +185,9 @@ extern bool fEnableReplacement;
/** Block hash whose ancestors we will assume to have valid scripts without checking them. */
extern uint256 hashAssumeValid;
+/** Minimum work we will assume exists on some valid chain. */
+extern arith_uint256 nMinimumChainWork;
+
/** Best header we've seen so far (used for getheaders queries' starting points). */
extern CBlockIndex *pindexBestHeader;
@@ -245,8 +247,9 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
* @param[out] state This may be set to an Error state if any error occurred processing them
* @param[in] chainparams The params for the chain we want to connect to
* @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers
+ * @param[out] first_invalid First header that fails validation, if one exists
*/
-bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex=nullptr);
+bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex=nullptr, CBlockHeader *first_invalid=nullptr);
/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
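The new first_invalid parameter above follows a simple out-parameter convention: it is cleared before any work is done and filled in only when a header fails, so the caller (net_processing in this backport) can tell which header, and hence which peer, was at fault. A generic sketch of that convention, with hypothetical Header and ProcessHeaders names:

#include <iostream>
#include <vector>

// Hypothetical header type; only the out-parameter convention is the point here.
struct Header {
    int id;
    bool valid;
    void SetNull() { id = 0; valid = true; }
};

bool ProcessHeaders(const std::vector<Header>& headers, Header* first_invalid)
{
    if (first_invalid != nullptr) first_invalid->SetNull(); // always start from a clean slate
    for (const Header& h : headers) {
        if (!h.valid) {
            if (first_invalid) *first_invalid = h; // report the offending header
            return false;
        }
    }
    return true;
}

int main()
{
    std::vector<Header> headers{{1, true}, {2, false}, {3, true}};
    Header bad{0, true};
    if (!ProcessHeaders(headers, &bad)) {
        std::cout << "first invalid header: " << bad.id << '\n';
    }
}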
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index d2fe4866fa..fb6e576074 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -20,6 +20,40 @@
#include <boost/thread.hpp>
+namespace {
+//! Make sure the database has a unique fileid within the environment. If it
+//! doesn't, throw an error. BDB caches do not work properly when more than one
+//! open database has the same fileid (values written to one database may show
+//! up in reads to other databases).
+//!
+//! BerkeleyDB generates unique fileids by default
+//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
+//! so bitcoin should never create different databases with the same fileid, but
+//! this error can be triggered if users manually copy database files.
+void CheckUniqueFileid(const CDBEnv& env, const std::string& filename, Db& db)
+{
+ if (env.IsMock()) return;
+
+ u_int8_t fileid[DB_FILE_ID_LEN];
+ int ret = db.get_mpf()->get_fileid(fileid);
+ if (ret != 0) {
+ throw std::runtime_error(strprintf("CDB: Can't open database %s (get_fileid failed with %d)", filename, ret));
+ }
+
+ for (const auto& item : env.mapDb) {
+ u_int8_t item_fileid[DB_FILE_ID_LEN];
+ if (item.second && item.second->get_mpf()->get_fileid(item_fileid) == 0 &&
+ memcmp(fileid, item_fileid, sizeof(fileid)) == 0) {
+ const char* item_filename = nullptr;
+ item.second->get_dbname(&item_filename, nullptr);
+ throw std::runtime_error(strprintf("CDB: Can't open database %s (duplicates fileid %s from %s)", filename,
+ HexStr(std::begin(item_fileid), std::end(item_fileid)),
+ item_filename ? item_filename : "(unknown database)"));
+ }
+ }
+}
+} // namespace
+
//
// CDB
//
@@ -379,35 +413,34 @@ CDB::CDB(CWalletDBWrapper& dbw, const char* pszMode, bool fFlushOnCloseIn) : pdb
if (!env->Open(GetDataDir()))
throw std::runtime_error("CDB: Failed to open database environment.");
- strFile = strFilename;
- ++env->mapFileUseCount[strFile];
- pdb = env->mapDb[strFile];
+ pdb = env->mapDb[strFilename];
if (pdb == nullptr) {
int ret;
- pdb = new Db(env->dbenv, 0);
+ std::unique_ptr<Db> pdb_temp(new Db(env->dbenv, 0));
bool fMockDb = env->IsMock();
if (fMockDb) {
- DbMpoolFile* mpf = pdb->get_mpf();
+ DbMpoolFile* mpf = pdb_temp->get_mpf();
ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
- if (ret != 0)
- throw std::runtime_error(strprintf("CDB: Failed to configure for no temp file backing for database %s", strFile));
+ if (ret != 0) {
+ throw std::runtime_error(strprintf("CDB: Failed to configure for no temp file backing for database %s", strFilename));
+ }
}
- ret = pdb->open(nullptr, // Txn pointer
- fMockDb ? nullptr : strFile.c_str(), // Filename
- fMockDb ? strFile.c_str() : "main", // Logical db name
- DB_BTREE, // Database type
- nFlags, // Flags
+ ret = pdb_temp->open(nullptr, // Txn pointer
+ fMockDb ? nullptr : strFilename.c_str(), // Filename
+ fMockDb ? strFilename.c_str() : "main", // Logical db name
+ DB_BTREE, // Database type
+ nFlags, // Flags
0);
if (ret != 0) {
- delete pdb;
- pdb = nullptr;
- --env->mapFileUseCount[strFile];
- strFile = "";
throw std::runtime_error(strprintf("CDB: Error %d, can't open database %s", ret, strFilename));
}
+ CheckUniqueFileid(*env, strFilename, *pdb_temp);
+
+ pdb = pdb_temp.release();
+ env->mapDb[strFilename] = pdb;
if (fCreate && !Exists(std::string("version"))) {
bool fTmp = fReadOnly;
@@ -415,9 +448,9 @@ CDB::CDB(CWalletDBWrapper& dbw, const char* pszMode, bool fFlushOnCloseIn) : pdb
WriteVersion(CLIENT_VERSION);
fReadOnly = fTmp;
}
-
- env->mapDb[strFile] = pdb;
}
+ ++env->mapFileUseCount[strFilename];
+ strFile = strFilename;
}
}
@@ -673,6 +706,11 @@ bool CWalletDBWrapper::Backup(const std::string& strDest)
pathDest /= strFile;
try {
+ if (fs::equivalent(pathSrc, pathDest)) {
+ LogPrintf("cannot backup to wallet source file %s\n", pathDest.string());
+ return false;
+ }
+
fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists);
LogPrintf("copied %s to %s\n", strFile, pathDest.string());
return true;
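The restructured CDB constructor above opens the Db handle into a std::unique_ptr, runs its checks (including the new CheckUniqueFileid), and only then publishes the handle into mapDb and bumps mapFileUseCount, so a failed open no longer leaks the handle or leaves stale bookkeeping behind. A generic open-validate-publish sketch of the same idea, using hypothetical Handle and Cache types rather than the BDB classes:

#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical handle type; stands in for Db in the wallet code above.
struct Handle {
    std::string name;
    explicit Handle(std::string n) : name(std::move(n)) {}
};

struct Cache {
    std::map<std::string, Handle*> open_handles; // like CDBEnv::mapDb
    std::map<std::string, int> use_counts;       // like CDBEnv::mapFileUseCount
};

// Open-validate-publish: nothing is recorded in the cache until the handle has
// passed every check, so throwing here cannot leak or leave a stale entry.
Handle* OpenHandle(Cache& cache, const std::string& filename)
{
    if (Handle* existing = cache.open_handles[filename]) {
        ++cache.use_counts[filename];
        return existing;
    }
    std::unique_ptr<Handle> tmp(new Handle(filename));
    if (tmp->name.empty()) { // stand-in for the open/fileid validation steps
        throw std::runtime_error("can't open database " + filename);
    }
    Handle* handle = tmp.release(); // publish only after validation succeeded
    cache.open_handles[filename] = handle;
    ++cache.use_counts[filename];
    return handle;
}

int main()
{
    Cache cache;
    Handle* first = OpenHandle(cache, "wallet.dat");
    Handle* second = OpenHandle(cache, "wallet.dat"); // second open reuses the cached handle
    bool reused = (first == second);
    delete first; // toy cleanup; the real environment owns and closes its Db handles
    return reused ? 0 : 1;
}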
diff --git a/src/wallet/db.h b/src/wallet/db.h
index 3614e34fbb..6f3cfe9557 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -45,7 +45,7 @@ public:
void Reset();
void MakeMock();
- bool IsMock() { return fMockDb; }
+ bool IsMock() const { return fMockDb; }
/**
* Verify that database file strFile is OK. If it is not,
diff --git a/test/functional/bip68-sequence.py b/test/functional/bip68-sequence.py
index 1391b9ed46..b3b5adf425 100755
--- a/test/functional/bip68-sequence.py
+++ b/test/functional/bip68-sequence.py
@@ -83,7 +83,7 @@ class BIP68Test(BitcoinTestFramework):
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
- assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
+ assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
@@ -180,7 +180,7 @@ class BIP68Test(BitcoinTestFramework):
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
- assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
+ assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
@@ -227,7 +227,7 @@ class BIP68Test(BitcoinTestFramework):
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
- assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
+ assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(ToHex(tx))
@@ -280,7 +280,7 @@ class BIP68Test(BitcoinTestFramework):
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
- assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
+ assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
@@ -353,7 +353,7 @@ class BIP68Test(BitcoinTestFramework):
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
- assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
+ assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
diff --git a/test/functional/blockchain.py b/test/functional/blockchain.py
index 50be9262e4..4c2997f8bd 100755
--- a/test/functional/blockchain.py
+++ b/test/functional/blockchain.py
@@ -25,7 +25,7 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises,
- assert_raises_jsonrpc,
+ assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
@@ -96,7 +96,7 @@ class BlockchainTest(BitcoinTestFramework):
def _test_getblockheader(self):
node = self.nodes[0]
- assert_raises_jsonrpc(-5, "Block not found",
+ assert_raises_rpc_error(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
diff --git a/test/functional/bumpfee.py b/test/functional/bumpfee.py
index ede0df29d8..c2aa75209c 100755
--- a/test/functional/bumpfee.py
+++ b/test/functional/bumpfee.py
@@ -133,7 +133,7 @@ def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
- assert_raises_jsonrpc(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
+ assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
@@ -153,7 +153,7 @@ def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
- assert_raises_jsonrpc(-4, "Transaction contains inputs that don't belong to this wallet",
+ assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
@@ -164,7 +164,7 @@ def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransaction(tx)
txid = rbf_node.sendrawtransaction(tx["hex"])
- assert_raises_jsonrpc(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
+ assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
@@ -173,7 +173,7 @@ def test_small_output_fails(rbf_node, dest_address):
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
- assert_raises_jsonrpc(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
+ assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
@@ -205,7 +205,7 @@ def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
- assert_raises_jsonrpc(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
+ assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
@@ -213,7 +213,7 @@ def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
- assert_raises_jsonrpc(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
+ assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
@@ -264,7 +264,7 @@ def test_bumpfee_metadata(rbf_node, dest_address):
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
- assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first.",
+ assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
diff --git a/test/functional/disablewallet.py b/test/functional/disablewallet.py
index c1d37963bc..c75ef9b9f1 100755
--- a/test/functional/disablewallet.py
+++ b/test/functional/disablewallet.py
@@ -19,7 +19,7 @@ class DisableWalletTest (BitcoinTestFramework):
def run_test (self):
# Make sure wallet is really disabled
- assert_raises_jsonrpc(-32601, 'Method not found', self.nodes[0].getwalletinfo)
+ assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo)
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
@@ -28,7 +28,7 @@ class DisableWalletTest (BitcoinTestFramework):
# Checking mining to an address without a wallet. Generating to a valid address should succeed
# but generating to an invalid address will fail.
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
- assert_raises_jsonrpc(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
+ assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
if __name__ == '__main__':
DisableWalletTest ().main ()
diff --git a/test/functional/disconnect_ban.py b/test/functional/disconnect_ban.py
index a6445b9b35..59655d37fb 100755
--- a/test/functional/disconnect_ban.py
+++ b/test/functional/disconnect_ban.py
@@ -8,7 +8,7 @@ import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- assert_raises_jsonrpc,
+ assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
@@ -34,14 +34,14 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
- assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
+ assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
- assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
+ assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
- assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
+ assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
@@ -81,10 +81,10 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
- assert_raises_jsonrpc(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
+ assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
- assert_raises_jsonrpc(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
+ assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
diff --git a/test/functional/fundrawtransaction.py b/test/functional/fundrawtransaction.py
index 42d95dd520..1c4f269ec0 100755
--- a/test/functional/fundrawtransaction.py
+++ b/test/functional/fundrawtransaction.py
@@ -179,7 +179,7 @@ class RawTransactionsTest(BitcoinTestFramework):
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
- assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
+ assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
@@ -192,7 +192,7 @@ class RawTransactionsTest(BitcoinTestFramework):
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
- assert_raises_jsonrpc(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
+ assert_raises_rpc_error(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
@@ -206,7 +206,7 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
- assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
+ assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
@@ -315,7 +315,7 @@ class RawTransactionsTest(BitcoinTestFramework):
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
- assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
+ assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
@@ -470,14 +470,14 @@ class RawTransactionsTest(BitcoinTestFramework):
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
- assert_raises_jsonrpc(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
+ assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
- assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
+ assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
diff --git a/test/functional/import-rescan.py b/test/functional/import-rescan.py
index 02d7428120..955554204d 100755
--- a/test/functional/import-rescan.py
+++ b/test/functional/import-rescan.py
@@ -19,9 +19,8 @@ importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
-from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (connect_nodes, sync_blocks, assert_equal, set_node_times)
+from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
@@ -35,21 +34,26 @@ Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
+ def try_rpc(self, func, *args, **kwargs):
+ if self.expect_disabled:
+ assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
+ else:
+ return func(*args, **kwargs)
+
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
- response, error = try_rpc(self.node.importaddress, self.address["address"], self.label,
- self.rescan == Rescan.yes)
+ response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
+ self.rescan == Rescan.yes)
elif self.data == Data.pub:
- response, error = try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
- self.rescan == Rescan.yes)
+ response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
+ self.rescan == Rescan.yes)
elif self.data == Data.priv:
- response, error = try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
+ response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
- assert_equal(error, {'message': 'Rescan is disabled in pruned mode',
- 'code': -4} if self.expect_disabled else None)
+
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
@@ -180,13 +184,5 @@ class ImportRescanTest(BitcoinTestFramework):
else:
variant.check()
-
-def try_rpc(func, *args, **kwargs):
- try:
- return func(*args, **kwargs), None
- except JSONRPCException as e:
- return None, e.error
-
-
if __name__ == "__main__":
ImportRescanTest().main()
diff --git a/test/functional/importmulti.py b/test/functional/importmulti.py
index 6907d64070..324f6458e3 100755
--- a/test/functional/importmulti.py
+++ b/test/functional/importmulti.py
@@ -448,11 +448,11 @@ class ImportMultiTest (BitcoinTestFramework):
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
- assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
+ assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
- assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
+ assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
diff --git a/test/functional/importprunedfunds.py b/test/functional/importprunedfunds.py
index 996ff96cc3..c445b66df3 100755
--- a/test/functional/importprunedfunds.py
+++ b/test/functional/importprunedfunds.py
@@ -67,7 +67,7 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
self.sync_all()
#Import with no affiliated address
- assert_raises_jsonrpc(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
+ assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
balance1 = self.nodes[1].getbalance("", 0, True)
assert_equal(balance1, Decimal(0))
@@ -98,7 +98,7 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
assert_equal(address_info['ismine'], True)
#Remove transactions
- assert_raises_jsonrpc(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
+ assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
balance1 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance1, Decimal('0.075'))
diff --git a/test/functional/keypool.py b/test/functional/keypool.py
index b823ca63bb..f2701c36bd 100755
--- a/test/functional/keypool.py
+++ b/test/functional/keypool.py
@@ -28,7 +28,7 @@ class KeyPoolTest(BitcoinTestFramework):
wallet_info = nodes[0].getwalletinfo()
assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
- assert_raises_jsonrpc(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
+ assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
nodes[0].walletpassphrase('test', 12000)
@@ -47,7 +47,7 @@ class KeyPoolTest(BitcoinTestFramework):
nodes[0].getrawchangeaddress()
addr = set()
# the next one should fail
- assert_raises_jsonrpc(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
+ assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
# drain the external keys
addr.add(nodes[0].getnewaddress())
@@ -58,7 +58,7 @@ class KeyPoolTest(BitcoinTestFramework):
addr.add(nodes[0].getnewaddress())
assert(len(addr) == 6)
# the next one should fail
- assert_raises_jsonrpc(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
+ assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
@@ -72,7 +72,7 @@ class KeyPoolTest(BitcoinTestFramework):
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
- assert_raises_jsonrpc(-12, "Keypool ran out", nodes[0].generate, 1)
+ assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1)
nodes[0].walletpassphrase('test', 100)
nodes[0].keypoolrefill(100)
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 2dedadf8ce..6d3849bf57 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -115,7 +115,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
- assert_raises_jsonrpc(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
+ assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
@@ -167,7 +167,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Sending one more chained transaction will fail
utxo = transaction_package.pop(0)
- assert_raises_jsonrpc(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
+ assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
# TODO: check that node1's mempool is as expected
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
index 7dfddd3230..2803371f5b 100755
--- a/test/functional/mempool_reorg.py
+++ b/test/functional/mempool_reorg.py
@@ -50,14 +50,14 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
- assert_raises_jsonrpc(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
+ assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
- assert_raises_jsonrpc(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
+ assert_raises_rpc_error(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
diff --git a/test/functional/mempool_spendcoinbase.py b/test/functional/mempool_spendcoinbase.py
index 58ccd3e373..6e8a635a76 100755
--- a/test/functional/mempool_spendcoinbase.py
+++ b/test/functional/mempool_spendcoinbase.py
@@ -36,7 +36,7 @@ class MempoolSpendCoinbaseTest(BitcoinTestFramework):
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
- assert_raises_jsonrpc(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])
+ assert_raises_rpc_error(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
diff --git a/test/functional/merkle_blocks.py b/test/functional/merkle_blocks.py
index a58334b2a5..b3989a4c54 100755
--- a/test/functional/merkle_blocks.py
+++ b/test/functional/merkle_blocks.py
@@ -38,7 +38,7 @@ class MerkleBlockTest(BitcoinTestFramework):
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
# This will raise an exception because the transaction is not yet in a block
- assert_raises_jsonrpc(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
+ assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
@@ -63,11 +63,11 @@ class MerkleBlockTest(BitcoinTestFramework):
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
- assert_raises_jsonrpc(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent])
+ assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent])
# We can get the proof if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# We can't get the proof if we specify a non-existent block
- assert_raises_jsonrpc(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
# We can get the proof if the transaction is unspent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
# We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
@@ -76,7 +76,7 @@ class MerkleBlockTest(BitcoinTestFramework):
# We can always get a proof if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
# We can't get a proof if we specify transactions from different blocks
- assert_raises_jsonrpc(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3])
+ assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3])
if __name__ == '__main__':
diff --git a/test/functional/minchainwork.py b/test/functional/minchainwork.py
new file mode 100755
index 0000000000..e136dc2b7c
--- /dev/null
+++ b/test/functional/minchainwork.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for setting nMinimumChainWork on command line.
+
+Nodes don't consider themselves out of "initial block download" until
+their active chain has more work than nMinimumChainWork.
+
+Nodes don't download blocks from a peer unless the peer's best known block
+has more work than nMinimumChainWork.
+
+While in initial block download, nodes won't relay blocks to their peers, so
+test that this parameter functions as intended by verifying that block relay
+only succeeds past a given node once its nMinimumChainWork has been exceeded.
+"""
+
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import sync_blocks, connect_nodes, assert_equal
+
+# 2 hashes required per regtest block (with no difficulty adjustment)
+REGTEST_WORK_PER_BLOCK = 2
+
+class MinimumChainWorkTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
+ self.node_min_work = [0, 101, 101]
+
+ def setup_network(self):
+ # This test relies on the chain setup being:
+ # node0 <- node1 <- node2
+ # Before leaving IBD, nodes prefer to download blocks from outbound
+ # peers, so ensure that we're mining on an outbound peer and testing
+ # block relay to inbound peers.
+ self.setup_nodes()
+ for i in range(self.num_nodes-1):
+ connect_nodes(self.nodes[i+1], i)
+
+ def run_test(self):
+ # Start building a chain on node0. node2 shouldn't be able to sync until node1's
+ # minchainwork is exceeded
+ starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
+ self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
+
+ starting_blockcount = self.nodes[2].getblockcount()
+
+ num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
+ self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
+ hashes = self.nodes[0].generate(num_blocks_to_generate)
+
+ self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
+
+ # Sleep a few seconds and verify that node2 didn't get any new blocks
+ # or headers. We sleep, rather than sync_blocks(node0, node1) because
+ # it's reasonable either way for node1 to get the blocks, or not get
+ # them (since they're below node1's minchainwork).
+ time.sleep(3)
+
+ self.log.info("Verifying node 2 has no more blocks than before")
+ self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+ # Node2 shouldn't have any new headers yet, because node1 should not
+ # have relayed anything.
+ assert_equal(len(self.nodes[2].getchaintips()), 1)
+ assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
+
+ assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
+ assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
+
+ self.log.info("Generating one more block")
+ self.nodes[0].generate(1)
+
+ self.log.info("Verifying nodes are all synced")
+
+ # Because nodes in regtest are all manual connections (eg using
+ # addnode), node1 should not have disconnected node0. If not for that,
+ # we'd expect node1 to have disconnected node0 for serving an
+ # insufficient work chain, in which case we'd need to reconnect them to
+ # continue the test.
+ # NOTE: before commit 44407100ff9b478d6131a1c38ee993b50b1830df, regtest
+ # connections were not manual, so the reconnect is necessary.
+ if (len(self.nodes[0].getpeerinfo()) == 0):
+ connect_nodes(self.nodes[1], 0)
+
+ self.sync_all()
+ self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+
+if __name__ == '__main__':
+ MinimumChainWorkTest().main()
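
The chain-work arithmetic in run_test() above is easy to gloss over, so here is a small standalone sketch (not part of the patch) of why 49 generated blocks keep node1 below its -minimumchainwork=0x65 threshold and why the single extra block mined afterwards pushes it past it. The constant names simply mirror the test.

# Standalone illustration of the numbers used by minchainwork.py above.
REGTEST_WORK_PER_BLOCK = 2      # each regtest block contributes 2 units of work
MIN_CHAIN_WORK = 0x65           # node1/node2 run with -minimumchainwork=0x65 (= 101)

chain_work = REGTEST_WORK_PER_BLOCK                # work of the genesis block
num_blocks = int((MIN_CHAIN_WORK - chain_work) / REGTEST_WORK_PER_BLOCK)
chain_work += num_blocks * REGTEST_WORK_PER_BLOCK

assert num_blocks == 49
assert chain_work == 100    # still below 101: node1 stays in IBD and relays nothing
chain_work += REGTEST_WORK_PER_BLOCK               # the one extra block mined later
assert chain_work == 102    # above 101: node1 leaves IBD and relay to node2 succeeds
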
diff --git a/test/functional/mining.py b/test/functional/mining.py
index 93f9838896..9aee06864e 100755
--- a/test/functional/mining.py
+++ b/test/functional/mining.py
@@ -15,7 +15,7 @@ from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_raises_jsonrpc
+from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
return b2a_hex(b).decode('ascii')
@@ -38,7 +38,6 @@ class MiningTest(BitcoinTestFramework):
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
- assert_equal(mining_info['currentblocksize'], 0)
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 0)
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
@@ -69,7 +68,7 @@ class MiningTest(BitcoinTestFramework):
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
- assert_raises_jsonrpc(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
+ assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
@@ -78,10 +77,10 @@ class MiningTest(BitcoinTestFramework):
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
- assert_raises_jsonrpc(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
+ assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
self.log.info("getblocktemplate: Test truncated final transaction")
- assert_raises_jsonrpc(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
+ assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
@@ -108,7 +107,7 @@ class MiningTest(BitcoinTestFramework):
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
- assert_raises_jsonrpc(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
+ assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
diff --git a/test/functional/multiwallet.py b/test/functional/multiwallet.py
index b4e15a3322..ba6b659ddc 100755
--- a/test/functional/multiwallet.py
+++ b/test/functional/multiwallet.py
@@ -7,9 +7,10 @@
Verify that a bitcoind node can load multiple wallet files
"""
import os
+import shutil
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_raises_jsonrpc
+from test_framework.util import assert_equal, assert_raises_rpc_error
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
@@ -29,6 +30,11 @@ class MultiWalletTest(BitcoinTestFramework):
os.mkdir(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w11'))
self.assert_start_raises_init_error(0, ['-wallet=w11'], 'Error loading wallet w11. -wallet filename must be a regular file.')
+ # should not initialize if one wallet is a copy of another
+ shutil.copyfile(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w2'),
+ os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w22'))
+ self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'], 'duplicates fileid')
+
# should not initialize if wallet file is a symlink
os.symlink(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w1'), os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w12'))
self.assert_start_raises_init_error(0, ['-wallet=w12'], 'Error loading wallet w12. -wallet filename must be a regular file.')
@@ -43,10 +49,10 @@ class MultiWalletTest(BitcoinTestFramework):
w1.generate(1)
# accessing invalid wallet fails
- assert_raises_jsonrpc(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
+ assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
- assert_raises_jsonrpc(-19, "Wallet file not specified", self.nodes[0].getwalletinfo)
+ assert_raises_rpc_error(-19, "Wallet file not specified", self.nodes[0].getwalletinfo)
# check w1 wallet balance
w1_info = w1.getwalletinfo()
diff --git a/test/functional/net.py b/test/functional/net.py
index 37ea6824d5..61a5e1f2e9 100755
--- a/test/functional/net.py
+++ b/test/functional/net.py
@@ -12,7 +12,7 @@ import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- assert_raises_jsonrpc,
+ assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
)
@@ -84,7 +84,7 @@ class NetTest(BitcoinTestFramework):
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existant node returns an error
- assert_raises_jsonrpc(-24, "Node has not been added",
+ assert_raises_rpc_error(-24, "Node has not been added",
self.nodes[0].getaddednodeinfo, '1.1.1.1')
def _test_getpeerinfo(self):
diff --git a/test/functional/nulldummy.py b/test/functional/nulldummy.py
index 60d0d876df..91c4550653 100755
--- a/test/functional/nulldummy.py
+++ b/test/functional/nulldummy.py
@@ -71,7 +71,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47)
trueDummy(test2tx)
- assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
+ assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
@@ -80,14 +80,14 @@ class NULLDUMMYTest(BitcoinTestFramework):
test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
- assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
+ assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
- assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
+ assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
diff --git a/test/functional/p2p-acceptblock.py b/test/functional/p2p-acceptblock.py
index 293bc05539..220b776369 100755
--- a/test/functional/p2p-acceptblock.py
+++ b/test/functional/p2p-acceptblock.py
@@ -4,37 +4,32 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
-Since behavior differs when receiving unrequested blocks from whitelisted peers
-versus non-whitelisted peers, this tests the behavior of both (effectively two
-separate tests running in parallel).
+Setup: two nodes, node0+node1, not connected to each other. Node1 will have
+nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
-Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
-whitelist localhost, but node1 does. They will each be on their own chain for
-this test.
-
-We have one NodeConn connection to each, test_node and white_node respectively.
+We have one NodeConn connection to node0 called test_node, and one to node1
+called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
- The tip should advance.
+ The tip should advance for node0, but node1 should skip processing due to
+ nMinimumChainWork.
+
+Node1 is unused in tests 3-7:
-3. Mine a block that forks the previous block, and deliver to each node from
- corresponding peer.
- Node0 should not process this block (just accept the header), because it is
- unrequested and doesn't have more work than the tip.
- Node1 should process because this is coming from a whitelisted peer.
+3. Mine a block that forks from the genesis block, and deliver to test_node.
+ Node0 should not process this block (just accept the header), because it
+ is unrequested and doesn't have at least as much work as the tip.
-4. Send another block that builds on the forking block.
- Node0 should process this block but be stuck on the shorter chain, because
- it's missing an intermediate block.
- Node1 should reorg to this longer chain.
+4a,b. Send another two blocks that build on the forking block.
+ Node0 should process the second block but be stuck on the shorter chain,
+ because it's missing an intermediate block.
-4b.Send 288 more blocks on the longer chain.
+4c. Send 288 more blocks on the longer chain (the number of blocks ahead
+ we currently store).
Node0 should process all but the last block (too far ahead in height).
- Send all headers to Node1, and then send the last block in that chain.
- Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
@@ -46,13 +41,21 @@ The test:
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
+
+8. Create a fork which is invalid at a height greater than the current chain tip
+ (i.e. one to which the node will try to reorg) but which has headers built on top
+ of the invalid block. Check that we get disconnected if we send more headers
+ on the chain the node now knows to be invalid.
+
+9. Test that Node1 is able to sync when connected to node0 (which should have sufficient
+ work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
-from test_framework.blocktools import create_block, create_coinbase
+from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
@@ -63,37 +66,39 @@ class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
- self.extra_args = [[], ["-whitelist=127.0.0.1"]]
+ self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
+ # Node2 will be used for non-whitelisted peers to test the interaction
+ # with nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
- test_node = NodeConnCB() # connects to node0 (not whitelisted)
- white_node = NodeConnCB() # connects to node1 (whitelisted)
+ test_node = NodeConnCB() # connects to node0
+ min_work_node = NodeConnCB() # connects to node1
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
- connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
+ connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], min_work_node))
test_node.add_connection(connections[0])
- white_node.add_connection(connections[1])
+ min_work_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
- white_node.wait_for_verack()
+ min_work_node.wait_for_verack()
- # 1. Have both nodes mine a block (leave IBD)
+ # 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
- # This should be accepted.
+ # This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
@@ -101,95 +106,119 @@ class AcceptBlockTest(BitcoinTestFramework):
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
- white_node.send_message(msg_block(blocks_h2[1]))
+ min_work_node.send_message(msg_block(blocks_h2[1]))
- [ x.sync_with_ping() for x in [test_node, white_node] ]
+ for x in [test_node, min_work_node]:
+ x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
- assert_equal(self.nodes[1].getblockcount(), 2)
- self.log.info("First height 2 block accepted by both nodes")
+ assert_equal(self.nodes[1].getblockcount(), 1)
+ self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
- # 3. Send another block that builds on the original tip.
- blocks_h2f = [] # Blocks at height 2 that fork off the main chain
- for i in range(2):
- blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
- blocks_h2f[i].solve()
- test_node.send_message(msg_block(blocks_h2f[0]))
- white_node.send_message(msg_block(blocks_h2f[1]))
+ # 3. Send another block that builds on genesis.
+ block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
+ block_time += 1
+ block_h1f.solve()
+ test_node.send_message(msg_block(block_h1f))
- [ x.sync_with_ping() for x in [test_node, white_node] ]
+ test_node.sync_with_ping()
+ tip_entry_found = False
for x in self.nodes[0].getchaintips():
- if x['hash'] == blocks_h2f[0].hash:
+ if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
+ assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
- for x in self.nodes[1].getchaintips():
- if x['hash'] == blocks_h2f[1].hash:
- assert_equal(x['status'], "valid-headers")
+ # 4. Send another two blocks that build on the fork.
+ block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
+ block_time += 1
+ block_h2f.solve()
+ test_node.send_message(msg_block(block_h2f))
- self.log.info("Second height 2 block accepted only from whitelisted peer")
+ test_node.sync_with_ping()
+ # Since the earlier block was not processed by the node, the new block
+ # can't be fully validated.
+ tip_entry_found = False
+ for x in self.nodes[0].getchaintips():
+ if x['hash'] == block_h2f.hash:
+ assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
- # 4. Now send another block that builds on the forking chain.
- blocks_h3 = []
- for i in range(2):
- blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
- blocks_h3[i].solve()
- test_node.send_message(msg_block(blocks_h3[0]))
- white_node.send_message(msg_block(blocks_h3[1]))
+ # But this block should be accepted by the node since it has equal work.
+ self.nodes[0].getblock(block_h2f.hash)
+ self.log.info("Second height 2 block accepted, but not reorg'ed to")
- [ x.sync_with_ping() for x in [test_node, white_node] ]
- # Since the earlier block was not processed by node0, the new block
+ # 4b. Now send another block that builds on the forking chain.
+ block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
+ block_h3.solve()
+ test_node.send_message(msg_block(block_h3))
+
+ test_node.sync_with_ping()
+ # Since the earlier block was not processed by the node, the new block
# can't be fully validated.
+ tip_entry_found = False
for x in self.nodes[0].getchaintips():
- if x['hash'] == blocks_h3[0].hash:
+ if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
+ self.nodes[0].getblock(block_h3.hash)
+
+ # But this block should be accepted by the node since it has more work.
+ self.nodes[0].getblock(block_h3.hash)
+ self.log.info("Unrequested more-work block accepted")
+
+ # 4c. Now mine 288 more blocks and deliver; all should be processed but
+ # the last (height-too-high) on the node (as long as it's not missing any headers)
+ tip = block_h3
+ all_blocks = []
+ for i in range(288):
+ next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
+ next_block.solve()
+ all_blocks.append(next_block)
+ tip = next_block
+
+ # Now send the block at height 5 and check that it wasn't accepted (missing header)
+ test_node.send_message(msg_block(all_blocks[1]))
+ test_node.sync_with_ping()
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
- # But this block should be accepted by node0 since it has more work.
- self.nodes[0].getblock(blocks_h3[0].hash)
- self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
+ # The block at height 5 should be accepted if we provide the missing header, though
+ headers_message = msg_headers()
+ headers_message.headers.append(CBlockHeader(all_blocks[0]))
+ test_node.send_message(headers_message)
+ test_node.send_message(msg_block(all_blocks[1]))
+ test_node.sync_with_ping()
+ self.nodes[0].getblock(all_blocks[1].hash)
- # Node1 should have accepted and reorged.
- assert_equal(self.nodes[1].getblockcount(), 3)
- self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
+ # Now send the blocks in all_blocks
+ for i in range(288):
+ test_node.send_message(msg_block(all_blocks[i]))
+ test_node.sync_with_ping()
- # 4b. Now mine 288 more blocks and deliver; all should be processed but
- # the last (height-too-high) on node0. Node1 should process the tip if
- # we give it the headers chain leading to the tip.
- tips = blocks_h3
- headers_message = msg_headers()
- all_blocks = [] # node0's blocks
- for j in range(2):
- for i in range(288):
- next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
- next_block.solve()
- if j==0:
- test_node.send_message(msg_block(next_block))
- all_blocks.append(next_block)
- else:
- headers_message.headers.append(CBlockHeader(next_block))
- tips[j] = next_block
-
- time.sleep(2)
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
- assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
-
- headers_message.headers.pop() # Ensure the last block is unrequested
- white_node.send_message(headers_message) # Send headers leading to tip
- white_node.send_message(msg_block(tips[1])) # Now deliver the tip
- white_node.sync_with_ping()
- self.nodes[1].getblock(tips[1].hash)
- self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
+ assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
- test_node.send_message(msg_block(blocks_h2f[0]))
- # Here, if the sleep is too short, the test could falsely succeed (if the
- # node hasn't processed the block by the time the sleep returns, and then
- # the node processes it and incorrectly advances the tip).
- # But this would be caught later on, when we verify that an inv triggers
- # a getdata request for this block.
+ # The node should have requested the blocks at some point, so
+ # disconnect/reconnect first
+ connections[0].disconnect_node()
+ test_node.wait_for_disconnect()
+
+ test_node = NodeConnCB() # connects to node (not whitelisted)
+ connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
+ test_node.add_connection(connections[0])
+
+ test_node.wait_for_verack()
+ test_node.send_message(msg_block(block_h1f))
+
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
@@ -200,23 +229,100 @@ class AcceptBlockTest(BitcoinTestFramework):
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
- test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
+ test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
- assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
+ assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
- test_node.send_message(msg_block(blocks_h2f[0]))
+ test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
+ self.nodes[0].getblock(all_blocks[286].hash)
+ assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
+ assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
+ # 8. Create a chain which is invalid at a height greater than the
+ # current chain tip, but which has more blocks on top of that
+ block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
+ block_289f.solve()
+ block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
+ block_290f.solve()
+ block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
+ # block_291 spends a coinbase below maturity!
+ block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
+ block_291.hashMerkleRoot = block_291.calc_merkle_root()
+ block_291.solve()
+ block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
+ block_292.solve()
+
+ # Now send all the headers on the chain and enough blocks to trigger reorg
+ headers_message = msg_headers()
+ headers_message.headers.append(CBlockHeader(block_289f))
+ headers_message.headers.append(CBlockHeader(block_290f))
+ headers_message.headers.append(CBlockHeader(block_291))
+ headers_message.headers.append(CBlockHeader(block_292))
+ test_node.send_message(headers_message)
+
+ test_node.sync_with_ping()
+ tip_entry_found = False
+ for x in self.nodes[0].getchaintips():
+ if x['hash'] == block_292.hash:
+ assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
+ assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
+
+ test_node.send_message(msg_block(block_289f))
+ test_node.send_message(msg_block(block_290f))
+
+ test_node.sync_with_ping()
+ self.nodes[0].getblock(block_289f.hash)
+ self.nodes[0].getblock(block_290f.hash)
+
+ test_node.send_message(msg_block(block_291))
+
+ # At this point we've sent an obviously-bogus block; wait for full processing
+ # without assuming whether we will be disconnected or not
+ try:
+ # Only wait a short while so the test doesn't take forever if we do get
+ # disconnected
+ test_node.sync_with_ping(timeout=1)
+ except AssertionError:
+ test_node.wait_for_disconnect()
+
+ test_node = NodeConnCB() # connects to node (not whitelisted)
+ connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
+ test_node.add_connection(connections[0])
+
+ NetworkThread().start() # Start up network handling in another thread
+ test_node.wait_for_verack()
+
+ # We should have failed reorg and switched back to 290 (but have block 291)
+ assert_equal(self.nodes[0].getblockcount(), 290)
+ assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
+ assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
+
+ # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
+ block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
+ block_293.solve()
+ headers_message = msg_headers()
+ headers_message.headers.append(CBlockHeader(block_293))
+ test_node.send_message(headers_message)
+ test_node.wait_for_disconnect()
+
+ # 9. Connect node1 to node0 and ensure it is able to sync
+ connect_nodes(self.nodes[0], 1)
+ sync_blocks([self.nodes[0], self.nodes[1]])
+ self.log.info("Successfully synced nodes 1 and 0")
+
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
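
The rewritten p2p-acceptblock.py repeats one pattern many times: scan getchaintips() for a specific hash, check its status ("headers-only" for headers the node has seen but whose block data it has not validated and stored), and assert that the entry exists. A hypothetical helper along these lines (not part of the patch; the name is made up) captures that pattern:

def assert_chaintip_status(node, block_hash, expected_status):
    """Assert that block_hash appears in node.getchaintips() with the expected
    status (e.g. "active", "valid-fork", "valid-headers" or "headers-only")."""
    for tip in node.getchaintips():
        if tip['hash'] == block_hash:
            assert tip['status'] == expected_status, tip['status']
            return
    raise AssertionError("%s not found in getchaintips()" % block_hash)

# Usage, mirroring the checks after block_h1f, block_h2f and block_h3 are sent:
#   assert_chaintip_status(self.nodes[0], block_h1f.hash, "headers-only")
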
diff --git a/test/functional/prioritise_transaction.py b/test/functional/prioritise_transaction.py
index 7ad368acd4..bb56db9b40 100755
--- a/test/functional/prioritise_transaction.py
+++ b/test/functional/prioritise_transaction.py
@@ -101,7 +101,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
# This will raise an exception due to min relay fee not being met
- assert_raises_jsonrpc(-26, "66: min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
+ assert_raises_rpc_error(-26, "66: min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
assert(tx_id not in self.nodes[0].getrawmempool())
# This is a less than 1000-byte transaction, so just set the fee
diff --git a/test/functional/pruning.py b/test/functional/pruning.py
index a4afbd46d7..1b0f0ba1cd 100755
--- a/test/functional/pruning.py
+++ b/test/functional/pruning.py
@@ -185,7 +185,7 @@ class PruneTest(BitcoinTestFramework):
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
- assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
+ assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
self.log.info("Will need to redownload block %d" % self.forkheight)
# Verify that we have enough history to reorg back to the fork point
@@ -232,7 +232,7 @@ class PruneTest(BitcoinTestFramework):
self.start_node(node_number)
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
- assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
+ assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
# now re-start in manual pruning mode
self.stop_node(node_number)
@@ -265,14 +265,14 @@ class PruneTest(BitcoinTestFramework):
return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
- assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
+ assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
- assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)
+ assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
diff --git a/test/functional/rawtransactions.py b/test/functional/rawtransactions.py
index 51dda09ac4..ec9467eb44 100755
--- a/test/functional/rawtransactions.py
+++ b/test/functional/rawtransactions.py
@@ -48,7 +48,7 @@ class RawTransactionsTest(BitcoinTestFramework):
rawtx = self.nodes[2].signrawtransaction(rawtx)
# This will raise an exception since there are missing inputs
- assert_raises_jsonrpc(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
+ assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#########################
# RAW TX MULTISIG TESTS #
@@ -190,13 +190,13 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
- assert_raises_jsonrpc(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, "Flase")
+ assert_raises_rpc_error(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
- assert_raises_jsonrpc(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, [])
+ assert_raises_rpc_error(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
- assert_raises_jsonrpc(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, {})
+ assert_raises_rpc_error(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
@@ -207,12 +207,12 @@ class RawTransactionsTest(BitcoinTestFramework):
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
- assert_raises_jsonrpc(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
+ assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
- assert_raises_jsonrpc(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
+ assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
diff --git a/test/functional/replace-by-fee.py b/test/functional/replace-by-fee.py
index 1fd7c91278..4ef10e6b36 100755
--- a/test/functional/replace-by-fee.py
+++ b/test/functional/replace-by-fee.py
@@ -136,9 +136,9 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
- assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+ assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# This will raise an exception due to transaction replacement being disabled
- assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
+ assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Extra 0.1 BTC fee
tx1b = CTransaction()
@@ -146,7 +146,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
# Replacement still disabled even with "enough fee"
- assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
+ assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
@@ -189,7 +189,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
- assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
+ assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# Accepted with sufficient fee
dbl_tx = CTransaction()
@@ -250,7 +250,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
- assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
+ assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# 1 BTC fee is enough
dbl_tx = CTransaction()
@@ -278,7 +278,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception
- assert_raises_jsonrpc(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
+ assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
for tx in tree_txs:
tx.rehash()
@@ -302,7 +302,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
- assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+ assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
@@ -325,7 +325,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx2_hex = txToHex(tx2)
# This will raise an exception
- assert_raises_jsonrpc(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
+ assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
@@ -342,7 +342,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx2_hex = txToHex(tx2)
# This will raise an exception
- assert_raises_jsonrpc(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
+ assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
@@ -361,7 +361,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx2_hex = txToHex(tx2)
# This will raise an exception
- assert_raises_jsonrpc(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
+ assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
@@ -407,7 +407,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
double_tx_hex = txToHex(double_tx)
# This will raise an exception
- assert_raises_jsonrpc(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
+ assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
# If we remove an input, it should pass
double_tx = CTransaction()
@@ -434,7 +434,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx1b_hex = txToHex(tx1b)
# This will raise an exception
- assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+ assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
@@ -452,7 +452,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx2b_hex = txToHex(tx2b)
# This will raise an exception
- assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
+ assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
@@ -504,7 +504,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
- assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+ assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))
@@ -531,7 +531,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
- assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
+ assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))
diff --git a/test/functional/resendwallettransactions.py b/test/functional/resendwallettransactions.py
index d6ba591391..d959bb4c38 100755
--- a/test/functional/resendwallettransactions.py
+++ b/test/functional/resendwallettransactions.py
@@ -5,7 +5,7 @@
"""Test resendwallettransactions RPC."""
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_raises_jsonrpc
+from test_framework.util import assert_equal, assert_raises_rpc_error
class ResendWalletTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
@@ -14,7 +14,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
def run_test(self):
# Should raise RPC_WALLET_ERROR (-4) if walletbroadcast is disabled.
- assert_raises_jsonrpc(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions)
+ assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions)
# Should return an empty array if there aren't unconfirmed wallet transactions.
self.stop_node(0)
diff --git a/test/functional/rpcbind_test.py b/test/functional/rpcbind_test.py
index 0cf64beebd..0e8c3fa209 100755
--- a/test/functional/rpcbind_test.py
+++ b/test/functional/rpcbind_test.py
@@ -101,7 +101,7 @@ class RPCBindTest(BitcoinTestFramework):
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
- assert_raises_jsonrpc(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
+ assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
diff --git a/test/functional/rpcnamedargs.py b/test/functional/rpcnamedargs.py
index da61cc66e6..0fb0a62afd 100755
--- a/test/functional/rpcnamedargs.py
+++ b/test/functional/rpcnamedargs.py
@@ -7,7 +7,7 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- assert_raises_jsonrpc,
+ assert_raises_rpc_error,
)
class NamedArgumentTest(BitcoinTestFramework):
@@ -19,7 +19,7 @@ class NamedArgumentTest(BitcoinTestFramework):
h = node.help(command='getinfo')
assert(h.startswith('getinfo\n'))
- assert_raises_jsonrpc(-8, 'Unknown named parameter', node.help, random='getinfo')
+ assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getinfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
diff --git a/test/functional/segwit.py b/test/functional/segwit.py
index c08fbd3e77..7cd87e5626 100755
--- a/test/functional/segwit.py
+++ b/test/functional/segwit.py
@@ -100,11 +100,11 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
- assert_raises_jsonrpc(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
+ assert_raises_rpc_error(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
def fail_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
- assert_raises_jsonrpc(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1)
+ assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1)
sync_blocks(self.nodes)
def run_test(self):
@@ -443,11 +443,7 @@ class SegWitTest(BitcoinTestFramework):
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
- try:
- self.nodes[0].importaddress(i,"",False,True)
- except JSONRPCException as exp:
- assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script")
- assert_equal(exp.error["code"], -4)
+ try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
@@ -460,7 +456,7 @@ class SegWitTest(BitcoinTestFramework):
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that no witness address should be returned by unsolvable addresses
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
- assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
+ assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# addwitnessaddress should return a witness addresses even if keys are not in the wallet
self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
@@ -543,7 +539,7 @@ class SegWitTest(BitcoinTestFramework):
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
# This will raise an exception
- assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
+ assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].validateaddress(compressed_solvable_address[1])
diff --git a/test/functional/signrawtransactions.py b/test/functional/signrawtransactions.py
index b47ef93955..9a45d53cb8 100755
--- a/test/functional/signrawtransactions.py
+++ b/test/functional/signrawtransactions.py
@@ -82,7 +82,7 @@ class SignRawTransactionsTest(BitcoinTestFramework):
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
- assert_raises(JSONRPCException, self.nodes[0].decoderawtransaction, rawTx + "00")
+ assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 843b67cd5c..dc8731066b 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -1622,7 +1622,6 @@ class NodeConnCB(object):
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
- return True
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index a53eb51799..7aabdd9c00 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -102,8 +102,11 @@ class BitcoinTestFramework(object):
check_json_precision()
+ self.options.cachedir = os.path.abspath(self.options.cachedir)
+
# Set up temp directory and start logging
if self.options.tmpdir:
+ self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 64966adb97..102c903018 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -51,6 +51,8 @@ def assert_raises(exc, fun, *args, **kwds):
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
+ except JSONRPCException:
+ raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
@@ -83,7 +85,7 @@ def assert_raises_process_error(returncode, output, fun, *args, **kwds):
else:
raise AssertionError("No exception raised")
-def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
+def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
@@ -99,6 +101,13 @@ def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
+ assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
+
+def try_rpc(code, message, fun, *args, **kwds):
+ """Tries to run an rpc command.
+
+ Test against error code and message if the rpc fails.
+ Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
@@ -107,10 +116,11 @@ def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
+ return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
- raise AssertionError("No exception raised")
+ return False
def assert_is_hex_string(string):
try:
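
A short usage sketch of the two helpers defined in the hunk above. It assumes a framework node object; the specific RPC calls and error strings are borrowed from other tests in this patch and are only examples.

from test_framework.util import assert_raises_rpc_error, try_rpc

def demo(node):
    # assert_raises_rpc_error: the call must fail with this code and a message
    # containing the given substring, otherwise an AssertionError is raised.
    assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)

    # try_rpc: returns True if a matching JSONRPCException was raised and False
    # if the call succeeded; any other outcome still raises an AssertionError.
    if not try_rpc(-4, "already contains the private key",
                   node.importaddress, node.getnewaddress()):
        pass  # the import succeeded; either outcome is fine for the caller
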
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index fae4f66d70..ca74bb68da 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -121,6 +121,8 @@ BASE_SCRIPTS= [
'bip65-cltv-p2p.py',
'uptime.py',
'resendwallettransactions.py',
+ 'minchainwork.py',
+ 'p2p-acceptblock.py',
]
EXTENDED_SCRIPTS = [
@@ -148,7 +150,6 @@ EXTENDED_SCRIPTS = [
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
- 'p2p-acceptblock.py',
'replace-by-fee.py',
]
diff --git a/test/functional/wallet-dump.py b/test/functional/wallet-dump.py
index bd745ac854..2ca01c7f08 100755
--- a/test/functional/wallet-dump.py
+++ b/test/functional/wallet-dump.py
@@ -7,7 +7,7 @@
import os
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (assert_equal, assert_raises_jsonrpc)
+from test_framework.util import (assert_equal, assert_raises_rpc_error)
def read_dump(file_name, addrs, hd_master_addr_old):
@@ -106,7 +106,7 @@ class WalletDumpTest(BitcoinTestFramework):
assert_equal(found_addr_rsv, 90*2)
# Overwriting should fail
- assert_raises_jsonrpc(-8, "already exists", self.nodes[0].dumpwallet, tmpdir + "/node0/wallet.unencrypted.dump")
+ assert_raises_rpc_error(-8, "already exists", self.nodes[0].dumpwallet, tmpdir + "/node0/wallet.unencrypted.dump")
if __name__ == '__main__':
WalletDumpTest().main ()
diff --git a/test/functional/wallet-encryption.py b/test/functional/wallet-encryption.py
index ce1e7744e9..db62e1e30f 100755
--- a/test/functional/wallet-encryption.py
+++ b/test/functional/wallet-encryption.py
@@ -9,7 +9,7 @@ import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- assert_raises_jsonrpc,
+ assert_raises_rpc_error,
)
class WalletEncryptionTest(BitcoinTestFramework):
@@ -32,7 +32,7 @@ class WalletEncryptionTest(BitcoinTestFramework):
self.start_node(0)
# Test that the wallet is encrypted
- assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
+ assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
@@ -40,20 +40,20 @@ class WalletEncryptionTest(BitcoinTestFramework):
# Check that the timeout is right
time.sleep(2)
- assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
+ assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
- assert_raises_jsonrpc(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
+ assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
- assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
+ assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
- assert_raises_jsonrpc(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
+ assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
diff --git a/test/functional/wallet.py b/test/functional/wallet.py
index de3aa8a875..7d6b0b24b6 100755
--- a/test/functional/wallet.py
+++ b/test/functional/wallet.py
@@ -102,7 +102,7 @@ class WalletTest(BitcoinTestFramework):
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
- assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
+ assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
@@ -287,10 +287,10 @@ class WalletTest(BitcoinTestFramework):
assert_equal(txObj['amount'], Decimal('-0.0001'))
# This will raise an exception because the amount type is wrong
- assert_raises_jsonrpc(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
+ assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
# This will raise an exception since generate does not accept a string
- assert_raises_jsonrpc(-1, "not an integer", self.nodes[0].generate, "2")
+ assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
@@ -423,7 +423,7 @@ class WalletTest(BitcoinTestFramework):
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
- assert_raises_jsonrpc(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
+ assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
diff --git a/test/functional/walletbackup.py b/test/functional/walletbackup.py
index 15ea26afa1..85a149793e 100755
--- a/test/functional/walletbackup.py
+++ b/test/functional/walletbackup.py
@@ -190,6 +190,16 @@ class WalletBackupTest(BitcoinTestFramework):
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
+ # Backup to source wallet file must fail
+ sourcePaths = [
+ tmpdir + "/node0/regtest/wallet.dat",
+ tmpdir + "/node0/./regtest/wallet.dat",
+ tmpdir + "/node0/regtest/",
+ tmpdir + "/node0/regtest"]
+
+ for sourcePath in sourcePaths:
+ assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
+
if __name__ == '__main__':
WalletBackupTest().main()
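
The four sourcePaths above only differ in surface form; after normalization they all resolve to (or would contain) the live wallet.dat, which is why backupwallet must refuse each of them. The actual check is performed by bitcoind, not by this test; the following is only a rough standalone illustration of the path equivalence, using made-up paths:

import os

wallet = "/tmp/node0/regtest/wallet.dat"
candidates = [
    "/tmp/node0/regtest/wallet.dat",
    "/tmp/node0/./regtest/wallet.dat",
    "/tmp/node0/regtest/",
    "/tmp/node0/regtest",
]
for path in candidates:
    p = os.path.normpath(path)
    # A directory destination would receive a copy named wallet.dat inside it,
    # so every candidate collapses onto the source file itself.
    resolved = p if p.endswith("wallet.dat") else os.path.join(p, "wallet.dat")
    assert resolved == os.path.normpath(wallet)
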
diff --git a/test/functional/zapwallettxes.py b/test/functional/zapwallettxes.py
index 83b11035c8..8cd622dc8e 100755
--- a/test/functional/zapwallettxes.py
+++ b/test/functional/zapwallettxes.py
@@ -17,7 +17,7 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- assert_raises_jsonrpc,
+ assert_raises_rpc_error,
wait_until,
)
@@ -72,7 +72,7 @@ class ZapWalletTXesTest (BitcoinTestFramework):
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
- assert_raises_jsonrpc(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
+ assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()