-rw-r--r--   .travis.yml                                          1
-rwxr-xr-x   contrib/devtools/commit-script-check.sh             39
-rw-r--r--   src/net.cpp                                         14
-rw-r--r--   src/net.h                                            2
-rw-r--r--   src/net_processing.cpp                             103
-rw-r--r--   src/rpc/blockchain.cpp                               1
-rwxr-xr-x   test/functional/abandonconflict.py                   9
-rwxr-xr-x   test/functional/p2p-segwit.py                       21
-rw-r--r--   test/functional/test_framework/authproxy.py          7
-rwxr-xr-x   test/functional/test_framework/mininode.py           7
-rwxr-xr-x   test/functional/test_framework/test_framework.py   213
-rw-r--r--   test/functional/test_framework/util.py              81
12 files changed, 290 insertions, 208 deletions
diff --git a/.travis.yml b/.travis.yml
index a479e46f44..97bb475e4b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -43,6 +43,7 @@ install:
- if [ -n "$PACKAGES" ]; then travis_retry sudo apt-get update; fi
- if [ -n "$PACKAGES" ]; then travis_retry sudo apt-get install --no-install-recommends --no-upgrade -qq $PACKAGES; fi
before_script:
+ - if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ]; then contrib/devtools/commit-script-check.sh $TRAVIS_COMMIT_RANGE; fi
- unset CC; unset CXX
- if [ "$CHECK_DOC" = 1 ]; then contrib/devtools/check-doc.py; fi
- mkdir -p depends/SDKs depends/sdk-sources
diff --git a/contrib/devtools/commit-script-check.sh b/contrib/devtools/commit-script-check.sh
new file mode 100755
index 0000000000..add4bb4883
--- /dev/null
+++ b/contrib/devtools/commit-script-check.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+# This simple script checks for commits beginning with: scripted-diff:
+# If found, looks for a script between the lines -BEGIN VERIFY SCRIPT- and
+# -END VERIFY SCRIPT-. If no ending is found, it reads until the end of the
+# commit message.
+
+# The resulting script should exactly transform the previous commit into the current
+# one. Any remaining diff signals an error.
+
+if test "x$1" = "x"; then
+ echo "Usage: $0 <commit>..."
+ exit 1
+fi
+
+RET=0
+PREV_BRANCH=`git name-rev --name-only HEAD`
+PREV_HEAD=`git rev-parse HEAD`
+for i in `git rev-list --reverse $1`; do
+ git rev-list -n 1 --pretty="%s" $i | grep -q "^scripted-diff:" || continue
+ git checkout --quiet $i^ || exit
+ SCRIPT="`git rev-list --format=%b -n1 $i | sed '/^-BEGIN VERIFY SCRIPT-$/,/^-END VERIFY SCRIPT-$/{//!b};d'`"
+ if test "x$SCRIPT" = "x"; then
+ echo "Error: missing script for: $i"
+ echo "Failed"
+ RET=1
+ else
+ echo "Running script for: $i"
+ echo "$SCRIPT"
+ eval "$SCRIPT"
+ git --no-pager diff --exit-code $i && echo "OK" || (echo "Failed"; false) || RET=1
+ fi
+ git reset --quiet --hard HEAD
+done
+git checkout --quiet $PREV_BRANCH 2>/dev/null || git checkout --quiet $PREV_HEAD
+exit $RET
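For context, a scripted-diff commit that this check would accept is expected to look roughly like the following; the subject line and the sed command are purely illustrative, not part of this change:

    scripted-diff: rename widgetFoo to widgetBar

    -BEGIN VERIFY SCRIPT-
    sed -i 's/widgetFoo/widgetBar/g' src/example.cpp
    -END VERIFY SCRIPT-

The same check can presumably be run locally against a commit range, e.g. contrib/devtools/commit-script-check.sh master..HEAD, mirroring how Travis passes $TRAVIS_COMMIT_RANGE in the .travis.yml hunk above.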
diff --git a/src/net.cpp b/src/net.cpp
index ed4c752606..1d90804627 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -948,7 +948,7 @@ bool CConnman::AttemptToEvictConnection()
continue;
if (node->fDisconnect)
continue;
- NodeEvictionCandidate candidate = {node->id, node->nTimeConnected, node->nMinPingUsecTime,
+ NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->nMinPingUsecTime,
node->nLastBlockTime, node->nLastTXTime,
(node->nServices & nRelevantServices) == nRelevantServices,
node->fRelayTxes, node->pfilter != NULL, node->addr, node->nKeyedNetGroup};
@@ -1374,7 +1374,7 @@ void CConnman::ThreadSocketHandler()
{
if (pnode->nLastRecv == 0 || pnode->nLastSend == 0)
{
- LogPrint(BCLog::NET, "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id);
+ LogPrint(BCLog::NET, "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->GetId());
pnode->fDisconnect = true;
}
else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL)
@@ -1394,7 +1394,7 @@ void CConnman::ThreadSocketHandler()
}
else if (!pnode->fSuccessfullyConnected)
{
- LogPrintf("version handshake timeout from %d\n", pnode->id);
+ LogPrintf("version handshake timeout from %d\n", pnode->GetId());
pnode->fDisconnect = true;
}
}
@@ -2487,7 +2487,7 @@ bool CConnman::DisconnectNode(NodeId id)
{
LOCK(cs_vNodes);
for(CNode* pnode : vNodes) {
- if (id == pnode->id) {
+ if (id == pnode->GetId()) {
pnode->fDisconnect = true;
return true;
}
@@ -2625,10 +2625,10 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
nTimeConnected(GetSystemTimeInSeconds()),
addr(addrIn),
fInbound(fInboundIn),
- id(idIn),
nKeyedNetGroup(nKeyedNetGroupIn),
addrKnown(5000, 0.001),
filterInventoryKnown(50000, 0.000001),
+ id(idIn),
nLocalHostNonce(nLocalHostNonceIn),
nLocalServices(nLocalServicesIn),
nMyStartingHeight(nMyStartingHeightIn),
@@ -2744,7 +2744,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
size_t nMessageSize = msg.data.size();
size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE;
- LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id);
+ LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->GetId());
std::vector<unsigned char> serializedHeader;
serializedHeader.reserve(CMessageHeader::HEADER_SIZE);
@@ -2782,7 +2782,7 @@ bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func)
CNode* found = nullptr;
LOCK(cs_vNodes);
for (auto&& pnode : vNodes) {
- if(pnode->id == id) {
+ if(pnode->GetId() == id) {
found = pnode;
break;
}
diff --git a/src/net.h b/src/net.h
index 4a63ef84e7..8b0ebc0ffe 100644
--- a/src/net.h
+++ b/src/net.h
@@ -611,7 +611,6 @@ public:
CCriticalSection cs_filter;
CBloomFilter* pfilter;
std::atomic<int> nRefCount;
- const NodeId id;
const uint64_t nKeyedNetGroup;
std::atomic_bool fPauseRecv;
@@ -682,6 +681,7 @@ public:
private:
CNode(const CNode&);
void operator=(const CNode&);
+ const NodeId id;
const uint64_t nLocalHostNonce;
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index debab436f6..4d924b5cdb 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -815,7 +815,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
!PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
- hashBlock.ToString(), pnode->id);
+ hashBlock.ToString(), pnode->GetId());
connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
state.pindexBestHeaderSent = pindex;
}
@@ -950,7 +950,7 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connma
auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
if (pnode->nVersion >= CADDR_TIME_VERSION) {
- uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
+ uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
for (unsigned int i = 0; i < nRelayNodes; i++) {
if (hashKey > best[i].first) {
std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
@@ -1176,7 +1176,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
if (req.indexes[i] >= block.vtx.size()) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
- LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
+ LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->GetId());
return;
}
resp.txn[i] = block.vtx[req.indexes[i]];
@@ -1189,7 +1189,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
{
- LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
+ LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
if (IsArgSet("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 0)) == 0)
{
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
@@ -1268,7 +1268,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
if (pfrom->nServicesExpected & ~nServices)
{
- LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected);
+ LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, pfrom->nServicesExpected);
connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
pfrom->fDisconnect = true;
@@ -1278,7 +1278,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
- LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, nVersion);
+ LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->GetId(), nVersion);
connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
pfrom->fDisconnect = true;
@@ -1380,7 +1380,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
cleanSubVer, pfrom->nVersion,
- pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
+ pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
remoteAddr);
int64_t nTimeOffset = nTime - GetTime();
@@ -1552,8 +1552,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
uint32_t nFetchFlags = GetFetchFlags(pfrom);
- std::vector<CInv> vToFetch;
-
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
CInv &inv = vInv[nInv];
@@ -1562,7 +1560,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return true;
bool fAlreadyHave = AlreadyHave(inv);
- LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
+ LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->GetId());
if (inv.type == MSG_TX) {
inv.type |= nFetchFlags;
@@ -1577,14 +1575,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// we now only provide a getheaders response here. When we receive the headers, we will
// then ask for the blocks we need.
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
- LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
}
}
else
{
pfrom->AddInventoryKnown(inv);
if (fBlocksOnly) {
- LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->GetId());
} else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) {
pfrom->AskFor(inv);
}
@@ -1593,9 +1591,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
}
-
- if (!vToFetch.empty())
- connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vToFetch));
}
@@ -1610,10 +1605,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return error("message getdata size() = %u", vInv.size());
}
- LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
+ LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->GetId());
if (vInv.size() > 0) {
- LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->GetId());
}
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
@@ -1653,7 +1648,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
- LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
+ LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->GetId());
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
@@ -1703,7 +1698,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
- LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);
+ LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->GetId());
return true;
}
@@ -1715,7 +1710,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// might maliciously send lots of getblocktxn requests to trigger
// expensive disk reads, because it will require the peer to
// actually receive all the data read from disk over the network.
- LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
+ LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
CInv inv;
inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
inv.hash = req.blockhash;
@@ -1740,7 +1735,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(cs_main);
if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
- LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
+ LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->GetId());
return true;
}
@@ -1765,7 +1760,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
std::vector<CBlock> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
- LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->GetId());
for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
@@ -1795,7 +1790,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
{
- LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->id);
+ LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->GetId());
return true;
}
@@ -1828,7 +1823,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
pfrom->nLastTXTime = GetTime();
LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
- pfrom->id,
+ pfrom->GetId(),
tx.GetHash().ToString(),
mempool.size(), mempool.DynamicMemoryUsage() / 1000);
@@ -1948,10 +1943,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// case.
int nDoS = 0;
if (!state.IsInvalid(nDoS) || nDoS == 0) {
- LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
+ LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
RelayTransaction(tx, connman);
} else {
- LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));
+ LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
}
}
}
@@ -1963,7 +1958,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (state.IsInvalid(nDoS))
{
LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
- pfrom->id,
+ pfrom->GetId(),
FormatStateMessage(state));
if (state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
@@ -2000,7 +1995,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(cs_main);
Misbehaving(pfrom->GetId(), nDoS);
}
- LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id);
+ LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId());
return true;
}
}
@@ -2079,7 +2074,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (status == READ_STATUS_INVALID) {
MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
Misbehaving(pfrom->GetId(), 100);
- LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id);
+ LogPrintf("Peer %d sent us invalid compact block\n", pfrom->GetId());
return true;
} else if (status == READ_STATUS_FAILED) {
// Duplicate txindexes, the block is now in-flight, so just request it
@@ -2184,7 +2179,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
it->second.first != pfrom->GetId()) {
- LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
+ LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->GetId());
return true;
}
@@ -2193,7 +2188,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (status == READ_STATUS_INVALID) {
MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
Misbehaving(pfrom->GetId(), 100);
- LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id);
+ LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->GetId());
return true;
} else if (status == READ_STATUS_FAILED) {
// Might have collided, fall back to getdata now :(
@@ -2281,7 +2276,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(),
pindexBestHeader->nHeight,
- pfrom->id, nodestate->nUnconnectingHeaders);
+ pfrom->GetId(), nodestate->nUnconnectingHeaders);
// Set hashLastUnknownBlock for this peer, so that if we
// eventually get the headers - even from a different peer -
// we can use this peer to download.
@@ -2319,7 +2314,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(cs_main);
CNodeState *nodestate = State(pfrom->GetId());
if (nodestate->nUnconnectingHeaders > 0) {
- LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
+ LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
}
nodestate->nUnconnectingHeaders = 0;
@@ -2330,7 +2325,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
- LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
+ LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
}
@@ -2370,7 +2365,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
- pindex->GetBlockHash().ToString(), pfrom->id);
+ pindex->GetBlockHash().ToString(), pfrom->GetId());
}
if (vGetData.size() > 1) {
LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
@@ -2393,7 +2388,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
vRecv >> *pblock;
- LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId());
// Process all blocks from whitelisted peers, even if not requested,
// unless we're still syncing with the network.
@@ -2425,14 +2420,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
if (!pfrom->fInbound) {
- LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
+ LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->GetId());
return true;
}
// Only send one GetAddr response per connection to reduce resource waste
// and discourage addr stamping of INV announcements.
if (pfrom->fSentAddr) {
- LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
+ LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->GetId());
return true;
}
pfrom->fSentAddr = true;
@@ -2533,7 +2528,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (!(sProblem.empty())) {
LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
- pfrom->id,
+ pfrom->GetId(),
sProblem,
pfrom->nPingNonceSent,
nonce,
@@ -2610,7 +2605,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(pfrom->cs_feeFilter);
pfrom->minFeeFilter = newFeeFilter;
}
- LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->GetId());
}
}
@@ -2621,7 +2616,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
else {
// Ignore unknown commands for extensibility
- LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
+ LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->GetId());
}
@@ -2701,7 +2696,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& i
msg.SetVersion(pfrom->GetRecvVersion());
// Scan for message start
if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
- LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
+ LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->GetId());
pfrom->fDisconnect = true;
return false;
}
@@ -2710,7 +2705,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& i
CMessageHeader& hdr = msg.hdr;
if (!hdr.IsValid(chainparams.MessageStart()))
{
- LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
+ LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->GetId());
return fMoreWork;
}
std::string strCommand = hdr.GetCommand();
@@ -2770,7 +2765,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, const std::atomic<bool>& i
}
if (!fRet) {
- LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
+ LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->GetId());
}
LOCK(cs_main);
@@ -2899,7 +2894,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
- LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
+ LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
}
}
@@ -2929,7 +2924,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
(!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
const CBlockIndex *pBestIndex = NULL; // last header queued for delivery
- ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
+ ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
if (!fRevertToInv) {
bool fFoundStartingHeader = false;
@@ -2984,7 +2979,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
// We only send up to 1 block as header-and-ids, as otherwise
// probably means we're doing an initial-ish-sync or they're slow
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
- vHeaders.front().GetHash().ToString(), pto->id);
+ vHeaders.front().GetHash().ToString(), pto->GetId());
int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
@@ -3014,10 +3009,10 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
vHeaders.size(),
vHeaders.front().GetHash().ToString(),
- vHeaders.back().GetHash().ToString(), pto->id);
+ vHeaders.back().GetHash().ToString(), pto->GetId());
} else {
LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
- vHeaders.front().GetHash().ToString(), pto->id);
+ vHeaders.front().GetHash().ToString(), pto->GetId());
}
connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
state.pindexBestHeaderSent = pBestIndex;
@@ -3046,7 +3041,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
if (!PeerHasHeader(&state, pindex)) {
pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
- pto->id, hashToAnnounce.ToString());
+ pto->GetId(), hashToAnnounce.ToString());
}
}
}
@@ -3193,7 +3188,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
- LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
+ LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
pto->fDisconnect = true;
return true;
}
@@ -3206,7 +3201,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
- LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
+ LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
pto->fDisconnect = true;
return true;
}
@@ -3225,7 +3220,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
- pindex->nHeight, pto->id);
+ pindex->nHeight, pto->GetId());
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
@@ -3243,7 +3238,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(inv))
{
- LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->id);
+ LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index aec102c9a5..8599795d21 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -412,6 +412,7 @@ UniValue getrawmempool(const JSONRPCRequest& request)
throw std::runtime_error(
"getrawmempool ( verbose )\n"
"\nReturns all transaction ids in memory pool as a json array of string transaction ids.\n"
+ "\nHint: use getmempoolentry to fetch a specific transaction from the mempool.\n"
"\nArguments:\n"
"1. verbose (boolean, optional, default=false) True for a json object, false for array of transaction ids\n"
"\nResult: (for verbose = false):\n"
diff --git a/test/functional/abandonconflict.py b/test/functional/abandonconflict.py
index 0439d168de..9748757641 100755
--- a/test/functional/abandonconflict.py
+++ b/test/functional/abandonconflict.py
@@ -10,10 +10,8 @@
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already conflicted or abandoned.
"""
-
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
-import urllib.parse
class AbandonConflictTest(BitcoinTestFramework):
def __init__(self):
@@ -37,8 +35,8 @@ class AbandonConflictTest(BitcoinTestFramework):
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
- url = urllib.parse.urlparse(self.nodes[1].url)
- self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
+ # Disconnect nodes so node0's transactions don't get into node1's mempool
+ disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
@@ -78,8 +76,9 @@ class AbandonConflictTest(BitcoinTestFramework):
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.0001"])
- # Verify txs no longer in mempool
+ # Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
diff --git a/test/functional/p2p-segwit.py b/test/functional/p2p-segwit.py
index 335777b2ab..24d4d37c42 100755
--- a/test/functional/p2p-segwit.py
+++ b/test/functional/p2p-segwit.py
@@ -61,16 +61,6 @@ class TestNode(NodeConnCB):
self.send_message(msg)
self.wait_for_getdata()
- def announce_block(self, block, use_header):
- with mininode_lock:
- self.last_message.pop("getdata", None)
- if use_header:
- msg = msg_headers()
- msg.headers = [ CBlockHeader(block) ]
- self.send_message(msg)
- else:
- self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
-
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
@@ -926,9 +916,9 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
- self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
+ self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
- self.old_node.wait_for_inv(CInv(1, tx3.sha256))
+ self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
@@ -1029,13 +1019,18 @@ class SegWitTest(BitcoinTestFramework):
block4 = self.build_next_block(nVersion=4)
block4.solve()
self.old_node.getdataset = set()
+
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
- self.old_node.announce_block(block4, use_header=False)
+ # Since 0.14, inv's will only be responded to with a getheaders, so send a header
+ # to announce this block.
+ msg = msg_headers()
+ msg.headers = [ CBlockHeader(block4) ]
+ self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py
index 9ab3094b06..dfcc524313 100644
--- a/test/functional/test_framework/authproxy.py
+++ b/test/functional/test_framework/authproxy.py
@@ -42,6 +42,7 @@ import decimal
import json
import logging
import socket
+import time
try:
import urllib.parse as urlparse
except ImportError:
@@ -163,6 +164,7 @@ class AuthServiceProxy(object):
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
+ req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout as e:
@@ -183,8 +185,9 @@ class AuthServiceProxy(object):
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
+ elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
- log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
+ log.debug("<-%s- [%.6f] %s"%(response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
- log.debug("<-- "+responsedata)
+ log.debug("<-- [%.6f] %s"%(elapsed,responsedata))
return response
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 03db0d1092..70bba566c7 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -1604,7 +1604,12 @@ class NodeConnCB(object):
assert wait_until(test_function, timeout=timeout)
def wait_for_inv(self, expected_inv, timeout=60):
- test_function = lambda: self.last_message.get("inv") and self.last_message["inv"] != expected_inv
+ """Waits for an INV message and checks that the first inv object in the message was as expected."""
+ if len(expected_inv) > 1:
+ raise NotImplementedError("wait_for_inv() will only verify the first inv object")
+ test_function = lambda: self.last_message.get("inv") and \
+ self.last_message["inv"].inv[0].type == expected_inv[0].type and \
+ self.last_message["inv"].inv[0].hash == expected_inv[0].hash
assert wait_until(test_function, timeout=timeout)
def wait_for_verack(self, timeout=60):
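With this change, callers pass a list holding a single CInv, as in the p2p-segwit.py hunk above. A minimal usage sketch (assuming test_node is an already-connected NodeConnCB instance and tx is a CTransaction whose sha256 has been computed):

    # Wait until the peer announces the transaction back to us;
    # inv type 1 is MSG_TX, tx.sha256 is the transaction's integer hash.
    test_node.wait_for_inv([CInv(1, tx.sha256)])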
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 3832f04ecd..8d8139e4e4 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -8,28 +8,55 @@ from collections import deque
import logging
import optparse
import os
-import sys
import shutil
+import subprocess
+import sys
import tempfile
import time
from .util import (
- initialize_chain,
- start_nodes,
+ PortSeed,
+ MAX_NODES,
+ bitcoind_processes,
+ check_json_precision,
connect_nodes_bi,
+ disable_mocktime,
disconnect_nodes,
+ enable_coverage,
+ enable_mocktime,
+ get_mocktime,
+ get_rpc_proxy,
+ initialize_datadir,
+ log_filename,
+ p2p_port,
+ rpc_url,
+ set_node_times,
+ start_node,
+ start_nodes,
+ stop_node,
+ stop_nodes,
sync_blocks,
sync_mempools,
- stop_nodes,
- stop_node,
- enable_coverage,
- check_json_precision,
- initialize_chain_clean,
- PortSeed,
+ wait_for_bitcoind_start,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
+ """Base class for a bitcoin test script.
+
+ Individual bitcoin test scripts should subclass this class and override the following methods:
+
+ - __init__()
+ - add_options()
+ - setup_chain()
+ - setup_network()
+ - run_test()
+
+ The main() method should not be overridden.
+
+ This class also contains various public and private helper methods."""
+
+ # Methods to override in subclass test scripts.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
@@ -40,27 +67,15 @@ class BitcoinTestFramework(object):
self.setup_clean_chain = False
self.nodes = None
- def run_test(self):
- raise NotImplementedError
-
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
- initialize_chain_clean(self.options.tmpdir, self.num_nodes)
+ self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
- initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
-
- def stop_node(self, num_node):
- stop_node(self.nodes[num_node], num_node)
-
- def setup_nodes(self):
- extra_args = None
- if hasattr(self, "extra_args"):
- extra_args = self.extra_args
- self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+ self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def setup_network(self):
self.setup_nodes()
@@ -72,27 +87,16 @@ class BitcoinTestFramework(object):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
- def split_network(self):
- """
- Split the network of four nodes into nodes 0/1 and 2/3.
- """
- disconnect_nodes(self.nodes[1], 2)
- disconnect_nodes(self.nodes[2], 1)
- self.sync_all([self.nodes[:2], self.nodes[2:]])
-
- def sync_all(self, node_groups=None):
- if not node_groups:
- node_groups = [self.nodes]
+ def setup_nodes(self):
+ extra_args = None
+ if hasattr(self, "extra_args"):
+ extra_args = self.extra_args
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
- [sync_blocks(group) for group in node_groups]
- [sync_mempools(group) for group in node_groups]
+ def run_test(self):
+ raise NotImplementedError
- def join_network(self):
- """
- Join the (previously split) network halves together.
- """
- connect_nodes_bi(self.nodes, 1, 2)
- self.sync_all()
+ # Main function. This should not be overridden by the subclass test scripts.
def main(self):
@@ -156,7 +160,7 @@ class BitcoinTestFramework(object):
if not self.options.noshutdown:
self.log.info("Stopping nodes")
- stop_nodes(self.nodes)
+ self.stop_nodes()
else:
self.log.info("Note: bitcoinds were not stopped and may still be running")
@@ -190,6 +194,45 @@ class BitcoinTestFramework(object):
logging.shutdown()
sys.exit(self.TEST_EXIT_FAILED)
+ # Public helper methods. These can be accessed by the subclass test scripts.
+
+ def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
+ return start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)
+
+ def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
+ return start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary)
+
+ def stop_node(self, num_node):
+ stop_node(self.nodes[num_node], num_node)
+
+ def stop_nodes(self):
+ stop_nodes(self.nodes)
+
+ def split_network(self):
+ """
+ Split the network of four nodes into nodes 0/1 and 2/3.
+ """
+ disconnect_nodes(self.nodes[1], 2)
+ disconnect_nodes(self.nodes[2], 1)
+ self.sync_all([self.nodes[:2], self.nodes[2:]])
+
+ def join_network(self):
+ """
+ Join the (previously split) network halves together.
+ """
+ connect_nodes_bi(self.nodes, 1, 2)
+ self.sync_all()
+
+ def sync_all(self, node_groups=None):
+ if not node_groups:
+ node_groups = [self.nodes]
+
+ for group in node_groups:
+ sync_blocks(group)
+ sync_mempools(group)
+
+ # Private helper methods. These should not be accessed by the subclass test scripts.
+
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
@@ -218,6 +261,88 @@ class BitcoinTestFramework(object):
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
+ def _initialize_chain(self, test_dir, num_nodes, cachedir):
+ """Initialize a pre-mined blockchain for use by the test.
+
+ Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
+ Afterward, create num_nodes copies from the cache."""
+
+ assert num_nodes <= MAX_NODES
+ create_cache = False
+ for i in range(MAX_NODES):
+ if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
+ create_cache = True
+ break
+
+ if create_cache:
+ self.log.debug("Creating data directories from cached datadir")
+
+ # find and delete old cache directories if any exist
+ for i in range(MAX_NODES):
+ if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
+ shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
+
+ # Create cache directories, run bitcoinds:
+ for i in range(MAX_NODES):
+ datadir = initialize_datadir(cachedir, i)
+ args = [os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
+ if i > 0:
+ args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
+ bitcoind_processes[i] = subprocess.Popen(args)
+ self.log.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
+ wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
+ self.log.debug("initialize_chain: RPC successfully started")
+
+ self.nodes = []
+ for i in range(MAX_NODES):
+ try:
+ self.nodes.append(get_rpc_proxy(rpc_url(i), i))
+ except:
+ self.log.exception("Error connecting to node %d" % i)
+ sys.exit(1)
+
+ # Create a 200-block-long chain; each of the 4 first nodes
+ # gets 25 mature blocks and 25 immature.
+ # Note: To preserve compatibility with older versions of
+ # initialize_chain, only 4 nodes will generate coins.
+ #
+ # blocks are created with timestamps 10 minutes apart
+ # starting from 2010 minutes in the past
+ enable_mocktime()
+ block_time = get_mocktime() - (201 * 10 * 60)
+ for i in range(2):
+ for peer in range(4):
+ for j in range(25):
+ set_node_times(self.nodes, block_time)
+ self.nodes[peer].generate(1)
+ block_time += 10 * 60
+ # Must sync before next peer starts generating blocks
+ sync_blocks(self.nodes)
+
+ # Shut them down, and clean up cache directories:
+ self.stop_nodes()
+ self.nodes = []
+ disable_mocktime()
+ for i in range(MAX_NODES):
+ os.remove(log_filename(cachedir, i, "debug.log"))
+ os.remove(log_filename(cachedir, i, "db.log"))
+ os.remove(log_filename(cachedir, i, "peers.dat"))
+ os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
+
+ for i in range(num_nodes):
+ from_dir = os.path.join(cachedir, "node" + str(i))
+ to_dir = os.path.join(test_dir, "node" + str(i))
+ shutil.copytree(from_dir, to_dir)
+ initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
+
+ def _initialize_chain_clean(self, test_dir, num_nodes):
+ """Initialize empty blockchain for use by the test.
+
+ Create an empty blockchain and num_nodes wallets.
+ Useful if a test case wants complete control over initialization."""
+ for i in range(num_nodes):
+ initialize_datadir(test_dir, i)
+
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
@@ -240,7 +365,7 @@ class ComparisonTestFramework(BitcoinTestFramework):
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
- self.nodes = start_nodes(
+ self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
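To make the override contract described in the BitcoinTestFramework docstring concrete, a minimal hypothetical test script could look like the sketch below; ExampleTest and its assertion are illustrative only, while the attributes and methods used (num_nodes, setup_clean_chain, run_test, sync_all, main) come from the framework code above:

    #!/usr/bin/env python3
    from test_framework.test_framework import BitcoinTestFramework
    from test_framework.util import assert_equal

    class ExampleTest(BitcoinTestFramework):
        def __init__(self):
            super().__init__()
            self.num_nodes = 2
            self.setup_clean_chain = True  # use _initialize_chain_clean rather than the 200-block cache

        def run_test(self):
            # Mine a block on node0 and check that node1 catches up after syncing.
            self.nodes[0].generate(1)
            self.sync_all()
            assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())

    if __name__ == '__main__':
        ExampleTest().main()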
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 9186c3cbe9..2b56fe8d62 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -226,87 +226,6 @@ def wait_for_bitcoind_start(process, url, i):
raise # unknown JSON RPC exception
time.sleep(0.25)
-def initialize_chain(test_dir, num_nodes, cachedir):
- """
- Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
- Afterward, create num_nodes copies from the cache
- """
-
- assert num_nodes <= MAX_NODES
- create_cache = False
- for i in range(MAX_NODES):
- if not os.path.isdir(os.path.join(cachedir, 'node'+str(i))):
- create_cache = True
- break
-
- if create_cache:
- logger.debug("Creating data directories from cached datadir")
-
- #find and delete old cache directories if any exist
- for i in range(MAX_NODES):
- if os.path.isdir(os.path.join(cachedir,"node"+str(i))):
- shutil.rmtree(os.path.join(cachedir,"node"+str(i)))
-
- # Create cache directories, run bitcoinds:
- for i in range(MAX_NODES):
- datadir=initialize_datadir(cachedir, i)
- args = [ os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
- if i > 0:
- args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
- bitcoind_processes[i] = subprocess.Popen(args)
- logger.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
- wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
- logger.debug("initialize_chain: RPC successfully started")
-
- rpcs = []
- for i in range(MAX_NODES):
- try:
- rpcs.append(get_rpc_proxy(rpc_url(i), i))
- except:
- sys.stderr.write("Error connecting to "+url+"\n")
- sys.exit(1)
-
- # Create a 200-block-long chain; each of the 4 first nodes
- # gets 25 mature blocks and 25 immature.
- # Note: To preserve compatibility with older versions of
- # initialize_chain, only 4 nodes will generate coins.
- #
- # blocks are created with timestamps 10 minutes apart
- # starting from 2010 minutes in the past
- enable_mocktime()
- block_time = get_mocktime() - (201 * 10 * 60)
- for i in range(2):
- for peer in range(4):
- for j in range(25):
- set_node_times(rpcs, block_time)
- rpcs[peer].generate(1)
- block_time += 10*60
- # Must sync before next peer starts generating blocks
- sync_blocks(rpcs)
-
- # Shut them down, and clean up cache directories:
- stop_nodes(rpcs)
- disable_mocktime()
- for i in range(MAX_NODES):
- os.remove(log_filename(cachedir, i, "debug.log"))
- os.remove(log_filename(cachedir, i, "db.log"))
- os.remove(log_filename(cachedir, i, "peers.dat"))
- os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
-
- for i in range(num_nodes):
- from_dir = os.path.join(cachedir, "node"+str(i))
- to_dir = os.path.join(test_dir, "node"+str(i))
- shutil.copytree(from_dir, to_dir)
- initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
-
-def initialize_chain_clean(test_dir, num_nodes):
- """
- Create an empty blockchain and num_nodes wallets.
- Useful if a test case wants complete control over initialization.
- """
- for i in range(num_nodes):
- datadir=initialize_datadir(test_dir, i)
-
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
"""