Diffstat (limited to 'src')
-rw-r--r--  src/init.cpp       |  2
-rw-r--r--  src/main.cpp       | 32
-rw-r--r--  src/main.h         |  5
-rw-r--r--  src/scheduler.cpp  |  7
4 files changed, 37 insertions(+), 9 deletions(-)
diff --git a/src/init.cpp b/src/init.cpp
index f2b13b627a..eab8de8d02 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -412,7 +412,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified bip9 deployment (regtest-only)");
}
- string debugCategories = "addrman, alert, bench, coindb, db, http, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq"; // Don't translate these and qt below
+ string debugCategories = "addrman, alert, bench, cmpctblock, coindb, db, http, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq"; // Don't translate these and qt below
if (mode == HMM_BITCOIN_QT)
debugCategories += ", qt";
strUsage += HelpMessageOpt("-debug=<category>", strprintf(_("Output debugging information (default: %u, supplying <category> is optional)"), 0) + ". " +
diff --git a/src/main.cpp b/src/main.cpp
index 96734b839f..191bcff4cc 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -495,9 +495,13 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pf
return;
}
if (nodestate->fProvidesHeaderAndIDs) {
- BOOST_FOREACH(const NodeId nodeid, lNodesAnnouncingHeaderAndIDs)
- if (nodeid == pfrom->GetId())
+ for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
+ if (*it == pfrom->GetId()) {
+ lNodesAnnouncingHeaderAndIDs.erase(it);
+ lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
return;
+ }
+ }
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = (nLocalServices & NODE_WITNESS) ? 2 : 1;
if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
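The rewritten loop above implements a small move-to-back scheme: when a peer that already announces via headers-and-IDs qualifies again, its entry is erased and re-appended, so the front of lNodesAnnouncingHeaderAndIDs always holds the stalest announcer, which the size() >= 3 path can presumably evict first. A minimal standalone sketch of that bookkeeping; kMaxAnnouncers and RecordAnnouncer are hypothetical names, not taken from the patch:

    // Hypothetical sketch of the move-to-back bookkeeping above.
    #include <cstdint>
    #include <list>

    typedef int64_t NodeId;
    static const size_t kMaxAnnouncers = 3; // mirrors the size() >= 3 check

    // Returns true if id was newly added, false if an existing entry was
    // refreshed by moving it to the back (the back is the most recent).
    bool RecordAnnouncer(std::list<NodeId>& announcers, NodeId id)
    {
        for (std::list<NodeId>::iterator it = announcers.begin(); it != announcers.end(); ++it) {
            if (*it == id) {
                announcers.erase(it);     // drop the stale position...
                announcers.push_back(id); // ...and re-append as most recent
                return false;
            }
        }
        if (announcers.size() >= kMaxAnnouncers)
            announcers.pop_front();       // evict the stalest announcer
        announcers.push_back(id);
        return true;
    }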
@@ -4851,7 +4855,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// and we don't feel like constructing the object for them, so
// instead we respond with the full, non-compact block.
bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
- if (mi->second->nHeight >= chainActive.Height() - 10) {
+ if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
} else
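The rewritten condition tightens the serving policy in two ways: the requested block must now be within MAX_CMPCTBLOCK_DEPTH of the tip (5 instead of the old hard-coded 10, per the main.h hunk below), and CanDirectFetch() must hold at all; otherwise the else branch falls back to a full block. Distilled into a hypothetical predicate (ShouldServeCompact is not a name from the patch):

    // Hypothetical distillation of the check above; the constant mirrors main.h.
    static const int MAX_CMPCTBLOCK_DEPTH = 5;

    bool ShouldServeCompact(bool canDirectFetch, int blockHeight, int tipHeight)
    {
        // Only near-tip blocks are worth rebuilding as compact blocks on
        // demand; anything older falls through to a full BLOCK response.
        return canDirectFetch && blockHeight >= tipHeight - MAX_CMPCTBLOCK_DEPTH;
    }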
@@ -5397,8 +5401,20 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return true;
}
- if (it->second->nHeight < chainActive.Height() - 15) {
- LogPrint("net", "Peer %d sent us a getblocktxn for a block > 15 deep", pfrom->id);
+ if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
+ // If an older block is requested (should never happen in practice,
+ // but can happen in tests) send a block response instead of a
+ // blocktxn response. Sending a full block response instead of a
+ // small blocktxn response is preferable in the case where a peer
+ // might maliciously send lots of getblocktxn requests to trigger
+ // expensive disk reads, because it will require the peer to
+ // actually receive all the data read from disk over the network.
+ LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
+ CInv inv;
+ inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
+ inv.hash = req.blockhash;
+ pfrom->vRecvGetData.push_back(inv);
+ ProcessGetData(pfrom, chainparams.GetConsensus());
return true;
}
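Note how the fallback works: rather than answering the getblocktxn directly, the node fabricates a getdata-style CInv for the full block, appends it to its own pfrom->vRecvGetData queue, and lets the existing ProcessGetData path serve it. A self-contained sketch of that shape, using stand-in types (Inv, FallBackToFullBlock, and the enum values are illustrative, not the real definitions):

    // Illustrative stand-ins for the self-queueing fallback above.
    #include <deque>
    #include <string>

    struct Inv { int type; std::string hash; };
    enum { MSG_BLOCK = 2, MSG_WITNESS_BLOCK = 2 | (1 << 30) };

    void FallBackToFullBlock(std::deque<Inv>& recvGetData,
                             const std::string& blockhash,
                             bool peerWantsWitness)
    {
        // Queue the request as if the peer had sent getdata for the block;
        // the normal getdata handler then streams the whole block, so a peer
        // probing deep history pays the full download cost for every probe.
        Inv inv;
        inv.type = peerWantsWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
        inv.hash = blockhash;
        recvGetData.push_back(inv);
    }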
@@ -5727,6 +5743,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return true;
}
+ if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+ // We seem to be rather well-synced, so it appears pfrom was the first to provide us
+ // with this block! Let's get them to announce using compact blocks in the future.
+ MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom);
+ }
+
BlockTransactionsRequest req;
for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
if (!partialBlock.IsTxAvailable(i))
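The loop this hunk ends in collects the indexes that partialBlock could not reconstruct from the local mempool; presumably those indexes populate the BlockTransactionsRequest req declared just above, to be sent back as a getblocktxn. Just that collection step, as a sketch (MissingIndexes is a hypothetical helper):

    // Hypothetical helper: gather indexes of transactions the partially
    // downloaded block is missing, for the getblocktxn round trip.
    #include <stdint.h>
    #include <vector>

    std::vector<uint16_t> MissingIndexes(const std::vector<bool>& txAvailable)
    {
        std::vector<uint16_t> missing;
        for (size_t i = 0; i < txAvailable.size(); i++) {
            if (!txAvailable[i])
                missing.push_back((uint16_t)i);
        }
        return missing;
    }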
diff --git a/src/main.h b/src/main.h
index daf884337d..024f35cd39 100644
--- a/src/main.h
+++ b/src/main.h
@@ -89,6 +89,11 @@ static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
+/** Maximum depth of blocks we're willing to serve as compact blocks to peers
+ * when requested. For older blocks, a regular BLOCK response will be sent. */
+static const int MAX_CMPCTBLOCK_DEPTH = 5;
+/** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */
+static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the "block download window": how far ahead of our current height do we fetch?
* Larger windows tolerate larger download speed differences between peers, but increase the potential
* degree of disordering of blocks on disk (which makes reindexing and in the future perhaps pruning
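Note the deliberate gap between the two new constants: compact blocks are only served down to depth 5, but getblocktxn requests are honored down to depth 10, plausibly so that a peer which received a compact block announcement near the cutoff can still fetch its transactions a few blocks later. One way to pin that reading down (the assertion is my interpretation, not part of the patch):

    static const int MAX_CMPCTBLOCK_DEPTH = 5;
    static const int MAX_BLOCKTXN_DEPTH = 10;
    // Assumed invariant: we must honor getblocktxn for any block we may
    // recently have served as a compact block.
    static_assert(MAX_BLOCKTXN_DEPTH >= MAX_CMPCTBLOCK_DEPTH,
                  "getblocktxn horizon must cover the compact-block horizon");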
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 52777b61f9..27c03f154c 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -54,9 +54,10 @@ void CScheduler::serviceQueue()
#else
// Some boost versions have a conflicting overload of wait_until that returns void.
// Explicitly use a template here to avoid hitting that overload.
- while (!shouldStop() && !taskQueue.empty() &&
- newTaskScheduled.wait_until<>(lock, taskQueue.begin()->first) != boost::cv_status::timeout) {
- // Keep waiting until timeout
+ while (!shouldStop() && !taskQueue.empty()) {
+ boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
+ if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
+ break; // Exit loop after timeout; it means we reached the time of the event
}
#endif
// If there are multiple threads, the queue can empty while we're waiting (another
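For readers unfamiliar with the pattern being repaired: a condition-variable wait loop must re-read the earliest deadline on every wakeup, since a notification can mean a task was inserted ahead of the one being waited on, and only a genuine timeout means the front task is due. A standalone sketch of the same loop using std:: primitives instead of boost (all names here are illustrative):

    // Illustrative rewrite of the service loop with std:: primitives.
    #include <chrono>
    #include <condition_variable>
    #include <functional>
    #include <map>
    #include <mutex>

    typedef std::chrono::system_clock Clock;

    std::mutex m;
    std::condition_variable newTaskScheduled;
    std::multimap<Clock::time_point, std::function<void()> > taskQueue;
    bool stopRequested = false;

    void ServiceQueue()
    {
        std::unique_lock<std::mutex> lock(m);
        while (!stopRequested) {
            if (taskQueue.empty()) {
                newTaskScheduled.wait(lock); // nothing scheduled: sleep until notified
                continue;
            }
            // Re-read the front deadline each pass: a wakeup may mean an
            // earlier task was just scheduled ahead of the one we knew about.
            Clock::time_point timeToWaitFor = taskQueue.begin()->first;
            if (newTaskScheduled.wait_until(lock, timeToWaitFor) == std::cv_status::timeout) {
                std::function<void()> f = taskQueue.begin()->second;
                taskQueue.erase(taskQueue.begin());
                lock.unlock();
                f();       // run outside the lock so tasks can reschedule
                lock.lock();
            }
        }
    }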