-rw-r--r--   src/headerssync.cpp                                     |  24
-rw-r--r--   src/logging.cpp                                         |   3
-rw-r--r--   src/logging.h                                           |   1
-rw-r--r--   src/net_processing.cpp                                  |   7
-rw-r--r--   src/validation.cpp                                      |   2
-rwxr-xr-x   test/functional/p2p_headers_sync_with_minchainwork.py  |  32
6 files changed, 46 insertions, 23 deletions
diff --git a/src/headerssync.cpp b/src/headerssync.cpp
index 3eca492d81..757b942cd9 100644
--- a/src/headerssync.cpp
+++ b/src/headerssync.cpp
@@ -42,7 +42,7 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
     // could try again, if necessary, to sync a longer chain).
     m_max_commitments = 6*(Ticks<std::chrono::seconds>(GetAdjustedTime() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
 
-    LogPrint(BCLog::HEADERSSYNC, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
+    LogPrint(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
 }
 
 /** Free any memory in use, and mark this object as no longer usable. This is
@@ -92,7 +92,7 @@ HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const
                 // If we're in PRESYNC and we get a non-full headers
                 // message, then the peer's chain has ended and definitely doesn't
                 // have enough work, so we can stop our sync.
-                LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (presync phase)\n", m_id, m_current_height);
+                LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (presync phase)\n", m_id, m_current_height);
             }
         }
     } else if (m_download_state == State::REDOWNLOAD) {
@@ -118,7 +118,7 @@ HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const
         // If we hit our target blockhash, then all remaining headers will be
         // returned and we can clear any leftover internal state.
         if (m_redownloaded_headers.empty() && m_process_all_remaining_headers) {
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync complete with peer=%d: releasing all at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
+            LogPrint(BCLog::NET, "Initial headers sync complete with peer=%d: releasing all at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
         } else if (full_headers_message) {
             // If the headers message is full, we need to request more.
             ret.request_more = true;
@@ -127,7 +127,7 @@ HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const
             // declining to serve us that full chain again. Give up.
             // Note that there's no more processing to be done with these
             // headers, so we can still return success.
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
+            LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
         }
     }
 }
@@ -150,7 +150,7 @@ bool HeadersSyncState::ValidateAndStoreHeadersCommitments(const std::vector<CBlo
         // This might be benign -- perhaps our peer reorged away from the chain
         // they were on. Give up on this sync for now (likely we will start a
         // new sync with a new starting point).
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (presync phase)\n", m_id, m_current_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (presync phase)\n", m_id, m_current_height);
         return false;
     }
 
@@ -169,7 +169,7 @@ bool HeadersSyncState::ValidateAndStoreHeadersCommitments(const std::vector<CBlo
         m_redownload_buffer_last_hash = m_chain_start->GetBlockHash();
         m_redownload_chain_work = m_chain_start->nChainWork;
         m_download_state = State::REDOWNLOAD;
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
+        LogPrint(BCLog::NET, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
     }
     return true;
 }
@@ -188,7 +188,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren
     // adjustment maximum.
     if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                 m_last_header_received.nBits, current.nBits)) {
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (presync phase)\n", m_id, next_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (presync phase)\n", m_id, next_height);
         return false;
     }
 
@@ -200,7 +200,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren
             // It's possible the chain grew since we started the sync; so
             // potentially we could succeed in syncing the peer's chain if we
             // try again later.
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: exceeded max commitments at height=%i (presync phase)\n", m_id, next_height);
+            LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: exceeded max commitments at height=%i (presync phase)\n", m_id, next_height);
             return false;
         }
     }
@@ -222,7 +222,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
     // Ensure that we're working on a header that connects to the chain we're
     // downloading.
     if (header.hashPrevBlock != m_redownload_buffer_last_hash) {
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (redownload phase)\n", m_id, next_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (redownload phase)\n", m_id, next_height);
         return false;
     }
 
@@ -236,7 +236,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
 
     if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                 previous_nBits, header.nBits)) {
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (redownload phase)\n", m_id, next_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (redownload phase)\n", m_id, next_height);
         return false;
     }
 
@@ -255,7 +255,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
     // target blockhash just because we ran out of commitments.
     if (!m_process_all_remaining_headers && next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) {
         if (m_header_commitments.size() == 0) {
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height);
+            LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height);
             // Somehow our peer managed to feed us a different chain and
             // we've run out of commitments.
             return false;
@@ -264,7 +264,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
         bool expected_commitment = m_header_commitments.front();
         m_header_commitments.pop_front();
         if (commitment != expected_commitment) {
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: commitment mismatch at height=%i (redownload phase)\n", m_id, next_height);
+            LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment mismatch at height=%i (redownload phase)\n", m_id, next_height);
             return false;
         }
     }
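All of the messages above move from the dedicated headerssync logging category to the general net category (the logging.cpp and logging.h hunks that follow remove the old flag). As a side note, the net category can be enabled at startup with -debug=net or toggled at runtime through the logging RPC; the sketch below shows the runtime toggle via the functional test framework. The class name and node count are illustrative assumptions, not part of this patch.

#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework


class NetLoggingToggleSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        node = self.nodes[0]
        # The logging RPC takes an include list and an exclude list of categories.
        node.logging([], ["net"])   # stop logging the net category
        assert not node.logging()["net"]
        node.logging(["net"], [])   # turn it back on
        assert node.logging()["net"]


if __name__ == "__main__":
    NetLoggingToggleSketch().main()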
diff --git a/src/logging.cpp b/src/logging.cpp
index 92fc31917f..73c4e458bd 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -165,7 +165,6 @@ const CLogCategoryDesc LogCategories[] =
 #endif
     {BCLog::UTIL, "util"},
     {BCLog::BLOCKSTORE, "blockstorage"},
-    {BCLog::HEADERSSYNC, "headerssync"},
     {BCLog::ALL, "1"},
     {BCLog::ALL, "all"},
 };
@@ -264,8 +263,6 @@ std::string LogCategoryToStr(BCLog::LogFlags category)
         return "util";
     case BCLog::LogFlags::BLOCKSTORE:
         return "blockstorage";
-    case BCLog::LogFlags::HEADERSSYNC:
-        return "headerssync";
     case BCLog::LogFlags::ALL:
         return "all";
     }
diff --git a/src/logging.h b/src/logging.h
index a7f18f7560..50869ad89a 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -65,7 +65,6 @@ namespace BCLog {
 #endif
         UTIL = (1 << 25),
         BLOCKSTORE = (1 << 26),
-        HEADERSSYNC = (1 << 27),
         ALL = ~(uint32_t)0,
     };
     enum class Level {
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 746e3bedde..74700580ad 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -2820,6 +2820,13 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
         }
     }
 
+    // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
+    // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
+    // on startup).
+    if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
+        already_validated_work = true;
+    }
+
     // At this point, the headers connect to something in our block index.
     // Do anti-DoS checks to determine if we should process or store for later
     // processing.
diff --git a/src/validation.cpp b/src/validation.cpp
index a2a339a5a1..3e17d4cd87 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -3718,7 +3718,7 @@ void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t
 {
     LOCK(cs_main);
     // Don't report headers presync progress if we already have a post-minchainwork header chain.
-    // This means we lose reporting for potentially legimate, but unlikely, deep reorgs, but
+    // This means we lose reporting for potentially legitimate, but unlikely, deep reorgs, but
     // prevent attackers that spam low-work headers from filling our logs.
     if (m_best_header->nChainWork >= UintToArith256(GetConsensus().nMinimumChainWork)) return;
     // Rate limit headers presync updates to 4 per second, as these are not subject to DoS
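The net_processing.cpp change above is what the extended test below exercises: a peer granted noban permission (for example via -whitelist=noban@127.0.0.1, as in the new node3 configuration) skips the anti-DoS chain-work check, so its headers are accepted into the header tree even when their total work is below -minimumchainwork, while blocks are still not downloaded. A condensed, self-contained sketch of that behaviour follows; the class name, node count, and block count are assumptions for illustration, not part of the patch.

#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework


class NoBanHeadersSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        # node0 mines with no work requirement; node1 demands far more chain work
        # than node0 will produce, but grants noban to inbound localhost peers.
        self.extra_args = [
            ["-minimumchainwork=0x0"],
            ["-minimumchainwork=0x1000", "-whitelist=noban@127.0.0.1"],
        ]

    def run_test(self):
        # node0 connects to node1 during default setup, so node0 is an inbound,
        # noban-whitelisted peer from node1's point of view.
        self.generate(self.nodes[0], 10, sync_fun=self.no_op)
        # Without the bypass, node1 would ignore this low-work chain entirely;
        # with it, the chain shows up as a headers-only tip (blocks are still not
        # fetched, since the work stays below -minimumchainwork).
        self.wait_until(lambda: any(
            tip["height"] == 10 and tip["status"] == "headers-only"
            for tip in self.nodes[1].getchaintips()))


if __name__ == "__main__":
    NoBanHeadersSketch().main()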
diff --git a/test/functional/p2p_headers_sync_with_minchainwork.py b/test/functional/p2p_headers_sync_with_minchainwork.py
index 6e17525c28..016375222d 100755
--- a/test/functional/p2p_headers_sync_with_minchainwork.py
+++ b/test/functional/p2p_headers_sync_with_minchainwork.py
@@ -28,9 +28,9 @@ NODE2_BLOCKS_REQUIRED = 2047
 class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
-        self.num_nodes = 3
+        self.num_nodes = 4
         # Node0 has no required chainwork; node1 requires 15 blocks on top of the genesis block; node2 requires 2047
-        self.extra_args = [["-minimumchainwork=0x0", "-checkblockindex=0"], ["-minimumchainwork=0x1f", "-checkblockindex=0"], ["-minimumchainwork=0x1000", "-checkblockindex=0"]]
+        self.extra_args = [["-minimumchainwork=0x0", "-checkblockindex=0"], ["-minimumchainwork=0x1f", "-checkblockindex=0"], ["-minimumchainwork=0x1000", "-checkblockindex=0"], ["-minimumchainwork=0x1000", "-checkblockindex=0", "-whitelist=noban@127.0.0.1"]]
 
     def setup_network(self):
         self.setup_nodes()
@@ -40,17 +40,34 @@ class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
     def disconnect_all(self):
         self.disconnect_nodes(0, 1)
         self.disconnect_nodes(0, 2)
+        self.disconnect_nodes(0, 3)
 
     def reconnect_all(self):
         self.connect_nodes(0, 1)
         self.connect_nodes(0, 2)
+        self.connect_nodes(0, 3)
 
     def test_chains_sync_when_long_enough(self):
         self.log.info("Generate blocks on the node with no required chainwork, and verify nodes 1 and 2 have no new headers in their headers tree")
         with self.nodes[1].assert_debug_log(expected_msgs=["[net] Ignoring low-work chain (height=14)"]), self.nodes[2].assert_debug_log(expected_msgs=["[net] Ignoring low-work chain (height=14)"]):
             self.generate(self.nodes[0], NODE1_BLOCKS_REQUIRED-1, sync_fun=self.no_op)
 
-        for node in self.nodes[1:]:
+        # Node3 should always allow headers due to noban permissions
+        self.log.info("Check that node3 will sync headers (due to noban permissions)")
+
+        def check_node3_chaintips(num_tips, tip_hash, height):
+            node3_chaintips = self.nodes[3].getchaintips()
+            assert(len(node3_chaintips) == num_tips)
+            assert {
+                'height': height,
+                'hash': tip_hash,
+                'branchlen': height,
+                'status': 'headers-only',
+            } in node3_chaintips
+
+        check_node3_chaintips(2, self.nodes[0].getbestblockhash(), NODE1_BLOCKS_REQUIRED-1)
+
+        for node in self.nodes[1:3]:
             chaintips = node.getchaintips()
             assert(len(chaintips) == 1)
             assert {
@@ -63,7 +80,7 @@ class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
         self.log.info("Generate more blocks to satisfy node1's minchainwork requirement, and verify node2 still has no new headers in headers tree")
         with self.nodes[2].assert_debug_log(expected_msgs=["[net] Ignoring low-work chain (height=15)"]):
             self.generate(self.nodes[0], NODE1_BLOCKS_REQUIRED - self.nodes[0].getblockcount(), sync_fun=self.no_op)
-        self.sync_blocks(self.nodes[0:2])
+        self.sync_blocks(self.nodes[0:2]) # node3 will sync headers (noban permissions) but not blocks (due to minchainwork)
 
         assert {
             'height': 0,
@@ -74,10 +91,13 @@ class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
         assert(len(self.nodes[2].getchaintips()) == 1)
 
-        self.log.info("Generate long chain for node0/node1")
+        self.log.info("Check that node3 accepted these headers as well")
+        check_node3_chaintips(2, self.nodes[0].getbestblockhash(), NODE1_BLOCKS_REQUIRED)
+
+        self.log.info("Generate long chain for node0/node1/node3")
         self.generate(self.nodes[0], NODE2_BLOCKS_REQUIRED-self.nodes[0].getblockcount(), sync_fun=self.no_op)
 
-        self.log.info("Verify that node2 will sync the chain when it gets long enough")
+        self.log.info("Verify that node2 and node3 will sync the chain when it gets long enough")
         self.sync_blocks()
 
     def test_peerinfo_includes_headers_presync_height(self):
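For reference on the constants used throughout the test: on regtest every block at minimum difficulty contributes exactly 2 units of chain work and the genesis block counts toward the total, which is why 15 blocks on top of genesis satisfy -minimumchainwork=0x1f and 2047 blocks satisfy 0x1000. The small standalone check below works through that arithmetic; the helper is illustrative only and not part of the test.

# Chain work of a regtest chain at minimum difficulty: work per block is
# 2^256 / (target + 1) = 2, and the genesis block is included in the total.
WORK_PER_REGTEST_BLOCK = 2

def chain_work(height):
    return (height + 1) * WORK_PER_REGTEST_BLOCK

assert chain_work(15) >= 0x1f       # NODE1_BLOCKS_REQUIRED meets node1's requirement
assert chain_work(14) < 0x1f        # one block short is still "low-work" for node1
assert chain_work(2047) >= 0x1000   # NODE2_BLOCKS_REQUIRED meets node2/node3's requirement
assert chain_work(2046) < 0x1000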