path: root/src/net_processing.cpp
author     Suhas Daftuar <sdaftuar@gmail.com>  2022-01-26 18:29:56 -0500
committer  Suhas Daftuar <sdaftuar@gmail.com>  2022-02-22 11:34:05 -0500
commit     ef6dbe6863d92710fd2da7781e5b2aac87578751 (patch)
tree       c3e631f9c8a81987feba60d2f68394c6c0c2b672 /src/net_processing.cpp
parent     d0bf9bb6a539f151ec92725d20a2b6c22cb095a5 (diff)
download   bitcoin-ef6dbe6863d92710fd2da7781e5b2aac87578751.tar.xz
Respond to getheaders if we have sufficient chainwork
Previously, we would check whether we were in IBD and ignore getheaders requests accordingly. However, the IBD criterion -- an optimization mostly targeted at behavior when we have peers serving us many blocks we need to download -- is difficult to reason about in edge-case scenarios, such as if the network were to go a long time without any blocks being found and nodes were restarted during that time.

To make things simpler to reason about, just use nMinimumChainWork as our anti-DoS threshold: as long as our chain has that much work, it should be fine to respond to a peer asking for our headers (and this should allow such a peer to request blocks from us if needed).
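The gist of the new policy can be sketched as a small standalone predicate. This is an illustrative sketch only, not the actual PeerManagerImpl code: ShouldServeHeaders, TipInfo, and the plain 64-bit work value are hypothetical stand-ins for CBlockIndex::nChainWork (an arith_uint256) and for the inline check in ProcessMessage() shown in the diff below.

#include <cstdint>

// Hypothetical stand-in for the active chain tip; the real code reads
// m_chainman.ActiveTip()->nChainWork under cs_main.
struct TipInfo {
    uint64_t chain_work; // cumulative proof-of-work on our active chain
};

bool ShouldServeHeaders(const TipInfo* tip,
                        uint64_t minimum_chain_work,
                        bool peer_has_download_permission,
                        bool importing_or_reindexing)
{
    // While importing or reindexing, our view of the best chain is still
    // being rebuilt, so do not serve headers at all.
    if (importing_or_reindexing) return false;

    // No tip yet (e.g. a fresh datadir): nothing useful to serve.
    if (tip == nullptr) return false;

    // Peers with NetPermissionFlags::Download bypass the minimum-work check.
    if (peer_has_download_permission) return true;

    // The core of this commit: require our chain to carry at least
    // nMinimumChainWork of work before advertising it, instead of the
    // old "am I in IBD?" test.
    return tip->chain_work >= minimum_chain_work;
}

A node that answers getheaders under this rule has enough work on its chain that serving the corresponding blocks afterwards is also reasonable, which is what lets a peer that is behind on headers catch up from us.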
Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r--  src/net_processing.cpp | 18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 273cb4fccb..ac6b4654f1 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -3189,9 +3189,23 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             return;
         }
 
+        if (fImporting || fReindex) {
+            LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
+            return;
+        }
+
         LOCK(cs_main);
-        if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && !pfrom.HasPermission(NetPermissionFlags::Download)) {
-            LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId());
+
+        // Note that if we were to be on a chain that forks from the checkpointed
+        // chain, then serving those headers to a peer that has seen the
+        // checkpointed chain would cause that peer to disconnect us. Requiring
+        // that our chainwork exceed nMinimumChainWork is a protection against
+        // being fed a bogus chain when we started up for the first time and
+        // getting partitioned off the honest network for serving that chain to
+        // others.
+        if (m_chainman.ActiveTip() == nullptr ||
+                (m_chainman.ActiveTip()->nChainWork < nMinimumChainWork && !pfrom.HasPermission(NetPermissionFlags::Download))) {
+            LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work\n", pfrom.GetId());
             return;
         }