Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.am          |   3
-rw-r--r--  src/init.cpp             |   2
-rw-r--r--  src/net_processing.cpp   | 463
-rw-r--r--  src/rpc/blockchain.cpp   |   6
-rw-r--r--  src/rpc/client.cpp       |   1
-rw-r--r--  src/rpc/mempool.cpp      | 146
-rw-r--r--  src/test/fuzz/rpc.cpp    |   1
-rw-r--r--  src/util/error.cpp       |   2
-rw-r--r--  src/util/error.h         |   1
-rw-r--r--  src/wallet/bdb.cpp       |   6
-rw-r--r--  src/wallet/walletdb.cpp  |  10
11 files changed, 449 insertions, 192 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
index 488ff0e273..bc0982f74d 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -854,8 +854,7 @@ endif
# TODO: libbitcoinkernel is a work in progress consensus engine library, as more
# and more modules are decoupled from the consensus engine, this list will
-# shrink to only those which are absolutely necessary. For example, things
-# like index/*.cpp will be removed.
+# shrink to only those which are absolutely necessary.
libbitcoinkernel_la_SOURCES = \
kernel/bitcoinkernel.cpp \
arith_uint256.cpp \
diff --git a/src/init.cpp b/src/init.cpp
index ee90abcc93..d844e9b169 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -966,7 +966,7 @@ bool AppInitParameterInteraction(const ArgsManager& args, bool use_syscall_sandb
peer_connect_timeout = args.GetIntArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
if (peer_connect_timeout <= 0) {
- return InitError(Untranslated("peertimeout cannot be configured with a negative value."));
+ return InitError(Untranslated("peertimeout must be a positive integer."));
}
if (args.IsArgSet("-minrelaytxfee")) {
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index db711be130..c33dd29923 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -61,6 +61,8 @@ static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
* Timeout = base + per_header * (expected number of headers) */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
+/** How long to wait for a peer to respond to a getheaders request */
+static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
@@ -355,6 +357,9 @@ struct Peer {
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
+ /** Time of the last getheaders message to this peer */
+ std::atomic<NodeClock::time_point> m_last_getheaders_timestamp{NodeSeconds{}};
+
Peer(NodeId id)
: m_id{id}
{}
@@ -501,7 +506,7 @@ public:
private:
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
- void ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -560,6 +565,22 @@ private:
const std::vector<CBlockHeader>& headers,
bool via_compact_block)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+ /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */
+ /** Deal with state tracking and headers sync for peers that send the
+ * occasional non-connecting header (this can happen due to BIP 130 headers
+ * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */
+ void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers);
+ /** Return true if the headers connect to each other, false otherwise */
+ bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
+ /** Request further headers from this peer with a given locator.
+ * We don't issue a getheaders message if we have a recent one outstanding.
+ * This returns true if a getheaders is actually sent, and false otherwise.
+ */
+ bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer);
+ /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
+ void HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast);
+ /** Update peer state based on received headers message */
+ void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers);
void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
@@ -2194,6 +2215,203 @@ void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlo
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
}
+/**
+ * Special handling for unconnecting headers that might be part of a block
+ * announcement.
+ *
+ * We'll send a getheaders message in response to try to connect the chain.
+ *
+ * The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+ * don't connect before being given DoS points.
+ *
+ * Once a headers message is received that is valid and does connect,
+ * nUnconnectingHeaders gets reset back to 0.
+ */
+void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
+ const std::vector<CBlockHeader>& headers)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+
+ nodestate->nUnconnectingHeaders++;
+ // Try to fill in the missing headers.
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), peer)) {
+ LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+ headers[0].GetHash().ToString(),
+ headers[0].hashPrevBlock.ToString(),
+ m_chainman.m_best_header->nHeight,
+ pfrom.GetId(), nodestate->nUnconnectingHeaders);
+ }
+ // Set hashLastUnknownBlock for this peer, so that if we
+ // eventually get the headers - even from a different peer -
+ // we can use this peer to download.
+ UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
+
+ // The peer may just be broken, so periodically assign DoS points if this
+ // condition persists.
+ if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
+ Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
+ }
+}
+
+bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
+{
+ uint256 hashLastBlock;
+ for (const CBlockHeader& header : headers) {
+ if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
+ return false;
+ }
+ hashLastBlock = header.GetHash();
+ }
+ return true;
+}
+
+bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ const auto current_time = NodeClock::now();
+
+ // Only allow a new getheaders message to go out if we don't have a recent
+ // one already in-flight
+ if (current_time - peer.m_last_getheaders_timestamp.load() > HEADERS_RESPONSE_TIME) {
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
+ peer.m_last_getheaders_timestamp = current_time;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Given a new headers tip ending in pindexLast, potentially request blocks towards that tip.
+ * We require that the given tip have at least as much work as our tip, and for
+ * our current tip to be "close to synced" (see CanDirectFetch()).
+ */
+void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+
+ if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
+
+ std::vector<const CBlockIndex*> vToFetch;
+ const CBlockIndex *pindexWalk = pindexLast;
+ // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+ while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
+ !IsBlockRequested(pindexWalk->GetBlockHash()) &&
+ (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
+ // We don't have this block, and it's not yet in flight.
+ vToFetch.push_back(pindexWalk);
+ }
+ pindexWalk = pindexWalk->pprev;
+ }
+ // If pindexWalk still isn't on our main chain, we're looking at a
+ // very large reorg at a time we think we're close to caught up to
+ // the main chain -- this shouldn't really happen. Bail out on the
+ // direct fetch and rely on parallel download instead.
+ if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
+ LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
+ pindexLast->GetBlockHash().ToString(),
+ pindexLast->nHeight);
+ } else {
+ std::vector<CInv> vGetData;
+ // Download as much as possible, from earliest to latest.
+ for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
+ if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ // Can't download any more from this peer
+ break;
+ }
+ uint32_t nFetchFlags = GetFetchFlags(pfrom);
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ BlockRequested(pfrom.GetId(), *pindex);
+ LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+ pindex->GetBlockHash().ToString(), pfrom.GetId());
+ }
+ if (vGetData.size() > 1) {
+ LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
+ pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+ }
+ if (vGetData.size() > 0) {
+ if (!m_ignore_incoming_txs &&
+ nodestate->m_provides_cmpctblocks &&
+ vGetData.size() == 1 &&
+ mapBlocksInFlight.size() == 1 &&
+ pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+ // In any case, we want to download using a compact block, not a regular one
+ vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+ }
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ }
+ }
+ }
+}
+
+/**
+ * Given receipt of headers from a peer ending in pindexLast, along with
+ * whether that header was new and whether the headers message was full,
+ * update the state we keep for the peer.
+ */
+void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
+ const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers)
+{
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+ if (nodestate->nUnconnectingHeaders > 0) {
+ LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
+ }
+ nodestate->nUnconnectingHeaders = 0;
+
+ assert(pindexLast);
+ UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
+
+ // From here, pindexBestKnownBlock should be guaranteed to be non-null,
+ // because it is set in UpdateBlockAvailability. Some nullptr checks
+ // are still present, however, as belt-and-suspenders.
+
+ if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
+ nodestate->m_last_block_announcement = GetTime();
+ }
+
+ // If we're in IBD, we want outbound peers that will serve us a useful
+ // chain. Disconnect peers that are on chains with insufficient work.
+ if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && !may_have_more_headers) {
+ // If the peer has no more headers to give us, then we know we have
+ // their tip.
+ if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+ // This peer has too little work on their headers chain to help
+ // us sync -- disconnect if it is an outbound disconnection
+ // candidate.
+ // Note: We compare their tip to nMinimumChainWork (rather than
+ // m_chainman.ActiveChain().Tip()) because we won't start block download
+ // until we have a headers chain that has at least
+ // nMinimumChainWork, even if a peer has a chain past our tip,
+ // as an anti-DoS measure.
+ if (pfrom.IsOutboundOrBlockRelayConn()) {
+ LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
+ pfrom.fDisconnect = true;
+ }
+ }
+ }
+
+ // If this is an outbound full-relay peer, check to see if we should protect
+ // it from the bad/lagging chain logic.
+ // Note that outbound block-relay peers are excluded from this protection, and
+ // thus always subject to eviction under the bad/lagging chain logic.
+ // See ChainSyncTimeoutState.
+ if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
+ if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+ LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
+ nodestate->m_chain_sync.m_protect = true;
+ ++m_outbound_peers_with_protect_from_disconnect;
+ }
+ }
+}
+
void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
const std::vector<CBlockHeader>& headers,
bool via_compact_block)
@@ -2206,55 +2424,33 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
return;
}
- bool received_new_header = false;
const CBlockIndex *pindexLast = nullptr;
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom.GetId());
- // If this looks like it could be a block announcement (nCount <=
- // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
- // don't connect:
- // - Send a getheaders message in response to try to connect the chain.
- // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
- // don't connect before giving DoS points
- // - Once a headers message is received that is valid and does connect,
- // nUnconnectingHeaders gets reset back to 0.
- if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
- nodestate->nUnconnectingHeaders++;
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
- LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
- headers[0].GetHash().ToString(),
- headers[0].hashPrevBlock.ToString(),
- m_chainman.m_best_header->nHeight,
- pfrom.GetId(), nodestate->nUnconnectingHeaders);
- // Set hashLastUnknownBlock for this peer, so that if we
- // eventually get the headers - even from a different peer -
- // we can use this peer to download.
- UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
-
- if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
- Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
- }
- return;
- }
+ // Do these headers connect to something in our block index?
+ bool headers_connect_blockindex{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) != nullptr)};
- uint256 hashLastBlock;
- for (const CBlockHeader& header : headers) {
- if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
- Misbehaving(peer, 20, "non-continuous headers sequence");
- return;
- }
- hashLastBlock = header.GetHash();
+ if (!headers_connect_blockindex) {
+ if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
+ // If this looks like it could be a BIP 130 block announcement, use
+ // special logic for handling headers that don't connect, as this
+ // could be benign.
+ HandleFewUnconnectingHeaders(pfrom, peer, headers);
+ } else {
+ Misbehaving(peer, 10, "invalid header received");
}
+ return;
+ }
- // If we don't have the last header, then they'll have given us
- // something new (if these headers are valid).
- if (!m_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) {
- received_new_header = true;
- }
+ // At this point, the headers connect to something in our block index.
+ if (!CheckHeadersAreContinuous(headers)) {
+ Misbehaving(peer, 20, "non-continuous headers sequence");
+ return;
}
+ // If we don't have the last header, then this peer will have given us
+ // something new (if these headers are valid).
+ bool received_new_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()) == nullptr)};
+
BlockValidationState state;
if (!m_chainman.ProcessNewBlockHeaders(headers, state, &pindexLast)) {
if (state.IsInvalid()) {
@@ -2263,123 +2459,20 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
}
}
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom.GetId());
- if (nodestate->nUnconnectingHeaders > 0) {
- LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
- }
- nodestate->nUnconnectingHeaders = 0;
-
- assert(pindexLast);
- UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
-
- // From here, pindexBestKnownBlock should be guaranteed to be non-null,
- // because it is set in UpdateBlockAvailability. Some nullptr checks
- // are still present, however, as belt-and-suspenders.
-
- if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
- nodestate->m_last_block_announcement = GetTime();
- }
-
- if (nCount == MAX_HEADERS_RESULTS) {
- // Headers message had its maximum size; the peer may have more headers.
- // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or m_chainman.m_best_header, continue
- // from there instead.
+ // Consider fetching more headers.
+ if (nCount == MAX_HEADERS_RESULTS) {
+ // Headers message had its maximum size; the peer may have more headers.
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(pindexLast), peer)) {
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
- pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
- }
-
- // If this set of headers is valid and ends in a block with at least as
- // much work as our tip, download as much as possible.
- if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
- std::vector<const CBlockIndex*> vToFetch;
- const CBlockIndex *pindexWalk = pindexLast;
- // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
- while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
- !IsBlockRequested(pindexWalk->GetBlockHash()) &&
- (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
- // We don't have this block, and it's not yet in flight.
- vToFetch.push_back(pindexWalk);
- }
- pindexWalk = pindexWalk->pprev;
- }
- // If pindexWalk still isn't on our main chain, we're looking at a
- // very large reorg at a time we think we're close to caught up to
- // the main chain -- this shouldn't really happen. Bail out on the
- // direct fetch and rely on parallel download instead.
- if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
- LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
- pindexLast->GetBlockHash().ToString(),
- pindexLast->nHeight);
- } else {
- std::vector<CInv> vGetData;
- // Download as much as possible, from earliest to latest.
- for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
- if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- // Can't download any more from this peer
- break;
- }
- uint32_t nFetchFlags = GetFetchFlags(pfrom);
- vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- BlockRequested(pfrom.GetId(), *pindex);
- LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
- pindex->GetBlockHash().ToString(), pfrom.GetId());
- }
- if (vGetData.size() > 1) {
- LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
- pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
- }
- if (vGetData.size() > 0) {
- if (!m_ignore_incoming_txs &&
- nodestate->m_provides_cmpctblocks &&
- vGetData.size() == 1 &&
- mapBlocksInFlight.size() == 1 &&
- pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
- // In any case, we want to download using a compact block, not a regular one
- vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
- }
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
- }
- }
- }
- // If we're in IBD, we want outbound peers that will serve us a useful
- // chain. Disconnect peers that are on chains with insufficient work.
- if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
- // When nCount < MAX_HEADERS_RESULTS, we know we have no more
- // headers to fetch from this peer.
- if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
- // This peer has too little work on their headers chain to help
- // us sync -- disconnect if it is an outbound disconnection
- // candidate.
- // Note: We compare their tip to nMinimumChainWork (rather than
- // m_chainman.ActiveChain().Tip()) because we won't start block download
- // until we have a headers chain that has at least
- // nMinimumChainWork, even if a peer has a chain past our tip,
- // as an anti-DoS measure.
- if (pfrom.IsOutboundOrBlockRelayConn()) {
- LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
- pfrom.fDisconnect = true;
- }
- }
- }
-
- // If this is an outbound full-relay peer, check to see if we should protect
- // it from the bad/lagging chain logic.
- // Note that outbound block-relay peers are excluded from this protection, and
- // thus always subject to eviction under the bad/lagging chain logic.
- // See ChainSyncTimeoutState.
- if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
- if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
- LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
- nodestate->m_chain_sync.m_protect = true;
- ++m_outbound_peers_with_protect_from_disconnect;
- }
+ pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
}
}
+ UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+
+ // Consider immediately downloading blocks.
+ HeadersDirectFetchBlocks(pfrom, pindexLast);
+
return;
}
@@ -3142,8 +3235,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
if (best_block != nullptr) {
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *best_block));
- LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", m_chainman.m_best_header->nHeight, best_block->ToString(), pfrom.GetId());
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer)) {
+ LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
+ m_chainman.m_best_header->nHeight, best_block->ToString(),
+ pfrom.GetId());
+ }
}
return;
@@ -3316,7 +3412,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// others.
if (m_chainman.ActiveTip() == nullptr ||
(m_chainman.ActiveTip()->nChainWork < nMinimumChainWork && !pfrom.HasPermission(NetPermissionFlags::Download))) {
- LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work\n", pfrom.GetId());
+ LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
+ // Just respond with an empty headers message, to tell the peer to
+ // go away but not treat us as unresponsive.
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, std::vector<CBlock>()));
return;
}
@@ -3597,8 +3696,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
- if (!m_chainman.ActiveChainstate().IsInitialBlockDownload())
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
+ if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
+ MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer);
+ }
return;
}
@@ -3872,6 +3972,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
+ // Assume that this is in response to any outstanding getheaders
+ // request we may have sent, and clear out the time of our last request
+ peer->m_last_getheaders_timestamp.store(NodeSeconds{});
+
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
@@ -4300,7 +4404,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
return fMoreWork;
}
-void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds)
+void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
{
AssertLockHeld(cs_main);
@@ -4338,10 +4442,15 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_
pto.fDisconnect = true;
} else {
assert(state.m_chain_sync.m_work_header);
+ // Here, we assume that the getheaders message goes out,
+ // because it'll either go out or be skipped because of a
+ // getheaders in-flight already, in which case the peer should
+ // still respond to us with a sufficiently high work chain tip.
+ MaybeSendGetHeaders(pto,
+ m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev),
+ peer);
LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
- m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
state.m_chain_sync.m_sent_getheaders = true;
- constexpr auto HEADERS_RESPONSE_TIME{2min};
// Bump the timeout to allow a response, which could clear the timeout
// (if the response shows the peer has synced), reset the timeout (if
// the peer syncs to the required work but not to our tip), or result
@@ -4749,15 +4858,6 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
- state.fSyncStarted = true;
- state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
- (
- // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
- // to maintain precision
- std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
- (GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing
- );
- nSyncStarted++;
const CBlockIndex* pindexStart = m_chainman.m_best_header;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
@@ -4768,8 +4868,19 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
- LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
- m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexStart), uint256()));
+ if (MaybeSendGetHeaders(*pto, m_chainman.ActiveChain().GetLocator(pindexStart), *peer)) {
+ LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
+
+ state.fSyncStarted = true;
+ state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
+ (
+ // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
+ // to maintain precision
+ std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
+ (GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing
+ );
+ nSyncStarted++;
+ }
}
}
@@ -5115,7 +5226,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Check that outbound peers have reasonable chains
// GetTime() is used by this anti-DoS logic so we can test this using mocktime
- ConsiderEviction(*pto, GetTime<std::chrono::seconds>());
+ ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
//
// Message: getdata (blocks)
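
The common thread in the net_processing.cpp changes above is that every getheaders request is now funneled through MaybeSendGetHeaders(), which declines to send a new request while a recent one (within HEADERS_RESPONSE_TIME, 2 minutes) is still considered outstanding. A minimal stand-alone sketch of just that rate-limiting idea, assuming a hypothetical PeerStub type and a simplified signature rather than Bitcoin Core's actual Peer/CNode plumbing:

#include <atomic>
#include <chrono>
#include <iostream>

using Clock = std::chrono::steady_clock;
// Stand-in for HEADERS_RESPONSE_TIME (2min in the patch above).
static constexpr auto kHeadersResponseTime = std::chrono::minutes{2};

struct PeerStub {
    // Default-constructed time_point means "no getheaders ever sent".
    std::atomic<Clock::time_point> last_getheaders{Clock::time_point{}};
};

// Returns true only if a new getheaders request actually goes out.
bool MaybeSendGetHeaders(PeerStub& peer)
{
    const auto now = Clock::now();
    if (now - peer.last_getheaders.load() > kHeadersResponseTime) {
        // (the real code pushes a GETHEADERS p2p message to the peer here)
        peer.last_getheaders = now;
        return true;
    }
    return false; // a recent request is still outstanding; suppress the duplicate
}

int main()
{
    PeerStub peer;
    std::cout << MaybeSendGetHeaders(peer) << '\n'; // 1: first request is sent
    std::cout << MaybeSendGetHeaders(peer) << '\n'; // 0: suppressed inside the window
}

This is also why the HEADERS handler above resets m_last_getheaders_timestamp on receipt: a headers reply ends the outstanding-request window early, so follow-up requests are not delayed.
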
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index af9458206e..6846e992d4 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -845,7 +845,7 @@ static RPCHelpMan gettxoutsetinfo()
"Note this call may take some time if you are not using coinstatsindex.\n",
{
{"hash_type", RPCArg::Type::STR, RPCArg::Default{"hash_serialized_2"}, "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'muhash', 'none'."},
- {"hash_or_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "The block hash or height of the target height (only available with coinstatsindex).", "", {"", "string or numeric"}},
+ {"hash_or_height", RPCArg::Type::NUM, RPCArg::DefaultHint{"the current best block"}, "The block hash or height of the target height (only available with coinstatsindex).", "", {"", "string or numeric"}},
{"use_index", RPCArg::Type::BOOL, RPCArg::Default{true}, "Use coinstatsindex, if available."},
},
RPCResult{
@@ -881,6 +881,7 @@ static RPCHelpMan gettxoutsetinfo()
HelpExampleCli("gettxoutsetinfo", R"("none")") +
HelpExampleCli("gettxoutsetinfo", R"("none" 1000)") +
HelpExampleCli("gettxoutsetinfo", R"("none" '"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09"')") +
+ HelpExampleCli("-named gettxoutsetinfo", R"(hash_type='muhash' use_index='false')") +
HelpExampleRpc("gettxoutsetinfo", "") +
HelpExampleRpc("gettxoutsetinfo", R"("none")") +
HelpExampleRpc("gettxoutsetinfo", R"("none", 1000)") +
@@ -917,6 +918,9 @@ static RPCHelpMan gettxoutsetinfo()
throw JSONRPCError(RPC_INVALID_PARAMETER, "hash_serialized_2 hash type cannot be queried for a specific block");
}
+ if (!index_requested) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot set use_index to false when querying for a specific block");
+ }
pindex = ParseHashOrHeight(request.params[1], chainman);
}
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index ae0d0112ba..9be3ab7df0 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -110,6 +110,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "sendrawtransaction", 1, "maxfeerate" },
{ "testmempoolaccept", 0, "rawtxs" },
{ "testmempoolaccept", 1, "maxfeerate" },
+ { "submitpackage", 0, "package" },
{ "combinerawtransaction", 0, "txs" },
{ "fundrawtransaction", 1, "options" },
{ "fundrawtransaction", 2, "iswitness" },
diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp
index 0d614d5ec0..fbb40ab861 100644
--- a/src/rpc/mempool.cpp
+++ b/src/rpc/mempool.cpp
@@ -5,6 +5,7 @@
#include <rpc/blockchain.h>
+#include <chainparams.h>
#include <core_io.h>
#include <fs.h>
#include <policy/rbf.h>
@@ -730,6 +731,150 @@ static RPCHelpMan savemempool()
};
}
+static RPCHelpMan submitpackage()
+{
+ return RPCHelpMan{"submitpackage",
+ "Submit a package of raw transactions (serialized, hex-encoded) to local node (-regtest only).\n"
+ "The package will be validated according to consensus and mempool policy rules. If all transactions pass, they will be accepted to mempool.\n"
+ "This RPC is experimental and the interface may be unstable. Refer to doc/policy/packages.md for documentation on package policies.\n"
+ "Warning: until package relay is in use, successful submission does not mean the transaction will propagate to other nodes on the network.\n"
+ "Currently, each transaction is broadcasted individually after submission, which means they must meet other nodes' feerate requirements alone.\n"
+ ,
+ {
+ {"package", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of raw transactions.",
+ {
+ {"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""},
+ },
+ },
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ_DYN, "tx-results", "transaction results keyed by wtxid",
+ {
+ {RPCResult::Type::OBJ, "wtxid", "transaction wtxid", {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"},
+ {RPCResult::Type::STR_HEX, "other-wtxid", /*optional=*/true, "The wtxid of a different transaction with the same txid but different witness found in the mempool. This means the submitted transaction was ignored."},
+ {RPCResult::Type::NUM, "vsize", "Virtual transaction size as defined in BIP 141."},
+ {RPCResult::Type::OBJ, "fees", "Transaction fees", {
+ {RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT},
+ }},
+ }}
+ }},
+ {RPCResult::Type::STR_AMOUNT, "package-feerate", /*optional=*/true, "package feerate used for feerate checks in " + CURRENCY_UNIT + " per KvB. Excludes transactions which were deduplicated or accepted individually."},
+ {RPCResult::Type::ARR, "replaced-transactions", /*optional=*/true, "List of txids of replaced transactions",
+ {
+ {RPCResult::Type::STR_HEX, "", "The transaction id"},
+ }},
+ },
+ },
+ RPCExamples{
+ HelpExampleCli("testmempoolaccept", "[rawtx1, rawtx2]") +
+ HelpExampleCli("submitpackage", "[rawtx1, rawtx2]")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+ {
+ if (!Params().IsMockableChain()) {
+ throw std::runtime_error("submitpackage is for regression testing (-regtest mode) only");
+ }
+ RPCTypeCheck(request.params, {
+ UniValue::VARR,
+ });
+ const UniValue raw_transactions = request.params[0].get_array();
+ if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER,
+ "Array must contain between 1 and " + ToString(MAX_PACKAGE_COUNT) + " transactions.");
+ }
+
+ std::vector<CTransactionRef> txns;
+ txns.reserve(raw_transactions.size());
+ for (const auto& rawtx : raw_transactions.getValues()) {
+ CMutableTransaction mtx;
+ if (!DecodeHexTx(mtx, rawtx.get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
+ "TX decode failed: " + rawtx.get_str() + " Make sure the tx has at least one input.");
+ }
+ txns.emplace_back(MakeTransactionRef(std::move(mtx)));
+ }
+
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ CTxMemPool& mempool = EnsureMemPool(node);
+ CChainState& chainstate = EnsureChainman(node).ActiveChainstate();
+ const auto package_result = WITH_LOCK(::cs_main, return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/ false));
+
+ // First catch any errors.
+ switch(package_result.m_state.GetResult()) {
+ case PackageValidationResult::PCKG_RESULT_UNSET: break;
+ case PackageValidationResult::PCKG_POLICY:
+ {
+ throw JSONRPCTransactionError(TransactionError::INVALID_PACKAGE,
+ package_result.m_state.GetRejectReason());
+ }
+ case PackageValidationResult::PCKG_MEMPOOL_ERROR:
+ {
+ throw JSONRPCTransactionError(TransactionError::MEMPOOL_ERROR,
+ package_result.m_state.GetRejectReason());
+ }
+ case PackageValidationResult::PCKG_TX:
+ {
+ for (const auto& tx : txns) {
+ auto it = package_result.m_tx_results.find(tx->GetWitnessHash());
+ if (it != package_result.m_tx_results.end() && it->second.m_state.IsInvalid()) {
+ throw JSONRPCTransactionError(TransactionError::MEMPOOL_REJECTED,
+ strprintf("%s failed: %s", tx->GetHash().ToString(), it->second.m_state.GetRejectReason()));
+ }
+ }
+ // If a PCKG_TX error was returned, there must have been an invalid transaction.
+ NONFATAL_UNREACHABLE();
+ }
+ }
+ for (const auto& tx : txns) {
+ size_t num_submitted{0};
+ std::string err_string;
+ const auto err = BroadcastTransaction(node, tx, err_string, 0, true, true);
+ if (err != TransactionError::OK) {
+ throw JSONRPCTransactionError(err,
+ strprintf("transaction broadcast failed: %s (all transactions were submitted, %d transactions were broadcast successfully)",
+ err_string, num_submitted));
+ }
+ }
+ UniValue rpc_result{UniValue::VOBJ};
+ UniValue tx_result_map{UniValue::VOBJ};
+ std::set<uint256> replaced_txids;
+ for (const auto& tx : txns) {
+ auto it = package_result.m_tx_results.find(tx->GetWitnessHash());
+ CHECK_NONFATAL(it != package_result.m_tx_results.end());
+ UniValue result_inner{UniValue::VOBJ};
+ result_inner.pushKV("txid", tx->GetHash().GetHex());
+ if (it->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS) {
+ result_inner.pushKV("other-wtxid", it->second.m_other_wtxid.value().GetHex());
+ }
+ if (it->second.m_result_type == MempoolAcceptResult::ResultType::VALID ||
+ it->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY) {
+ result_inner.pushKV("vsize", int64_t{it->second.m_vsize.value()});
+ UniValue fees(UniValue::VOBJ);
+ fees.pushKV("base", ValueFromAmount(it->second.m_base_fees.value()));
+ result_inner.pushKV("fees", fees);
+ if (it->second.m_replaced_transactions.has_value()) {
+ for (const auto& ptx : it->second.m_replaced_transactions.value()) {
+ replaced_txids.insert(ptx->GetHash());
+ }
+ }
+ }
+ tx_result_map.pushKV(tx->GetWitnessHash().GetHex(), result_inner);
+ }
+ rpc_result.pushKV("tx-results", tx_result_map);
+ if (package_result.m_package_feerate.has_value()) {
+ rpc_result.pushKV("package-feerate", ValueFromAmount(package_result.m_package_feerate.value().GetFeePerK()));
+ }
+ UniValue replaced_list(UniValue::VARR);
+ for (const uint256& hash : replaced_txids) replaced_list.push_back(hash.ToString());
+ rpc_result.pushKV("replaced-transactions", replaced_list);
+ return rpc_result;
+ },
+ };
+}
+
void RegisterMempoolRPCCommands(CRPCTable& t)
{
static const CRPCCommand commands[]{
@@ -742,6 +887,7 @@ void RegisterMempoolRPCCommands(CRPCTable& t)
{"blockchain", &getmempoolinfo},
{"blockchain", &getrawmempool},
{"blockchain", &savemempool},
+ {"hidden", &submitpackage},
};
for (const auto& c : commands) {
t.appendCommand(c.name, &c);
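
For the new submitpackage RPC above, the single "package" parameter is a JSON array of raw, hex-encoded transactions, with parents expected before the children that spend them (see doc/policy/packages.md), and results keyed by wtxid. A small client-side sketch of assembling that parameter, assuming the univalue library shipped in Bitcoin Core's source tree and using placeholder hex strings:

#include <univalue.h>

#include <iostream>

int main()
{
    // Entries are raw hex transactions; the strings below are placeholders.
    UniValue package(UniValue::VARR);
    package.push_back("...parent_rawtx_hex...");  // parent first
    package.push_back("...child_rawtx_hex...");   // child spending the parent

    // This becomes "params" of a {"method": "submitpackage"} JSON-RPC call
    // (regtest only, per the help text above).
    UniValue params(UniValue::VARR);
    params.push_back(package);
    std::cout << params.write(/*prettyIndent=*/2) << '\n';
}

Since tx-results is keyed by wtxid, a caller should look up each submitted transaction's result by its witness hash rather than its txid.
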
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index e4e83c3f32..26913a41d2 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -159,6 +159,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"signrawtransactionwithkey",
"submitblock",
"submitheader",
+ "submitpackage",
"syncwithvalidationinterfacequeue",
"testmempoolaccept",
"uptime",
diff --git a/src/util/error.cpp b/src/util/error.cpp
index af8cbd0353..22a5964279 100644
--- a/src/util/error.cpp
+++ b/src/util/error.cpp
@@ -35,6 +35,8 @@ bilingual_str TransactionErrorString(const TransactionError err)
return Untranslated("External signer not found");
case TransactionError::EXTERNAL_SIGNER_FAILED:
return Untranslated("External signer failed to sign");
+ case TransactionError::INVALID_PACKAGE:
+ return Untranslated("Transaction rejected due to invalid package");
// no default case, so the compiler can warn about missing cases
}
assert(false);
diff --git a/src/util/error.h b/src/util/error.h
index 4cc35eb1fd..0429de651a 100644
--- a/src/util/error.h
+++ b/src/util/error.h
@@ -32,6 +32,7 @@ enum class TransactionError {
MAX_FEE_EXCEEDED,
EXTERNAL_SIGNER_NOT_FOUND,
EXTERNAL_SIGNER_FAILED,
+ INVALID_PACKAGE,
};
bilingual_str TransactionErrorString(const TransactionError error);
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index f8230f7a1d..dbd768a758 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -315,12 +315,6 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, b
env = database.env.get();
pdb = database.m_db.get();
strFile = fs::PathToString(database.m_filename);
- if (!Exists(std::string("version"))) {
- bool fTmp = fReadOnly;
- fReadOnly = false;
- Write(std::string("version"), CLIENT_VERSION);
- fReadOnly = fTmp;
- }
}
void BerkeleyDatabase::Open()
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index a04446c840..8afd3f416d 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -883,12 +883,10 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
if (result != DBErrors::LOAD_OK)
return result;
- // Last client version to open this wallet, was previously the file version number
+ // Last client version to open this wallet
int last_client = CLIENT_VERSION;
- m_batch->Read(DBKeys::VERSION, last_client);
-
- int wallet_version = pwallet->GetVersion();
- pwallet->WalletLogPrintf("Wallet File Version = %d\n", wallet_version > 0 ? wallet_version : last_client);
+ bool has_last_client = m_batch->Read(DBKeys::VERSION, last_client);
+ pwallet->WalletLogPrintf("Wallet file version = %d, last client version = %d\n", pwallet->GetVersion(), last_client);
pwallet->WalletLogPrintf("Keys: %u plaintext, %u encrypted, %u w/ metadata, %u total. Unknown wallet records: %u\n",
wss.nKeys, wss.nCKeys, wss.nKeyMeta, wss.nKeys + wss.nCKeys, wss.m_unknown_records);
@@ -909,7 +907,7 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
if (wss.fIsEncrypted && (last_client == 40000 || last_client == 50000))
return DBErrors::NEED_REWRITE;
- if (last_client < CLIENT_VERSION) // Update
+ if (!has_last_client || last_client != CLIENT_VERSION) // Update
m_batch->Write(DBKeys::VERSION, CLIENT_VERSION);
if (wss.fAnyUnordered)