-rw-r--r--  contrib/seeds/README.md                            2
-rw-r--r--  src/Makefile.test.include                          1
-rw-r--r--  src/bitcoin-chainstate.cpp                         1
-rw-r--r--  src/common/settings.cpp                           16
-rw-r--r--  src/headerssync.cpp                                2
-rw-r--r--  src/init.cpp                                       7
-rw-r--r--  src/kernel/chainstatemanager_opts.h                1
-rw-r--r--  src/net.cpp                                        8
-rw-r--r--  src/net.h                                          8
-rw-r--r--  src/net_processing.cpp                            51
-rw-r--r--  src/net_processing.h                              27
-rw-r--r--  src/node/miner.cpp                                 6
-rw-r--r--  src/protocol.cpp                                  14
-rw-r--r--  src/protocol.h                                    43
-rw-r--r--  src/rpc/mining.cpp                                 4
-rw-r--r--  src/test/fuzz/integer.cpp                          1
-rw-r--r--  src/test/peerman_tests.cpp                        76
-rw-r--r--  src/test/util/setup_common.cpp                     1
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp    1
-rw-r--r--  src/timedata.cpp                                   5
-rw-r--r--  src/timedata.h                                     3
-rw-r--r--  src/validation.cpp                                10
-rw-r--r--  src/validation.h                                   1
-rw-r--r--  src/wallet/sqlite.cpp                             46
-rw-r--r--  src/wallet/sqlite.h                               15
-rw-r--r--  src/wallet/test/db_tests.cpp                      77
-rw-r--r--  src/wallet/wallet.cpp                             20
-rwxr-xr-x  test/functional/test_framework/test_node.py      11
-rwxr-xr-x  test/functional/wallet_import_rescan.py            8
-rwxr-xr-x  test/functional/wallet_migration.py               13
30 files changed, 356 insertions, 123 deletions
diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md
index 6db77cbbea..ad1ac4a64d 100644
--- a/contrib/seeds/README.md
+++ b/contrib/seeds/README.md
@@ -4,7 +4,7 @@ Utility to generate the seeds.txt list that is compiled into the client
(see [src/chainparamsseeds.h](/src/chainparamsseeds.h) and other utilities in [contrib/seeds](/contrib/seeds)).
Be sure to update `PATTERN_AGENT` in `makeseeds.py` to include the current version,
-and remove old versions as necessary (at a minimum when GetDesirableServiceFlags
+and remove old versions as necessary (at a minimum when SeedsServiceFlags()
changes its default return value, as those are the services which seeds are added
to addrman with).
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 46b9495fc1..69d44dbde6 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -117,6 +117,7 @@ BITCOIN_TESTS =\
test/net_tests.cpp \
test/netbase_tests.cpp \
test/orphanage_tests.cpp \
+ test/peerman_tests.cpp \
test/pmt_tests.cpp \
test/policy_fee_tests.cpp \
test/policyestimator_tests.cpp \
diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp
index ee8b0a44c5..c1a71ed749 100644
--- a/src/bitcoin-chainstate.cpp
+++ b/src/bitcoin-chainstate.cpp
@@ -117,7 +117,6 @@ int main(int argc, char* argv[])
const ChainstateManager::Options chainman_opts{
.chainparams = *chainparams,
.datadir = abs_datadir,
- .adjusted_time_callback = NodeClock::now,
.notifications = *notifications,
};
const node::BlockManager::Options blockman_opts{
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 913ad6d76a..db1001111a 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -31,6 +31,9 @@ enum class Source {
CONFIG_FILE_DEFAULT_SECTION
};
+// Json object key for the auto-generated warning comment
+const std::string SETTINGS_WARN_MSG_KEY{"_warning_"};
+
//! Merge settings from multiple sources in precedence order:
//! Forced config > command line > read-write settings file > config file network-specific section > config file default section
//!
@@ -112,6 +115,10 @@ bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& va
break;
}
}
+
+ // Remove auto-generated warning comment from the accessible settings.
+ values.erase(SETTINGS_WARN_MSG_KEY);
+
return errors.empty();
}
@@ -120,12 +127,9 @@ bool WriteSettings(const fs::path& path,
std::vector<std::string>& errors)
{
SettingsValue out(SettingsValue::VOBJ);
- // Add auto-generated warning comment only if it does not exist
- if (!values.contains("_warning_")) {
- out.pushKV("_warning_", strprintf("This file is automatically generated and updated by %s. Please do not edit this file while the node "
- "is running, as any changes might be ignored or overwritten.",
- PACKAGE_NAME));
- }
+ // Add auto-generated warning comment
+ out.pushKV(SETTINGS_WARN_MSG_KEY, strprintf("This file is automatically generated and updated by %s. Please do not edit this file while the node "
+ "is running, as any changes might be ignored or overwritten.", PACKAGE_NAME));
// Push settings values
for (const auto& value : values) {
out.pushKVEnd(value.first, value.second);
diff --git a/src/headerssync.cpp b/src/headerssync.cpp
index 234fc8da60..e1dfe10483 100644
--- a/src/headerssync.cpp
+++ b/src/headerssync.cpp
@@ -41,7 +41,7 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
// exceeds this bound, because it's not possible for a consensus-valid
// chain to be longer than this (at the current time -- in the future we
// could try again, if necessary, to sync a longer chain).
- m_max_commitments = 6*(Ticks<std::chrono::seconds>(GetAdjustedTime() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
+ m_max_commitments = 6*(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
LogPrint(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
}
diff --git a/src/init.cpp b/src/init.cpp
index b825c8ce21..988daefeec 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1448,7 +1448,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
ChainstateManager::Options chainman_opts{
.chainparams = chainparams,
.datadir = args.GetDataDirNet(),
- .adjusted_time_callback = GetAdjustedTime,
.notifications = *node.notifications,
};
Assert(ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction
@@ -1743,13 +1742,15 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// ********************************************************* Step 12: start node
//// debug print
+ int64_t best_block_time{};
{
LOCK(cs_main);
LogPrintf("block tree size = %u\n", chainman.BlockIndex().size());
chain_active_height = chainman.ActiveChain().Height();
+ best_block_time = chainman.ActiveChain().Tip() ? chainman.ActiveChain().Tip()->GetBlockTime() : chainman.GetParams().GenesisBlock().GetBlockTime();
if (tip_info) {
tip_info->block_height = chain_active_height;
- tip_info->block_time = chainman.ActiveChain().Tip() ? chainman.ActiveChain().Tip()->GetBlockTime() : chainman.GetParams().GenesisBlock().GetBlockTime();
+ tip_info->block_time = best_block_time;
tip_info->verification_progress = GuessVerificationProgress(chainman.GetParams().TxData(), chainman.ActiveChain().Tip());
}
if (tip_info && chainman.m_best_header) {
@@ -1758,7 +1759,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}
}
LogPrintf("nBestHeight = %d\n", chain_active_height);
- if (node.peerman) node.peerman->SetBestHeight(chain_active_height);
+ if (node.peerman) node.peerman->SetBestBlock(chain_active_height, std::chrono::seconds{best_block_time});
// Map ports with UPnP or NAT-PMP.
StartMapPort(args.GetBoolArg("-upnp", DEFAULT_UPNP), args.GetBoolArg("-natpmp", DEFAULT_NATPMP));
diff --git a/src/kernel/chainstatemanager_opts.h b/src/kernel/chainstatemanager_opts.h
index ee20eabd79..864aac336e 100644
--- a/src/kernel/chainstatemanager_opts.h
+++ b/src/kernel/chainstatemanager_opts.h
@@ -32,7 +32,6 @@ namespace kernel {
struct ChainstateManagerOpts {
const CChainParams& chainparams;
fs::path datadir;
- const std::function<NodeClock::time_point()> adjusted_time_callback{nullptr};
std::optional<bool> check_block_index{};
bool checkpoints_enabled{DEFAULT_CHECKPOINTS_ENABLED};
//! If set, it will override the minimum work we will assume exists on some valid chain.
diff --git a/src/net.cpp b/src/net.cpp
index 10af6e7e91..7c82f01d75 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -203,7 +203,7 @@ static std::vector<CAddress> ConvertSeeds(const std::vector<uint8_t> &vSeedsIn)
while (!s.eof()) {
CService endpoint;
s >> endpoint;
- CAddress addr{endpoint, GetDesirableServiceFlags(NODE_NONE)};
+ CAddress addr{endpoint, SeedsServiceFlags()};
addr.nTime = rng.rand_uniform_delay(Now<NodeSeconds>() - one_week, -one_week);
LogPrint(BCLog::NET, "Added hardcoded seed: %s\n", addr.ToStringAddrPort());
vSeedsOut.push_back(addr);
@@ -2267,7 +2267,7 @@ void CConnman::ThreadDNSAddressSeed()
AddAddrFetch(seed);
} else {
std::vector<CAddress> vAdd;
- ServiceFlags requiredServiceBits = GetDesirableServiceFlags(NODE_NONE);
+ constexpr ServiceFlags requiredServiceBits{SeedsServiceFlags()};
std::string host = strprintf("x%x.%s", requiredServiceBits, seed);
CNetAddr resolveSource;
if (!resolveSource.SetInternal(host)) {
@@ -2638,7 +2638,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
const CAddress addr = m_anchors.back();
m_anchors.pop_back();
if (!addr.IsValid() || IsLocal(addr) || !g_reachable_nets.Contains(addr) ||
- !HasAllDesirableServiceFlags(addr.nServices) ||
+ !m_msgproc->HasAllDesirableServiceFlags(addr.nServices) ||
outbound_ipv46_peer_netgroups.count(m_netgroupman.GetGroup(addr))) continue;
addrConnect = addr;
LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToStringAddrPort());
@@ -2704,7 +2704,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// for non-feelers, require all the services we'll want,
// for feelers, only require they be a full node (only because most
// SPV clients don't have a good address DB available)
- if (!fFeeler && !HasAllDesirableServiceFlags(addr.nServices)) {
+ if (!fFeeler && !m_msgproc->HasAllDesirableServiceFlags(addr.nServices)) {
continue;
} else if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) {
continue;
diff --git a/src/net.h b/src/net.h
index 6ff4ca3c41..e78e122c44 100644
--- a/src/net.h
+++ b/src/net.h
@@ -97,7 +97,7 @@ static constexpr bool DEFAULT_FIXEDSEEDS{true};
static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;
static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000;
-static constexpr bool DEFAULT_V2_TRANSPORT{false};
+static constexpr bool DEFAULT_V2_TRANSPORT{true};
typedef int64_t NodeId;
@@ -1006,6 +1006,12 @@ public:
virtual void FinalizeNode(const CNode& node) = 0;
/**
+ * Callback to determine whether the given set of service flags are sufficient
+ * for a peer to be "relevant".
+ */
+ virtual bool HasAllDesirableServiceFlags(ServiceFlags services) const = 0;
+
+ /**
* Process protocol messages received from a given node
*
* @param[in] pnode The node which we have received messages from.
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 80cf610a0d..1cf72771b5 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -133,6 +133,8 @@ static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
+/** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */
+static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144;
/** Average delay between local address broadcasts */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/** Average delay between peer address broadcasts */
@@ -499,6 +501,7 @@ public:
/** Implement NetEventsInterface */
void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex);
+ bool HasAllDesirableServiceFlags(ServiceFlags services) const override;
bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
bool SendMessages(CNode* pto) override
@@ -513,12 +516,17 @@ public:
bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
- void SetBestHeight(int height) override { m_best_height = height; };
+ void SetBestBlock(int height, std::chrono::seconds time) override
+ {
+ m_best_height = height;
+ m_best_block_time = time;
+ };
void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); };
void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;
+ ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override;
private:
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
@@ -721,6 +729,8 @@ private:
/** The height of the best chain */
std::atomic<int> m_best_height{-1};
+ /** The time of the best chain tip block */
+ std::atomic<std::chrono::seconds> m_best_block_time{0s};
/** Next time to check for stale tip */
std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
@@ -993,6 +1003,12 @@ private:
bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
+ * Estimates the distance, in blocks, between the best-known block and the network chain tip.
+ * Utilizes the best-block time and the chainparams blocks spacing to approximate it.
+ */
+ int64_t ApproximateBestBlockDepth() const;
+
+ /**
* To prevent fingerprinting attacks, only send blocks/headers outside of
* the active chain if they are no more than a month older (both in time,
* and in best equivalent proof of work) than the best header chain we know
@@ -1311,9 +1327,14 @@ bool PeerManagerImpl::TipMayBeStale()
return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
}
+int64_t PeerManagerImpl::ApproximateBestBlockDepth() const
+{
+ return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing;
+}
+
bool PeerManagerImpl::CanDirectFetch()
{
- return m_chainman.ActiveChain().Tip()->Time() > GetAdjustedTime() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
+ return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
}
static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
@@ -1651,6 +1672,23 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
+bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const
+{
+ // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)
+ return !(GetDesirableServiceFlags(services) & (~services));
+}
+
+ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const
+{
+ if (services & NODE_NETWORK_LIMITED) {
+ // Limited peers are desirable when we are close to the tip.
+ if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) {
+ return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS);
+ }
+ }
+ return ServiceFlags(NODE_NETWORK | NODE_WITNESS);
+}
+
PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
{
LOCK(m_peer_mutex);
@@ -2047,8 +2085,7 @@ void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::sha
*/
void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
{
- SetBestHeight(pindexNew->nHeight);
- SetServiceFlagsIBDCache(!fInitialDownload);
+ SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()});
// Don't relay inventory during initial block download.
if (fInitialDownload) return;
@@ -5555,7 +5592,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
// Only actively request headers from a single peer, unless we're close to today.
- if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
+ if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) {
const CBlockIndex* pindexStart = m_chainman.m_best_header;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
@@ -5575,7 +5612,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
// to maintain precision
std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
- Ticks<std::chrono::seconds>(GetAdjustedTime() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
+ Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
);
nSyncStarted++;
}
@@ -5879,7 +5916,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Check for headers sync timeouts
if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
// Detect whether this is a stalling initial-headers-sync peer
- if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
+ if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
// Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
// and we have others we could be using instead.
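The new PeerManagerImpl code above derives the node's approximate distance from the network tip from the age of its best block and only treats NODE_NETWORK_LIMITED peers as desirable while that distance is inside the 144-block window. A minimal standalone sketch of the arithmetic, kept separate from the patch (the constants, function names and main() driver here are illustrative only):

// Sketch of the depth approximation used to gate NODE_NETWORK_LIMITED desirability.
// Plain integers stand in for PeerManagerImpl's members; values are assumptions.
#include <cstdint>
#include <iostream>

constexpr int64_t POW_TARGET_SPACING{600};          // mainnet spacing, seconds per block
constexpr int64_t LIMITED_PEER_WINDOW_BLOCKS{144};  // mirrors NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS

// Same idea as ApproximateBestBlockDepth(): blocks behind, estimated from wall-clock age.
int64_t ApproxDepth(int64_t now, int64_t best_block_time)
{
    return (now - best_block_time) / POW_TARGET_SPACING;
}

int main()
{
    const int64_t now{1'700'000'000};
    // Tip one hour old: 3600 / 600 = 6 blocks behind, inside the window -> limited peers acceptable.
    std::cout << (ApproxDepth(now, now - 3'600) < LIMITED_PEER_WINDOW_BLOCKS) << '\n';  // prints 1
    // Tip one day old: 86400 / 600 = 144 blocks behind, no longer strictly inside the window
    // -> only full NODE_NETWORK peers remain desirable.
    std::cout << (ApproxDepth(now, now - 86'400) < LIMITED_PEER_WINDOW_BLOCKS) << '\n'; // prints 0
}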
diff --git a/src/net_processing.h b/src/net_processing.h
index afdef00067..f8d7a8f511 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -92,8 +92,8 @@ public:
/** Send ping message to all peers */
virtual void SendPings() = 0;
- /** Set the best height */
- virtual void SetBestHeight(int height) = 0;
+ /** Set the height of the best block and its time (seconds since epoch). */
+ virtual void SetBestBlock(int height, std::chrono::seconds time) = 0;
/* Public for unit testing. */
virtual void UnitTestMisbehaving(NodeId peer_id, int howmuch) = 0;
@@ -110,6 +110,29 @@ public:
/** This function is used for testing the stale tip eviction logic, see denialofservice_tests.cpp */
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) = 0;
+
+ /**
+ * Gets the set of service flags which are "desirable" for a given peer.
+ *
+ * These are the flags which are required for a peer to support for them
+ * to be "interesting" to us, ie for us to wish to use one of our few
+ * outbound connection slots for or for us to wish to prioritize keeping
+ * their connection around.
+ *
+ * Relevant service flags may be peer- and state-specific in that the
+ * version of the peer may determine which flags are required (eg in the
+ * case of NODE_NETWORK_LIMITED where we seek out NODE_NETWORK peers
+ * unless they set NODE_NETWORK_LIMITED and we are out of IBD, in which
+ * case NODE_NETWORK_LIMITED suffices).
+ *
+ * Thus, generally, avoid calling with 'services' == NODE_NONE, unless
+ * state-specific flags must absolutely be avoided. When called with
+ * 'services' == NODE_NONE, the returned desirable service flags are
+ * guaranteed to not change dependent on state - ie they are suitable for
+ * use when describing peers which we know to be desirable, but for which
+ * we do not have a confirmed set of service flags.
+ */
+ virtual ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const = 0;
};
#endif // BITCOIN_NET_PROCESSING_H
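The comment above defines which flags are "desirable"; PeerManagerImpl::HasAllDesirableServiceFlags() then checks them with the bitmask shortcut !(desirable & ~services). A small self-contained sketch working that relation through with concrete bits (the local Flags enum and helper are assumptions for illustration; only the bit trick itself is taken from the patch):

// Demonstrates that !(desirable & ~services) is the same as ((services & desirable) == desirable).
#include <cassert>
#include <cstdint>

enum Flags : uint64_t {            // toy subset; bit positions follow ServiceFlags, restated locally
    F_NETWORK         = 1 << 0,
    F_WITNESS         = 1 << 3,
    F_NETWORK_LIMITED = 1 << 10,
};

bool HasAll(uint64_t services, uint64_t desirable)
{
    return !(desirable & ~services);  // true iff every desirable bit is present in services
}

int main()
{
    const uint64_t desirable    = F_NETWORK | F_WITNESS;          // far-from-tip requirement
    const uint64_t full_peer    = F_NETWORK | F_WITNESS | F_NETWORK_LIMITED;
    const uint64_t limited_peer = F_NETWORK_LIMITED | F_WITNESS;

    assert(HasAll(full_peer, desirable));        // superset of the desirable bits: relevant
    assert(!HasAll(limited_peer, desirable));    // F_NETWORK missing: not relevant
    // Shortcut agrees with the spelled-out form from the removed protocol.h helper.
    assert(HasAll(limited_peer, desirable) == ((limited_peer & desirable) == desirable));
    return 0;
}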
diff --git a/src/node/miner.cpp b/src/node/miner.cpp
index ce5452d1f9..89498fb976 100644
--- a/src/node/miner.cpp
+++ b/src/node/miner.cpp
@@ -31,7 +31,7 @@ namespace node {
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
int64_t nOldTime = pblock->nTime;
- int64_t nNewTime{std::max<int64_t>(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch<std::chrono::seconds>(GetAdjustedTime()))};
+ int64_t nNewTime{std::max<int64_t>(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch<std::chrono::seconds>(NodeClock::now()))};
if (nOldTime < nNewTime) {
pblock->nTime = nNewTime;
@@ -133,7 +133,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblock->nVersion = gArgs.GetIntArg("-blockversion", pblock->nVersion);
}
- pblock->nTime = TicksSinceEpoch<std::chrono::seconds>(GetAdjustedTime());
+ pblock->nTime = TicksSinceEpoch<std::chrono::seconds>(NodeClock::now());
m_lock_time_cutoff = pindexPrev->GetMedianTimePast();
int nPackagesSelected = 0;
@@ -171,7 +171,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
BlockValidationState state;
if (m_options.test_block_validity && !TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev,
- GetAdjustedTime, /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) {
+ /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) {
throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
}
const auto time_2{SteadyClock::now()};
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 27a0a2ffc1..0da160768d 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -9,8 +9,6 @@
#include <atomic>
-static std::atomic<bool> g_initial_block_download_completed(false);
-
namespace NetMsgType {
const char* VERSION = "version";
const char* VERACK = "verack";
@@ -125,18 +123,6 @@ bool CMessageHeader::IsCommandValid() const
return true;
}
-
-ServiceFlags GetDesirableServiceFlags(ServiceFlags services) {
- if ((services & NODE_NETWORK_LIMITED) && g_initial_block_download_completed) {
- return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS);
- }
- return ServiceFlags(NODE_NETWORK | NODE_WITNESS);
-}
-
-void SetServiceFlagsIBDCache(bool state) {
- g_initial_block_download_completed = state;
-}
-
CInv::CInv()
{
type = 0;
diff --git a/src/protocol.h b/src/protocol.h
index e405253632..243cd23e6e 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -311,43 +311,12 @@ enum ServiceFlags : uint64_t {
std::vector<std::string> serviceFlagsToStr(uint64_t flags);
/**
- * Gets the set of service flags which are "desirable" for a given peer.
- *
- * These are the flags which are required for a peer to support for them
- * to be "interesting" to us, ie for us to wish to use one of our few
- * outbound connection slots for or for us to wish to prioritize keeping
- * their connection around.
- *
- * Relevant service flags may be peer- and state-specific in that the
- * version of the peer may determine which flags are required (eg in the
- * case of NODE_NETWORK_LIMITED where we seek out NODE_NETWORK peers
- * unless they set NODE_NETWORK_LIMITED and we are out of IBD, in which
- * case NODE_NETWORK_LIMITED suffices).
- *
- * Thus, generally, avoid calling with peerServices == NODE_NONE, unless
- * state-specific flags must absolutely be avoided. When called with
- * peerServices == NODE_NONE, the returned desirable service flags are
- * guaranteed to not change dependent on state - ie they are suitable for
- * use when describing peers which we know to be desirable, but for which
- * we do not have a confirmed set of service flags.
- *
- * If the NODE_NONE return value is changed, contrib/seeds/makeseeds.py
- * should be updated appropriately to filter for the same nodes.
- */
-ServiceFlags GetDesirableServiceFlags(ServiceFlags services);
-
-/** Set the current IBD status in order to figure out the desirable service flags */
-void SetServiceFlagsIBDCache(bool status);
-
-/**
- * A shortcut for (services & GetDesirableServiceFlags(services))
- * == GetDesirableServiceFlags(services), ie determines whether the given
- * set of service flags are sufficient for a peer to be "relevant".
- */
-static inline bool HasAllDesirableServiceFlags(ServiceFlags services)
-{
- return !(GetDesirableServiceFlags(services) & (~services));
-}
+ * State independent service flags.
+ * If the return value is changed, contrib/seeds/makeseeds.py
+ * should be updated appropriately to filter for nodes with
+ * desired service flags (compatible with our new flags).
+ */
+constexpr ServiceFlags SeedsServiceFlags() { return ServiceFlags(NODE_NETWORK | NODE_WITNESS); }
/**
* Checks if a peer with the given service flags may be capable of having a
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 49b51b83ae..4050cb1996 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -383,7 +383,7 @@ static RPCHelpMan generateblock()
LOCK(cs_main);
BlockValidationState state;
- if (!TestBlockValidity(state, chainman.GetParams(), chainman.ActiveChainstate(), block, chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), GetAdjustedTime, false, false)) {
+ if (!TestBlockValidity(state, chainman.GetParams(), chainman.ActiveChainstate(), block, chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), false, false)) {
throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("TestBlockValidity failed: %s", state.ToString()));
}
}
@@ -697,7 +697,7 @@ static RPCHelpMan getblocktemplate()
if (block.hashPrevBlock != pindexPrev->GetBlockHash())
return "inconclusive-not-best-prevblk";
BlockValidationState state;
- TestBlockValidity(state, chainman.GetParams(), active_chainstate, block, pindexPrev, GetAdjustedTime, false, true);
+ TestBlockValidity(state, chainman.GetParams(), active_chainstate, block, pindexPrev, false, true);
return BIP22ValidationResult(state);
}
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 9b97958a5d..2577f9e97a 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -213,7 +213,6 @@ FUZZ_TARGET(integer, .init = initialize_integer)
{
const ServiceFlags service_flags = (ServiceFlags)u64;
- (void)HasAllDesirableServiceFlags(service_flags);
(void)MayHaveUsefulAddressDB(service_flags);
}
diff --git a/src/test/peerman_tests.cpp b/src/test/peerman_tests.cpp
new file mode 100644
index 0000000000..2c79329385
--- /dev/null
+++ b/src/test/peerman_tests.cpp
@@ -0,0 +1,76 @@
+// Copyright (c) 2024-present The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or https://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <node/miner.h>
+#include <net_processing.h>
+#include <pow.h>
+#include <test/util/setup_common.h>
+#include <validation.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(peerman_tests, RegTestingSetup)
+
+/** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */
+static constexpr int64_t NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144;
+
+static void mineBlock(const node::NodeContext& node, std::chrono::seconds block_time)
+{
+ auto curr_time = GetTime<std::chrono::seconds>();
+ SetMockTime(block_time); // update time so the block is created with it
+ CBlock block = node::BlockAssembler{node.chainman->ActiveChainstate(), nullptr}.CreateNewBlock(CScript() << OP_TRUE)->block;
+ while (!CheckProofOfWork(block.GetHash(), block.nBits, node.chainman->GetConsensus())) ++block.nNonce;
+ block.fChecked = true; // little speedup
+ SetMockTime(curr_time); // process block at current time
+ Assert(node.chainman->ProcessNewBlock(std::make_shared<const CBlock>(block), /*force_processing=*/true, /*min_pow_checked=*/true, nullptr));
+ SyncWithValidationInterfaceQueue(); // drain events queue
+}
+
+// Verifying when network-limited peer connections are desirable based on the node's proximity to the tip
+BOOST_AUTO_TEST_CASE(connections_desirable_service_flags)
+{
+ std::unique_ptr<PeerManager> peerman = PeerManager::make(*m_node.connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, {});
+ auto consensus = m_node.chainman->GetParams().GetConsensus();
+
+ // Check we start connecting to full nodes
+ ServiceFlags peer_flags{NODE_WITNESS | NODE_NETWORK_LIMITED};
+ BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS));
+
+ // Make peerman aware of the initial best block and verify we accept limited peers when we start close to the tip time.
+ auto tip = WITH_LOCK(::cs_main, return m_node.chainman->ActiveChain().Tip());
+ uint64_t tip_block_time = tip->GetBlockTime();
+ int tip_block_height = tip->nHeight;
+ peerman->SetBestBlock(tip_block_height, std::chrono::seconds{tip_block_time});
+
+ SetMockTime(tip_block_time + 1); // Set node time to tip time
+ BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS));
+
+ // Check we don't disallow limited peers connections when we are behind but still recoverable (below the connection safety window)
+ SetMockTime(GetTime<std::chrono::seconds>() + std::chrono::seconds{consensus.nPowTargetSpacing * (NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS - 1)});
+ BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS));
+
+ // Check we disallow limited peers connections when we are further than the limited peers safety window
+ SetMockTime(GetTime<std::chrono::seconds>() + std::chrono::seconds{consensus.nPowTargetSpacing * 2});
+ BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS));
+
+ // By now, we tested that the connections desirable services flags change based on the node's time proximity to the tip.
+ // Now, perform the same tests for when the node receives a block.
+ RegisterValidationInterface(peerman.get());
+
+ // First, verify a block in the past doesn't enable limited peers connections
+ // At this point, our time is (NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS + 1) * 10 minutes ahead of the tip's time.
+ mineBlock(m_node, /*block_time=*/std::chrono::seconds{tip_block_time + 1});
+ BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS));
+
+ // Verify a block close to the tip enables limited peers connections
+ mineBlock(m_node, /*block_time=*/GetTime<std::chrono::seconds>());
+ BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS));
+
+ // Lastly, verify the stale tip checks can disallow limited peers connections after not receiving blocks for a prolonged period.
+ SetMockTime(GetTime<std::chrono::seconds>() + std::chrono::seconds{consensus.nPowTargetSpacing * NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS + 1});
+ BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 8789e86196..f8facad883 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -184,7 +184,6 @@ ChainTestingSetup::ChainTestingSetup(const ChainType chainType, const std::vecto
const ChainstateManager::Options chainman_opts{
.chainparams = chainparams,
.datadir = m_args.GetDataDirNet(),
- .adjusted_time_callback = GetAdjustedTime,
.check_block_index = true,
.notifications = *m_node.notifications,
.worker_threads_num = 2,
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 368ba8bee4..5f298b562b 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -383,7 +383,6 @@ struct SnapshotTestSetup : TestChain100Setup {
const ChainstateManager::Options chainman_opts{
.chainparams = ::Params(),
.datadir = chainman.m_options.datadir,
- .adjusted_time_callback = GetAdjustedTime,
.notifications = *m_node.notifications,
};
const BlockManager::Options blockman_opts{
diff --git a/src/timedata.cpp b/src/timedata.cpp
index 15ca90ee6a..d948de976f 100644
--- a/src/timedata.cpp
+++ b/src/timedata.cpp
@@ -33,11 +33,6 @@ int64_t GetTimeOffset()
return nTimeOffset;
}
-NodeClock::time_point GetAdjustedTime()
-{
- return NodeClock::now() + std::chrono::seconds{GetTimeOffset()};
-}
-
#define BITCOIN_TIMEDATA_MAX_SAMPLES 200
static std::set<CNetAddr> g_sources;
diff --git a/src/timedata.h b/src/timedata.h
index c6c36d9a39..90428d071c 100644
--- a/src/timedata.h
+++ b/src/timedata.h
@@ -75,11 +75,10 @@ public:
/** Functions to keep track of adjusted P2P time */
int64_t GetTimeOffset();
-NodeClock::time_point GetAdjustedTime();
void AddTimeData(const CNetAddr& ip, int64_t nTime);
/**
- * Reset the internal state of GetTimeOffset(), GetAdjustedTime() and AddTimeData().
+ * Reset the internal state of GetTimeOffset() and AddTimeData().
*/
void TestOnlyResetTimeData();
diff --git a/src/validation.cpp b/src/validation.cpp
index 4dc3a0f512..1302eb84fa 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -3777,7 +3777,7 @@ arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader>& headers)
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
-static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const ChainstateManager& chainman, const CBlockIndex* pindexPrev, NodeClock::time_point now) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
+static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const ChainstateManager& chainman, const CBlockIndex* pindexPrev) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
AssertLockHeld(::cs_main);
assert(pindexPrev != nullptr);
@@ -3805,7 +3805,7 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidatio
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
// Check timestamp
- if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
+ if (block.Time() > NodeClock::now() + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
}
@@ -3945,7 +3945,7 @@ bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValida
LogPrint(BCLog::VALIDATION, "header %s has prev block invalid: %s\n", hash.ToString(), block.hashPrevBlock.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
- if (!ContextualCheckBlockHeader(block, state, m_blockman, *this, pindexPrev, m_options.adjusted_time_callback())) {
+ if (!ContextualCheckBlockHeader(block, state, m_blockman, *this, pindexPrev)) {
LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
return false;
}
@@ -4230,7 +4230,6 @@ bool TestBlockValidity(BlockValidationState& state,
Chainstate& chainstate,
const CBlock& block,
CBlockIndex* pindexPrev,
- const std::function<NodeClock::time_point()>& adjusted_time_callback,
bool fCheckPOW,
bool fCheckMerkleRoot)
{
@@ -4244,7 +4243,7 @@ bool TestBlockValidity(BlockValidationState& state,
indexDummy.phashBlock = &block_hash;
// NOTE: CheckBlockHeader is called by CheckBlock
- if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev, adjusted_time_callback()))
+ if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev))
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
@@ -5780,7 +5779,6 @@ static ChainstateManager::Options&& Flatten(ChainstateManager::Options&& opts)
if (!opts.check_block_index.has_value()) opts.check_block_index = opts.chainparams.DefaultConsistencyChecks();
if (!opts.minimum_chain_work.has_value()) opts.minimum_chain_work = UintToArith256(opts.chainparams.GetConsensus().nMinimumChainWork);
if (!opts.assumed_valid_block.has_value()) opts.assumed_valid_block = opts.chainparams.GetConsensus().defaultAssumeValid;
- Assert(opts.adjusted_time_callback);
return std::move(opts);
}
diff --git a/src/validation.h b/src/validation.h
index 093cecfcd1..fd9b53df8f 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -377,7 +377,6 @@ bool TestBlockValidity(BlockValidationState& state,
Chainstate& chainstate,
const CBlock& block,
CBlockIndex* pindexPrev,
- const std::function<NodeClock::time_point()>& adjusted_time_callback,
bool fCheckPOW = true,
bool fCheckMerkleRoot = true) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index db9989163d..cff3628049 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -377,6 +377,17 @@ void SQLiteDatabase::Close()
m_db = nullptr;
}
+bool SQLiteDatabase::HasActiveTxn()
+{
+ // 'sqlite3_get_autocommit' returns true by default, and false if a transaction has begun and not been committed or rolled back.
+ return m_db && sqlite3_get_autocommit(m_db) == 0;
+}
+
+int SQliteExecHandler::Exec(SQLiteDatabase& database, const std::string& statement)
+{
+ return sqlite3_exec(database.m_db, statement.data(), nullptr, nullptr, nullptr);
+}
+
std::unique_ptr<DatabaseBatch> SQLiteDatabase::MakeBatch(bool flush_on_close)
{
// We ignore flush_on_close because we don't do manual flushing for SQLite
@@ -394,12 +405,18 @@ SQLiteBatch::SQLiteBatch(SQLiteDatabase& database)
void SQLiteBatch::Close()
{
- // If m_db is in a transaction (i.e. not in autocommit mode), then abort the transaction in progress
- if (m_database.m_db && sqlite3_get_autocommit(m_database.m_db) == 0) {
+ bool force_conn_refresh = false;
+
+ // If we began a transaction, and it wasn't committed, abort the transaction in progress
+ if (m_database.HasActiveTxn()) {
if (TxnAbort()) {
LogPrintf("SQLiteBatch: Batch closed unexpectedly without the transaction being explicitly committed or aborted\n");
} else {
- LogPrintf("SQLiteBatch: Batch closed and failed to abort transaction\n");
+ // If transaction cannot be aborted, it means there is a bug or there has been data corruption. Try to recover in this case
+ // by closing and reopening the database. Closing the database should also ensure that any changes made since the transaction
+ // was opened will be rolled back and future transactions can succeed without committing old data.
+ force_conn_refresh = true;
+ LogPrintf("SQLiteBatch: Batch closed and failed to abort transaction, resetting db connection..\n");
}
}
@@ -420,6 +437,17 @@ void SQLiteBatch::Close()
}
*stmt_prepared = nullptr;
}
+
+ if (force_conn_refresh) {
+ m_database.Close();
+ try {
+ m_database.Open();
+ } catch (const std::runtime_error&) {
+ // If open fails, cleanup this object and rethrow the exception
+ m_database.Close();
+ throw;
+ }
+ }
}
bool SQLiteBatch::ReadKey(DataStream&& key, DataStream& value)
@@ -606,8 +634,8 @@ std::unique_ptr<DatabaseCursor> SQLiteBatch::GetNewPrefixCursor(Span<const std::
bool SQLiteBatch::TxnBegin()
{
- if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) == 0) return false;
- int res = sqlite3_exec(m_database.m_db, "BEGIN TRANSACTION", nullptr, nullptr, nullptr);
+ if (!m_database.m_db || m_database.HasActiveTxn()) return false;
+ int res = Assert(m_exec_handler)->Exec(m_database, "BEGIN TRANSACTION");
if (res != SQLITE_OK) {
LogPrintf("SQLiteBatch: Failed to begin the transaction\n");
}
@@ -616,8 +644,8 @@ bool SQLiteBatch::TxnBegin()
bool SQLiteBatch::TxnCommit()
{
- if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) != 0) return false;
- int res = sqlite3_exec(m_database.m_db, "COMMIT TRANSACTION", nullptr, nullptr, nullptr);
+ if (!m_database.HasActiveTxn()) return false;
+ int res = Assert(m_exec_handler)->Exec(m_database, "COMMIT TRANSACTION");
if (res != SQLITE_OK) {
LogPrintf("SQLiteBatch: Failed to commit the transaction\n");
}
@@ -626,8 +654,8 @@ bool SQLiteBatch::TxnCommit()
bool SQLiteBatch::TxnAbort()
{
- if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) != 0) return false;
- int res = sqlite3_exec(m_database.m_db, "ROLLBACK TRANSACTION", nullptr, nullptr, nullptr);
+ if (!m_database.HasActiveTxn()) return false;
+ int res = Assert(m_exec_handler)->Exec(m_database, "ROLLBACK TRANSACTION");
if (res != SQLITE_OK) {
LogPrintf("SQLiteBatch: Failed to abort the transaction\n");
}
diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h
index f1ce0567e1..ad91be1064 100644
--- a/src/wallet/sqlite.h
+++ b/src/wallet/sqlite.h
@@ -36,11 +36,21 @@ public:
Status Next(DataStream& key, DataStream& value) override;
};
+/** Class responsible for executing SQL statements in SQLite databases.
+ * Methods are virtual so they can be overridden by unit tests testing unusual database conditions. */
+class SQliteExecHandler
+{
+public:
+ virtual ~SQliteExecHandler() {}
+ virtual int Exec(SQLiteDatabase& database, const std::string& statement);
+};
+
/** RAII class that provides access to a WalletDatabase */
class SQLiteBatch : public DatabaseBatch
{
private:
SQLiteDatabase& m_database;
+ std::unique_ptr<SQliteExecHandler> m_exec_handler{std::make_unique<SQliteExecHandler>()};
sqlite3_stmt* m_read_stmt{nullptr};
sqlite3_stmt* m_insert_stmt{nullptr};
@@ -61,6 +71,8 @@ public:
explicit SQLiteBatch(SQLiteDatabase& database);
~SQLiteBatch() override { Close(); }
+ void SetExecHandler(std::unique_ptr<SQliteExecHandler>&& handler) { m_exec_handler = std::move(handler); }
+
/* No-op. See comment on SQLiteDatabase::Flush */
void Flush() override {}
@@ -142,6 +154,9 @@ public:
/** Make a SQLiteBatch connected to this database */
std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override;
+ /** Return true if there is an on-going txn in this connection */
+ bool HasActiveTxn();
+
sqlite3* m_db{nullptr};
bool m_use_unsafe_sync;
};
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index d341e84d9b..7e6219378f 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -205,5 +205,82 @@ BOOST_AUTO_TEST_CASE(db_cursor_prefix_byte_test)
}
}
+BOOST_AUTO_TEST_CASE(db_availability_after_write_error)
+{
+ // Ensures the database remains accessible without deadlocking after a write error.
+ // To simulate the behavior, record overwrites are disallowed, and the test verifies
+ // that the database remains active after failing to store an existing record.
+ for (const auto& database : TestDatabases(m_path_root)) {
+ // Write original record
+ std::unique_ptr<DatabaseBatch> batch = database->MakeBatch();
+ std::string key = "key";
+ std::string value = "value";
+ std::string value2 = "value_2";
+ BOOST_CHECK(batch->Write(key, value));
+ // Attempt to overwrite the record (expect failure)
+ BOOST_CHECK(!batch->Write(key, value2, /*fOverwrite=*/false));
+ // Successfully overwrite the record
+ BOOST_CHECK(batch->Write(key, value2, /*fOverwrite=*/true));
+ // Sanity-check; read and verify the overwritten value
+ std::string read_value;
+ BOOST_CHECK(batch->Read(key, read_value));
+ BOOST_CHECK_EQUAL(read_value, value2);
+ }
+}
+
+#ifdef USE_SQLITE
+
+// Test-only statement execution error
+constexpr int TEST_SQLITE_ERROR = -999;
+
+class DbExecBlocker : public SQliteExecHandler
+{
+private:
+ SQliteExecHandler m_base_exec;
+ std::set<std::string> m_blocked_statements;
+public:
+ DbExecBlocker(std::set<std::string> blocked_statements) : m_blocked_statements(blocked_statements) {}
+ int Exec(SQLiteDatabase& database, const std::string& statement) override {
+ if (m_blocked_statements.contains(statement)) return TEST_SQLITE_ERROR;
+ return m_base_exec.Exec(database, statement);
+ }
+};
+
+BOOST_AUTO_TEST_CASE(txn_close_failure_dangling_txn)
+{
+ // Verifies that there is no active dangling, to-be-reversed db txn
+ // after the batch object that initiated it is destroyed.
+ DatabaseOptions options;
+ DatabaseStatus status;
+ bilingual_str error;
+ std::unique_ptr<SQLiteDatabase> database = MakeSQLiteDatabase(m_path_root / "sqlite", options, status, error);
+
+ std::string key = "key";
+ std::string value = "value";
+
+ std::unique_ptr<SQLiteBatch> batch = std::make_unique<SQLiteBatch>(*database);
+ BOOST_CHECK(batch->TxnBegin());
+ BOOST_CHECK(batch->Write(key, value));
+ // Set a handler to prevent txn abortion during destruction.
+ // Mimicking a db statement execution failure.
+ batch->SetExecHandler(std::make_unique<DbExecBlocker>(std::set<std::string>{"ROLLBACK TRANSACTION"}));
+ // Destroy batch
+ batch.reset();
+
+ // Ensure there is no dangling, to-be-reversed db txn
+ BOOST_CHECK(!database->HasActiveTxn());
+
+ // And, just as a sanity check, verify that new batches only write what they are supposed to write
+ // and nothing else.
+ std::string key2 = "key2";
+ std::unique_ptr<SQLiteBatch> batch2 = std::make_unique<SQLiteBatch>(*database);
+ BOOST_CHECK(batch2->Write(key2, value));
+ // The first key must not exist
+ BOOST_CHECK(!batch2->Exists(key));
+}
+
+#endif // USE_SQLITE
+
+
BOOST_AUTO_TEST_SUITE_END()
} // namespace wallet
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index e93cd4b4b9..33b3ad6e91 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -3864,7 +3864,11 @@ std::optional<MigrationData> CWallet::GetDescriptorsForLegacy(bilingual_str& err
AssertLockHeld(cs_wallet);
LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan();
- assert(legacy_spkm);
+ if (!Assume(legacy_spkm)) {
+ // This shouldn't happen
+ error = Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing"));
+ return std::nullopt;
+ }
std::optional<MigrationData> res = legacy_spkm->MigrateToDescriptor();
if (res == std::nullopt) {
@@ -3879,8 +3883,9 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error)
AssertLockHeld(cs_wallet);
LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan();
- if (!legacy_spkm) {
- error = _("Error: This wallet is already a descriptor wallet");
+ if (!Assume(legacy_spkm)) {
+ // This shouldn't happen
+ error = Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing"));
return false;
}
@@ -4215,7 +4220,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle
}
// Before anything else, check if there is something to migrate.
- if (!local_wallet->GetLegacyScriptPubKeyMan()) {
+ if (local_wallet->IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) {
return util::Error{_("Error: This wallet is already a descriptor wallet")};
}
@@ -4248,8 +4253,11 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle
// First change to using SQLite
if (!local_wallet->MigrateToSQLite(error)) return util::Error{error};
- // Do the migration, and cleanup if it fails
- success = DoMigration(*local_wallet, context, error, res);
+ // Do the migration of keys and scripts for non-blank wallets, and cleanup if it fails
+ success = local_wallet->IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET);
+ if (!success) {
+ success = DoMigration(*local_wallet, context, error, res);
+ }
}
// In case of reloading failure, we need to remember the wallet dirs to remove
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 58956a95f7..838dcba141 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -130,8 +130,15 @@ class TestNode():
# Default behavior from global -v2transport flag is added to args to persist it over restarts.
# May be overwritten in individual tests, using extra_args.
self.default_to_v2 = v2transport
- if self.default_to_v2:
- self.args.append("-v2transport=1")
+ if self.version_is_at_least(260000):
+ # 26.0 and later support v2transport
+ if v2transport:
+ self.args.append("-v2transport=1")
+ else:
+ self.args.append("-v2transport=0")
+ else:
+ # v2transport requested but not supported for node
+ assert not v2transport
self.cli = TestNodeCLI(bitcoin_cli, self.datadir_path)
self.use_cli = use_cli
diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py
index 56cb71cd62..e647fb2d5c 100755
--- a/test/functional/wallet_import_rescan.py
+++ b/test/functional/wallet_import_rescan.py
@@ -145,8 +145,10 @@ TIMESTAMP_WINDOW = 2 * 60 * 60
AMOUNT_DUST = 0.00000546
-def get_rand_amount():
- r = random.uniform(AMOUNT_DUST, 1)
+def get_rand_amount(min_amount=AMOUNT_DUST):
+ assert min_amount <= 1
+ r = random.uniform(min_amount, 1)
+ # note: min_amount can get rounded down here
return Decimal(str(round(r, 8)))
@@ -273,7 +275,7 @@ class ImportRescanTest(BitcoinTestFramework):
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
# Ensure output is large enough to pay for fees: conservatively assuming txsize of
# 500 vbytes and feerate of 20 sats/vbytes
- variant.initial_amount = max(get_rand_amount(), (500 * 20 / COIN) + AMOUNT_DUST)
+ variant.initial_amount = get_rand_amount(min_amount=((500 * 20 / COIN) + AMOUNT_DUST))
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
variant.confirmation_height = 0
variant.timestamp = timestamp
diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py
index 3d68dbe07a..20e92dbef7 100755
--- a/test/functional/wallet_migration.py
+++ b/test/functional/wallet_migration.py
@@ -52,13 +52,12 @@ class WalletMigrationTest(BitcoinTestFramework):
assert_equal(file_magic, b'SQLite format 3\x00')
assert_equal(self.nodes[0].get_wallet_rpc(wallet_name).getwalletinfo()["format"], "sqlite")
- def create_legacy_wallet(self, wallet_name, disable_private_keys=False):
- self.nodes[0].createwallet(wallet_name=wallet_name, descriptors=False, disable_private_keys=disable_private_keys)
+ def create_legacy_wallet(self, wallet_name, **kwargs):
+ self.nodes[0].createwallet(wallet_name=wallet_name, descriptors=False, **kwargs)
wallet = self.nodes[0].get_wallet_rpc(wallet_name)
info = wallet.getwalletinfo()
assert_equal(info["descriptors"], False)
assert_equal(info["format"], "bdb")
- assert_equal(info["private_keys_enabled"], not disable_private_keys)
return wallet
def assert_addr_info_equal(self, addr_info, addr_info_old):
@@ -876,6 +875,13 @@ class WalletMigrationTest(BitcoinTestFramework):
_, _, magic = struct.unpack("QII", data)
assert_equal(magic, BTREE_MAGIC)
+ def test_blank(self):
+ self.log.info("Test that a blank wallet is migrated")
+ wallet = self.create_legacy_wallet("blank", blank=True)
+ assert_equal(wallet.getwalletinfo()["blank"], True)
+ wallet.migratewallet()
+ assert_equal(wallet.getwalletinfo()["blank"], True)
+
def test_avoidreuse(self):
self.log.info("Test that avoidreuse persists after migration")
@@ -987,6 +993,7 @@ class WalletMigrationTest(BitcoinTestFramework):
self.test_failed_migration_cleanup()
self.test_avoidreuse()
self.test_preserve_tx_extra_info()
+ self.test_blank()
if __name__ == '__main__':
WalletMigrationTest().main()