-rw-r--r--  src/init.cpp                                   2
-rw-r--r--  src/net.cpp                                   54
-rw-r--r--  src/net.h                                     17
-rw-r--r--  src/net_processing.cpp                        75
-rw-r--r--  src/net_processing.h                           4
-rw-r--r--  src/primitives/transaction.cpp                 2
-rw-r--r--  src/primitives/transaction.h                   8
-rw-r--r--  src/sync.cpp                                  32
-rw-r--r--  src/sync.h                                    10
-rw-r--r--  src/test/fuzz/connman.cpp                      2
-rw-r--r--  src/test/fuzz/script_sigcache.cpp              2
-rw-r--r--  src/test/fuzz/transaction.cpp                  2
-rw-r--r--  src/test/reverselock_tests.cpp                10
-rw-r--r--  src/test/sync_tests.cpp                       37
-rw-r--r--  src/wallet/bdb.cpp                            45
-rw-r--r--  src/wallet/bdb.h                              10
-rw-r--r--  src/wallet/db.cpp                            117
-rw-r--r--  src/wallet/db.h                                8
-rw-r--r--  src/wallet/feebumper.cpp                       2
-rw-r--r--  src/wallet/interfaces.cpp                      2
-rw-r--r--  src/wallet/rpcwallet.cpp                       2
-rw-r--r--  src/wallet/salvage.cpp                         5
-rw-r--r--  src/wallet/sqlite.cpp                         45
-rw-r--r--  src/wallet/sqlite.h                            2
-rw-r--r--  src/wallet/test/db_tests.cpp                   7
-rw-r--r--  src/wallet/test/wallet_tests.cpp               6
-rw-r--r--  src/wallet/wallet.cpp                          5
-rw-r--r--  src/wallet/walletdb.cpp                        8
-rw-r--r--  src/wallet/walletutil.cpp                     55
-rw-r--r--  src/wallet/walletutil.h                        3
-rwxr-xr-x  test/functional/p2p_invalid_messages.py        9
-rwxr-xr-x  test/functional/p2p_leak.py                   19
-rwxr-xr-x  test/functional/p2p_timeouts.py                6
-rwxr-xr-x  test/functional/test_framework/test_node.py   17
-rw-r--r--  test/sanitizer_suppressions/tsan               1
35 files changed, 377 insertions(+), 254 deletions(-)
diff --git a/src/init.cpp b/src/init.cpp
index 5460c9b2b0..09be3d01fa 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -200,7 +200,7 @@ void Shutdown(NodeContext& node)
// using the other before destroying them.
if (node.peerman) UnregisterValidationInterface(node.peerman.get());
// Follow the lock order requirements:
- // * CheckForStaleTipAndEvictPeers locks cs_main before indirectly calling GetExtraOutboundCount
+ // * CheckForStaleTipAndEvictPeers locks cs_main before indirectly calling GetExtraFullOutboundCount
// which locks cs_vNodes.
// * ProcessMessage locks cs_main and g_cs_orphans before indirectly calling ForEachNode which
// locks cs_vNodes.
diff --git a/src/net.cpp b/src/net.cpp
index bbb85694e7..7cb91f1388 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1827,18 +1827,32 @@ void CConnman::SetTryNewOutboundPeer(bool flag)
// Also exclude peers that haven't finished initial connection handshake yet
// (so that we don't decide we're over our desired connection limit, and then
// evict some peer that has finished the handshake)
-int CConnman::GetExtraOutboundCount()
+int CConnman::GetExtraFullOutboundCount()
{
- int nOutbound = 0;
+ int full_outbound_peers = 0;
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) {
- ++nOutbound;
+ if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsFullOutboundConn()) {
+ ++full_outbound_peers;
}
}
}
- return std::max(nOutbound - m_max_outbound_full_relay - m_max_outbound_block_relay, 0);
+ return std::max(full_outbound_peers - m_max_outbound_full_relay, 0);
+}
+
+int CConnman::GetExtraBlockRelayCount()
+{
+ int block_relay_peers = 0;
+ {
+ LOCK(cs_vNodes);
+ for (const CNode* pnode : vNodes) {
+ if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsBlockOnlyConn()) {
+ ++block_relay_peers;
+ }
+ }
+ }
+ return std::max(block_relay_peers - m_max_outbound_block_relay, 0);
}
void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
@@ -1869,6 +1883,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// Minimum time before next feeler connection (in microseconds).
int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL);
+ int64_t nNextExtraBlockRelay = PoissonNextSend(nStart*1000*1000, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
while (!interruptNet)
{
ProcessAddrFetch();
@@ -1941,8 +1956,9 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// until we hit our block-relay-only peer limit.
// GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
// try opening an additional OUTBOUND_FULL_RELAY connection. If none of
- // these conditions are met, check the nNextFeeler timer to decide if
- // we should open a FEELER.
+ // these conditions are met, check to see if it's time to try an extra
+ // block-relay-only peer (to confirm our tip is current, see below) or the nNextFeeler
+ // timer to decide if we should open a FEELER.
if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) {
conn_type = ConnectionType::BLOCK_RELAY;
@@ -1953,6 +1969,30 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
conn_type = ConnectionType::BLOCK_RELAY;
} else if (GetTryNewOutboundPeer()) {
// OUTBOUND_FULL_RELAY
+ } else if (nTime > nNextExtraBlockRelay && m_start_extra_block_relay_peers) {
+ // Periodically connect to a peer (using regular outbound selection
+ // methodology from addrman) and stay connected long enough to sync
+ // headers, but not much else.
+ //
+ // Then disconnect the peer, if we haven't learned anything new.
+ //
+ // The idea is to make eclipse attacks very difficult to pull off,
+ // because every few minutes we're finding a new peer to learn headers
+ // from.
+ //
+ // This is similar to the logic for trying extra outbound (full-relay)
+ // peers, except:
+ // - we do this all the time on a poisson timer, rather than just when
+ // our tip is stale
+ // - we potentially disconnect our next-youngest block-relay-only peer, if our
+ // newest block-relay-only peer delivers a block more recently.
+ // See the eviction logic in net_processing.cpp.
+ //
+ // Because we can promote these connections to block-relay-only
+ // connections, they do not get their own ConnectionType enum
+ // (similar to how we deal with extra outbound peers).
+ nNextExtraBlockRelay = PoissonNextSend(nTime, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+ conn_type = ConnectionType::BLOCK_RELAY;
} else if (nTime > nNextFeeler) {
nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL);
conn_type = ConnectionType::FEELER;
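
The hunk above schedules the extra block-relay-only connection attempts with PoissonNextSend, the same exponentially distributed timer already used for feelers. A minimal standalone sketch of that scheduling idea follows; it is not Bitcoin Core's actual PoissonNextSend, and the RNG choice and function name are illustrative assumptions.

#include <cstdint>
#include <random>

// Draw the next event time (in microseconds) of a Poisson process with the given
// average interval, e.g. the 300s EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL above.
// Exponential inter-arrival times are memoryless, so an observer cannot predict
// when the next extra block-relay-only connection will be opened.
int64_t NextPoissonEventMicros(int64_t now_micros, int average_interval_seconds)
{
    static std::mt19937_64 rng{std::random_device{}()};
    std::exponential_distribution<double> dist(1.0 / average_interval_seconds);
    return now_micros + static_cast<int64_t>(dist(rng) * 1000000.0);
}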
diff --git a/src/net.h b/src/net.h
index 504855c386..41f7fa93ff 100644
--- a/src/net.h
+++ b/src/net.h
@@ -48,6 +48,8 @@ static const bool DEFAULT_WHITELISTFORCERELAY = false;
static const int TIMEOUT_INTERVAL = 20 * 60;
/** Run the feeler connection loop once every 2 minutes or 120 seconds. **/
static const int FEELER_INTERVAL = 120;
+/** Run the extra block-relay-only connection loop once every 5 minutes. **/
+static const int EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL = 300;
/** The maximum number of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_ADDR_TO_SEND = 1000;
/** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */
@@ -330,13 +332,20 @@ public:
void SetTryNewOutboundPeer(bool flag);
bool GetTryNewOutboundPeer();
+ void StartExtraBlockRelayPeers() {
+ LogPrint(BCLog::NET, "net: enabling extra block-relay-only peers\n");
+ m_start_extra_block_relay_peers = true;
+ }
+
// Return the number of outbound peers we have in excess of our target (eg,
// if we previously called SetTryNewOutboundPeer(true), and have since set
// to false, we may have extra peers that we wish to disconnect). This may
// return a value less than (num_outbound_connections - num_outbound_slots)
// in cases where some outbound connections are not yet fully connected, or
// not yet fully disconnected.
- int GetExtraOutboundCount();
+ int GetExtraFullOutboundCount();
+ // Count the number of block-relay-only peers we have over our limit.
+ int GetExtraBlockRelayCount();
bool AddNode(const std::string& node);
bool RemoveAddedNode(const std::string& node);
@@ -594,6 +603,12 @@ private:
* This takes the place of a feeler connection */
std::atomic_bool m_try_another_outbound_peer;
+ /** flag for initiating extra block-relay-only peer connections.
+ * this should only be enabled after initial chain sync has occurred,
+ * as these connections are intended to be short-lived and low-bandwidth.
+ */
+ std::atomic_bool m_start_extra_block_relay_peers{false};
+
std::atomic<int64_t> m_next_send_inv_to_incoming{0};
/**
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 05f418cfaa..b61e640f8e 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -2252,10 +2252,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
if (peer == nullptr) return;
if (msg_type == NetMsgType::VERSION) {
- // Each connection can only send one version message
- if (pfrom.nVersion != 0)
- {
- Misbehaving(pfrom.GetId(), 1, "redundant version message");
+ if (pfrom.nVersion != 0) {
+ LogPrint(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
return;
}
@@ -2449,7 +2447,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
if (pfrom.nVersion == 0) {
// Must have a version message before anything else
- Misbehaving(pfrom.GetId(), 1, "non-version message before version handshake");
+ LogPrint(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
return;
}
@@ -2463,7 +2461,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
pfrom.nVersion.load(), pfrom.nStartingHeight,
pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
- pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
+ pfrom.IsBlockOnlyConn() ? "block-relay" : "full-relay");
}
if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
@@ -3906,11 +3904,54 @@ void PeerManager::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds)
{
- // Check whether we have too many outbound peers
- int extra_peers = m_connman.GetExtraOutboundCount();
- if (extra_peers > 0) {
- // If we have more outbound peers than we target, disconnect one.
- // Pick the outbound peer that least recently announced
+ // If we have any extra block-relay-only peers, disconnect the youngest unless
+ // it's given us a block -- in which case, compare with the second-youngest, and
+ // out of those two, disconnect the peer who least recently gave us a block.
+ // The youngest block-relay-only peer would be the extra peer we connected
+ // to temporarily in order to sync our tip; see net.cpp.
+ // Note that we use higher nodeid as a measure for most recent connection.
+ if (m_connman.GetExtraBlockRelayCount() > 0) {
+ std::pair<NodeId, int64_t> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
+
+ m_connman.ForEachNode([&](CNode* pnode) {
+ if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
+ if (pnode->GetId() > youngest_peer.first) {
+ next_youngest_peer = youngest_peer;
+ youngest_peer.first = pnode->GetId();
+ youngest_peer.second = pnode->nLastBlockTime;
+ }
+ });
+ NodeId to_disconnect = youngest_peer.first;
+ if (youngest_peer.second > next_youngest_peer.second) {
+ // Our newest block-relay-only peer gave us a block more recently;
+ // disconnect our second youngest.
+ to_disconnect = next_youngest_peer.first;
+ }
+ m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
+ AssertLockHeld(::cs_main);
+ // Make sure we're not getting a block right now, and that
+ // we've been connected long enough for this eviction to happen
+ // at all.
+ // Note that we only request blocks from a peer if we learn of a
+ // valid headers chain with at least as much work as our tip.
+ CNodeState *node_state = State(pnode->GetId());
+ if (node_state == nullptr ||
+ (time_in_seconds - pnode->nTimeConnected >= MINIMUM_CONNECT_TIME && node_state->nBlocksInFlight == 0)) {
+ pnode->fDisconnect = true;
+ LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", pnode->GetId(), pnode->nLastBlockTime);
+ return true;
+ } else {
+ LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
+ pnode->GetId(), pnode->nTimeConnected, node_state->nBlocksInFlight);
+ }
+ return false;
+ });
+ }
+
+ // Check whether we have too many OUTBOUND_FULL_RELAY peers
+ if (m_connman.GetExtraFullOutboundCount() > 0) {
+ // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
+ // Pick the OUTBOUND_FULL_RELAY peer that least recently announced
// us a new block, with ties broken by choosing the more recent
// connection (higher node id)
NodeId worst_peer = -1;
@@ -3919,14 +3960,13 @@ void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds)
m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
- // Ignore non-outbound peers, or nodes marked for disconnect already
- if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return;
+ // Only consider OUTBOUND_FULL_RELAY peers that are not already
+ // marked for disconnection
+ if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
CNodeState *state = State(pnode->GetId());
if (state == nullptr) return; // shouldn't be possible, but just in case
// Don't evict our protected peers
if (state->m_chain_sync.m_protect) return;
- // Don't evict our block-relay-only peers.
- if (pnode->m_tx_relay == nullptr) return;
if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
worst_peer = pnode->GetId();
oldest_block_announcement = state->m_last_block_announcement;
@@ -3982,6 +4022,11 @@ void PeerManager::CheckForStaleTipAndEvictPeers()
}
m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
}
+
+ if (!m_initial_sync_finished && CanDirectFetch(m_chainparams.GetConsensus())) {
+ m_connman.StartExtraBlockRelayPeers();
+ m_initial_sync_finished = true;
+ }
}
namespace {
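
The eviction hunk above keeps the newest block-relay-only peer only if it has delivered a block more recently than the runner-up. Below is a condensed, hypothetical restatement of that selection rule; the helper name is illustrative, and the extra else branch simply makes the result independent of input order (the patch itself iterates via ForEachNode).

#include <cstdint>
#include <utility>
#include <vector>

using NodeId = int64_t;

// Given (node id, last-block-time) pairs for block-relay-only peers, return the
// id to disconnect: the youngest peer (highest id) unless it gave us a block
// more recently than the second youngest, in which case evict the second youngest.
NodeId SelectBlockRelayPeerToEvict(const std::vector<std::pair<NodeId, int64_t>>& peers)
{
    std::pair<NodeId, int64_t> youngest{-1, 0}, next_youngest{-1, 0};
    for (const auto& [id, last_block_time] : peers) {
        if (id > youngest.first) {
            next_youngest = youngest;
            youngest = {id, last_block_time};
        } else if (id > next_youngest.first) {
            next_youngest = {id, last_block_time};
        }
    }
    return youngest.second > next_youngest.second ? next_youngest.first : youngest.first;
}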
diff --git a/src/net_processing.h b/src/net_processing.h
index d5b54dae56..12a4e9c38f 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -206,6 +206,10 @@ private:
//* Whether this node is running in blocks only mode */
const bool m_ignore_incoming_txs;
+ /** Whether we've completed initial sync yet, for determining when to turn
+ * on extra block-relay-only peers. */
+ bool m_initial_sync_finished{false};
+
/** Protects m_peer_map */
mutable Mutex m_peer_mutex;
/**
diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp
index e6183cf2f4..245206b906 100644
--- a/src/primitives/transaction.cpp
+++ b/src/primitives/transaction.cpp
@@ -77,8 +77,6 @@ uint256 CTransaction::ComputeWitnessHash() const
return SerializeHash(*this, SER_GETHASH, 0);
}
-/* For backward compatibility, the hash is initialized to 0. TODO: remove the need for this default constructor entirely. */
-CTransaction::CTransaction() : vin(), vout(), nVersion(CTransaction::CURRENT_VERSION), nLockTime(0), hash{}, m_witness_hash{} {}
CTransaction::CTransaction(const CMutableTransaction& tx) : vin(tx.vin), vout(tx.vout), nVersion(tx.nVersion), nLockTime(tx.nLockTime), hash{ComputeHash()}, m_witness_hash{ComputeWitnessHash()} {}
CTransaction::CTransaction(CMutableTransaction&& tx) : vin(std::move(tx.vin)), vout(std::move(tx.vout)), nVersion(tx.nVersion), nLockTime(tx.nLockTime), hash{ComputeHash()}, m_witness_hash{ComputeWitnessHash()} {}
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 00544f64fe..c1e9f0af21 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -287,12 +287,9 @@ private:
uint256 ComputeWitnessHash() const;
public:
- /** Construct a CTransaction that qualifies as IsNull() */
- CTransaction();
-
/** Convert a CMutableTransaction into a CTransaction. */
- explicit CTransaction(const CMutableTransaction &tx);
- CTransaction(CMutableTransaction &&tx);
+ explicit CTransaction(const CMutableTransaction& tx);
+ CTransaction(CMutableTransaction&& tx);
template <typename Stream>
inline void Serialize(Stream& s) const {
@@ -393,7 +390,6 @@ struct CMutableTransaction
};
typedef std::shared_ptr<const CTransaction> CTransactionRef;
-static inline CTransactionRef MakeTransactionRef() { return std::make_shared<const CTransaction>(); }
template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txIn) { return std::make_shared<const CTransaction>(std::forward<Tx>(txIn)); }
/** A generic txid reference (txid or wtxid). */
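
With the default CTransaction constructor and the zero-argument MakeTransactionRef() removed above, callers that still need an empty transaction spell it out through CMutableTransaction, as the fuzz targets later in this diff now do. A usage sketch, with an illustrative helper name:

#include <primitives/transaction.h>

// Build an IsNull() transaction explicitly instead of relying on the removed
// default constructor; MakeTransactionRef() without arguments no longer exists.
CTransactionRef MakeEmptyTransactionForTests()
{
    return MakeTransactionRef(CMutableTransaction{});
}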
diff --git a/src/sync.cpp b/src/sync.cpp
index f07916041a..acfbe8fe29 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -228,20 +228,28 @@ template void EnterCritical(const char*, const char*, int, boost::mutex*, bool);
void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
{
- {
- LockData& lockdata = GetLockData();
- std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
-
- const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
- if (!lock_stack.empty()) {
- const auto& lastlock = lock_stack.back();
- if (lastlock.first == cs) {
- lockname = lastlock.second.Name();
- return;
- }
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+ const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+ if (!lock_stack.empty()) {
+ const auto& lastlock = lock_stack.back();
+ if (lastlock.first == cs) {
+ lockname = lastlock.second.Name();
+ return;
}
}
- throw std::system_error(EPERM, std::generic_category(), strprintf("%s:%s %s was not most recent critical section locked", file, line, guardname));
+
+ LogPrintf("INCONSISTENT LOCK ORDER DETECTED\n");
+ LogPrintf("Current lock order (least recent first) is:\n");
+ for (const LockStackItem& i : lock_stack) {
+ LogPrintf(" %s\n", i.second.ToString());
+ }
+ if (g_debug_lockorder_abort) {
+ tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname);
+ abort();
+ }
+ throw std::logic_error(strprintf("%s was not most recent critical section locked", guardname));
}
void LeaveCritical()
diff --git a/src/sync.h b/src/sync.h
index 0948083c7f..749bf5575c 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -242,10 +242,12 @@ using DebugLock = UniqueLock<typename std::remove_reference<typename std::remove
(cs).lock(); \
}
-#define LEAVE_CRITICAL_SECTION(cs) \
- { \
- (cs).unlock(); \
- LeaveCritical(); \
+#define LEAVE_CRITICAL_SECTION(cs) \
+ { \
+ std::string lockname; \
+ CheckLastCritical((void*)(&cs), lockname, #cs, __FILE__, __LINE__); \
+ (cs).unlock(); \
+ LeaveCritical(); \
}
//! Run code while locking a mutex.
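
Because LEAVE_CRITICAL_SECTION now calls CheckLastCritical, unlocking a mutex that is not the most recently entered critical section is caught in DEBUG_LOCKORDER builds, which the new sync_tests case later in this diff exercises. A hedged usage sketch of the pattern that trips it; the function and mutex names are illustrative.

#include <sync.h>

RecursiveMutex g_mutex_a;
RecursiveMutex g_mutex_b;

void BadUnlockOrder() NO_THREAD_SAFETY_ANALYSIS
{
    ENTER_CRITICAL_SECTION(g_mutex_a);
    ENTER_CRITICAL_SECTION(g_mutex_b);
    // g_mutex_a is not the most recently entered critical section, so under
    // DEBUG_LOCKORDER this either aborts (if g_debug_lockorder_abort) or throws
    // std::logic_error("... was not most recent critical section locked").
    LEAVE_CRITICAL_SECTION(g_mutex_a);
    LEAVE_CRITICAL_SECTION(g_mutex_b);
}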
diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp
index 6521c3f3b2..bb97f58cf2 100644
--- a/src/test/fuzz/connman.cpp
+++ b/src/test/fuzz/connman.cpp
@@ -145,7 +145,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
(void)connman.GetAddedNodeInfo();
(void)connman.GetBestHeight();
- (void)connman.GetExtraOutboundCount();
+ (void)connman.GetExtraFullOutboundCount();
(void)connman.GetLocalServices();
(void)connman.GetMaxOutboundTarget();
(void)connman.GetMaxOutboundTimeframe();
diff --git a/src/test/fuzz/script_sigcache.cpp b/src/test/fuzz/script_sigcache.cpp
index 87af71897b..d67654bde3 100644
--- a/src/test/fuzz/script_sigcache.cpp
+++ b/src/test/fuzz/script_sigcache.cpp
@@ -29,7 +29,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
const std::optional<CMutableTransaction> mutable_transaction = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider);
- const CTransaction tx = mutable_transaction ? CTransaction{*mutable_transaction} : CTransaction{};
+ const CTransaction tx{mutable_transaction ? *mutable_transaction : CMutableTransaction{}};
const unsigned int n_in = fuzzed_data_provider.ConsumeIntegral<unsigned int>();
const CAmount amount = ConsumeMoney(fuzzed_data_provider);
const bool store = fuzzed_data_provider.ConsumeBool();
diff --git a/src/test/fuzz/transaction.cpp b/src/test/fuzz/transaction.cpp
index 4f972dea1c..9e40883709 100644
--- a/src/test/fuzz/transaction.cpp
+++ b/src/test/fuzz/transaction.cpp
@@ -42,7 +42,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
return CTransaction(deserialize, ds);
} catch (const std::ios_base::failure&) {
valid_tx = false;
- return CTransaction();
+ return CTransaction{CMutableTransaction{}};
}
}();
bool valid_mutable_tx = true;
diff --git a/src/test/reverselock_tests.cpp b/src/test/reverselock_tests.cpp
index a42608a66d..7da364d316 100644
--- a/src/test/reverselock_tests.cpp
+++ b/src/test/reverselock_tests.cpp
@@ -48,12 +48,14 @@ BOOST_AUTO_TEST_CASE(reverselock_errors)
WAIT_LOCK(mutex, lock);
#ifdef DEBUG_LOCKORDER
+ bool prev = g_debug_lockorder_abort;
+ g_debug_lockorder_abort = false;
+
// Make sure trying to reverse lock a previous lock fails
- try {
- REVERSE_LOCK(lock2);
- BOOST_CHECK(false); // REVERSE_LOCK(lock2) succeeded
- } catch(...) { }
+ BOOST_CHECK_EXCEPTION(REVERSE_LOCK(lock2), std::logic_error, HasReason("lock2 was not most recent critical section locked"));
BOOST_CHECK(lock2.owns_lock());
+
+ g_debug_lockorder_abort = prev;
#endif
// Make sure trying to reverse lock an unlocked lock fails
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 14145ced7e..71275f69d9 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -62,6 +62,19 @@ void TestDoubleLock(bool should_throw)
g_debug_lockorder_abort = prev;
}
#endif /* DEBUG_LOCKORDER */
+
+template <typename MutexType>
+void TestInconsistentLockOrderDetected(MutexType& mutex1, MutexType& mutex2) NO_THREAD_SAFETY_ANALYSIS
+{
+ ENTER_CRITICAL_SECTION(mutex1);
+ ENTER_CRITICAL_SECTION(mutex2);
+#ifdef DEBUG_LOCKORDER
+ BOOST_CHECK_EXCEPTION(LEAVE_CRITICAL_SECTION(mutex1), std::logic_error, HasReason("mutex1 was not most recent critical section locked"));
+#endif // DEBUG_LOCKORDER
+ LEAVE_CRITICAL_SECTION(mutex2);
+ LEAVE_CRITICAL_SECTION(mutex1);
+ BOOST_CHECK(LockStackEmpty());
+}
} // namespace
BOOST_FIXTURE_TEST_SUITE(sync_tests, BasicTestingSetup)
@@ -108,4 +121,28 @@ BOOST_AUTO_TEST_CASE(double_lock_recursive_mutex)
}
#endif /* DEBUG_LOCKORDER */
+BOOST_AUTO_TEST_CASE(inconsistent_lock_order_detected)
+{
+#ifdef DEBUG_LOCKORDER
+ bool prev = g_debug_lockorder_abort;
+ g_debug_lockorder_abort = false;
+#endif // DEBUG_LOCKORDER
+
+ RecursiveMutex rmutex1, rmutex2;
+ TestInconsistentLockOrderDetected(rmutex1, rmutex2);
+ // By checking lock order consistency (CheckLastCritical) before any unlocking (LeaveCritical)
+ // the lock tracking data must not have been broken by exception.
+ TestInconsistentLockOrderDetected(rmutex1, rmutex2);
+
+ Mutex mutex1, mutex2;
+ TestInconsistentLockOrderDetected(mutex1, mutex2);
+ // By checking lock order consistency (CheckLastCritical) before any unlocking (LeaveCritical)
+ // the lock tracking data must not have been broken by exception.
+ TestInconsistentLockOrderDetected(mutex1, mutex2);
+
+#ifdef DEBUG_LOCKORDER
+ g_debug_lockorder_abort = prev;
+#endif // DEBUG_LOCKORDER
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 85aae0170d..6ed48593fb 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -53,16 +53,13 @@ bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId& rhs) const
}
/**
- * @param[in] wallet_path Path to wallet directory. Or (for backwards compatibility only) a path to a berkeley btree data file inside a wallet directory.
- * @param[out] database_filename Filename of berkeley btree data file inside the wallet directory.
+ * @param[in] env_directory Path to environment directory
* @return A shared pointer to the BerkeleyEnvironment object for the wallet directory, never empty because ~BerkeleyEnvironment
* erases the weak pointer from the g_dbenvs map.
* @post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map.
*/
-std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename)
+std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory)
{
- fs::path env_directory;
- SplitWalletPath(wallet_path, env_directory, database_filename);
LOCK(cs_db);
auto inserted = g_dbenvs.emplace(env_directory.string(), std::weak_ptr<BerkeleyEnvironment>());
if (inserted.second) {
@@ -808,21 +805,14 @@ std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(bool flush_on_close)
return MakeUnique<BerkeleyBatch>(*this, false, flush_on_close);
}
-bool ExistsBerkeleyDatabase(const fs::path& path)
-{
- fs::path env_directory;
- std::string data_filename;
- SplitWalletPath(path, env_directory, data_filename);
- return IsBDBFile(env_directory / data_filename);
-}
-
std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error)
{
+ fs::path data_file = BDBDataFile(path);
std::unique_ptr<BerkeleyDatabase> db;
{
LOCK(cs_db); // Lock env.m_databases until insert in BerkeleyDatabase constructor
- std::string data_filename;
- std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(path, data_filename);
+ std::string data_filename = data_file.filename().string();
+ std::shared_ptr<BerkeleyEnvironment> env = GetBerkeleyEnv(data_file.parent_path());
if (env->m_databases.count(data_filename)) {
error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", (env->Directory() / data_filename).string()));
status = DatabaseStatus::FAILED_ALREADY_LOADED;
@@ -839,28 +829,3 @@ std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, con
status = DatabaseStatus::SUCCESS;
return db;
}
-
-bool IsBDBFile(const fs::path& path)
-{
- if (!fs::exists(path)) return false;
-
- // A Berkeley DB Btree file has at least 4K.
- // This check also prevents opening lock files.
- boost::system::error_code ec;
- auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
- if (size < 4096) return false;
-
- fsbridge::ifstream file(path, std::ios::binary);
- if (!file.is_open()) return false;
-
- file.seekg(12, std::ios::beg); // Magic bytes start at offset 12
- uint32_t data = 0;
- file.read((char*) &data, sizeof(data)); // Read 4 bytes of file to compare against magic
-
- // Berkeley DB Btree magic bytes, from:
- // https://github.com/file/file/blob/5824af38469ec1ca9ac3ffd251e7afe9dc11e227/magic/Magdir/database#L74-L75
- // - big endian systems - 00 05 31 62
- // - little endian systems - 62 31 05 00
- return data == 0x00053162 || data == 0x62310500;
-}
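
GetWalletEnv(path, filename) is replaced above by GetBerkeleyEnv() plus the shared BDBDataFile() helper, so callers now derive the data filename themselves. A sketch of the new call pattern, mirroring the small helper the db_tests diff adds further down (the wrapper name here is illustrative):

#include <wallet/bdb.h>
#include <wallet/db.h>

#include <memory>
#include <string>

// Resolve a wallet path to its BDB environment and data filename using the
// split helpers (BDBDataFile lives in wallet/db, GetBerkeleyEnv in wallet/bdb).
std::shared_ptr<BerkeleyEnvironment> OpenBerkeleyEnvFor(const fs::path& wallet_path, std::string& database_filename)
{
    const fs::path data_file = BDBDataFile(wallet_path);
    database_filename = data_file.filename().string();
    return GetBerkeleyEnv(data_file.parent_path());
}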
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index 4f97665f08..bf1617d67f 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -83,11 +83,8 @@ public:
}
};
-/** Get BerkeleyEnvironment and database filename given a wallet path. */
-std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename);
-
-/** Check format of database file */
-bool IsBDBFile(const fs::path& path);
+/** Get BerkeleyEnvironment given a directory path. */
+std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory);
class BerkeleyBatch;
@@ -226,9 +223,6 @@ public:
std::string BerkeleyDatabaseVersion();
-//! Check if Berkeley database exists at specified path.
-bool ExistsBerkeleyDatabase(const fs::path& path);
-
//! Return object giving access to Berkeley database at specified path.
std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index bd1d114730..cd49baeb78 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -3,23 +3,130 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <chainparams.h>
#include <fs.h>
+#include <logging.h>
#include <wallet/db.h>
#include <string>
-void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename)
+std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
+{
+ const size_t offset = wallet_dir.string().size() + 1;
+ std::vector<fs::path> paths;
+ boost::system::error_code ec;
+
+ for (auto it = fs::recursive_directory_iterator(wallet_dir, ec); it != fs::recursive_directory_iterator(); it.increment(ec)) {
+ if (ec) {
+ LogPrintf("%s: %s %s\n", __func__, ec.message(), it->path().string());
+ continue;
+ }
+
+ try {
+ // Get wallet path relative to walletdir by removing walletdir from the wallet path.
+ // This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60.
+ const fs::path path = it->path().string().substr(offset);
+
+ if (it->status().type() == fs::directory_file &&
+ (IsBDBFile(BDBDataFile(it->path())) || IsSQLiteFile(SQLiteDataFile(it->path())))) {
+ // Found a directory which contains wallet.dat btree file, add it as a wallet.
+ paths.emplace_back(path);
+ } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && IsBDBFile(it->path())) {
+ if (it->path().filename() == "wallet.dat") {
+ // Found top-level wallet.dat btree file, add top level directory ""
+ // as a wallet.
+ paths.emplace_back();
+ } else {
+ // Found top-level btree file not called wallet.dat. Current bitcoin
+ // software will never create these files but will allow them to be
+ // opened in a shared database environment for backwards compatibility.
+ // Add it to the list of available wallets.
+ paths.emplace_back(path);
+ }
+ }
+ } catch (const std::exception& e) {
+ LogPrintf("%s: Error scanning %s: %s\n", __func__, it->path().string(), e.what());
+ it.no_push();
+ }
+ }
+
+ return paths;
+}
+
+fs::path BDBDataFile(const fs::path& wallet_path)
{
if (fs::is_regular_file(wallet_path)) {
// Special case for backwards compatibility: if wallet path points to an
// existing file, treat it as the path to a BDB data file in a parent
// directory that also contains BDB log files.
- env_directory = wallet_path.parent_path();
- database_filename = wallet_path.filename().string();
+ return wallet_path;
} else {
// Normal case: Interpret wallet path as a directory path containing
// data and log files.
- env_directory = wallet_path;
- database_filename = "wallet.dat";
+ return wallet_path / "wallet.dat";
}
}
+
+fs::path SQLiteDataFile(const fs::path& path)
+{
+ return path / "wallet.dat";
+}
+
+bool IsBDBFile(const fs::path& path)
+{
+ if (!fs::exists(path)) return false;
+
+ // A Berkeley DB Btree file has at least 4K.
+ // This check also prevents opening lock files.
+ boost::system::error_code ec;
+ auto size = fs::file_size(path, ec);
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (size < 4096) return false;
+
+ fsbridge::ifstream file(path, std::ios::binary);
+ if (!file.is_open()) return false;
+
+ file.seekg(12, std::ios::beg); // Magic bytes start at offset 12
+ uint32_t data = 0;
+ file.read((char*) &data, sizeof(data)); // Read 4 bytes of file to compare against magic
+
+ // Berkeley DB Btree magic bytes, from:
+ // https://github.com/file/file/blob/5824af38469ec1ca9ac3ffd251e7afe9dc11e227/magic/Magdir/database#L74-L75
+ // - big endian systems - 00 05 31 62
+ // - little endian systems - 62 31 05 00
+ return data == 0x00053162 || data == 0x62310500;
+}
+
+bool IsSQLiteFile(const fs::path& path)
+{
+ if (!fs::exists(path)) return false;
+
+ // A SQLite Database file is at least 512 bytes.
+ boost::system::error_code ec;
+ auto size = fs::file_size(path, ec);
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (size < 512) return false;
+
+ fsbridge::ifstream file(path, std::ios::binary);
+ if (!file.is_open()) return false;
+
+ // Magic is at beginning and is 16 bytes long
+ char magic[16];
+ file.read(magic, 16);
+
+ // Application id is at offset 68 and 4 bytes long
+ file.seekg(68, std::ios::beg);
+ char app_id[4];
+ file.read(app_id, 4);
+
+ file.close();
+
+ // Check the magic, see https://sqlite.org/fileformat2.html
+ std::string magic_str(magic, 16);
+ if (magic_str != std::string("SQLite format 3", 16)) {
+ return false;
+ }
+
+ // Check the application id matches our network magic
+ return memcmp(Params().MessageStart(), app_id, 4) == 0;
+}
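
With IsBDBFile()/IsSQLiteFile() and the *DataFile() helpers now living in wallet/db, format detection no longer needs the backend-specific Exists*Database() wrappers. A minimal sketch of how a caller could combine them, along the lines of the MakeDatabase() hunk later in this diff; the enum and function name are illustrative, and unlike MakeDatabase() this sketch does not reject the ambiguous both-present case.

#include <fs.h>
#include <wallet/db.h>

enum class DetectedWalletFormat { NONE, BDB, SQLITE };

// Probe the wallet path for either backend's data file by its magic bytes.
DetectedWalletFormat DetectWalletFormat(const fs::path& wallet_path)
{
    if (IsBDBFile(BDBDataFile(wallet_path))) return DetectedWalletFormat::BDB;
    if (IsSQLiteFile(SQLiteDataFile(wallet_path))) return DetectedWalletFormat::SQLITE;
    return DetectedWalletFormat::NONE;
}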
diff --git a/src/wallet/db.h b/src/wallet/db.h
index 940d1cd242..2c75486a44 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -223,6 +223,14 @@ enum class DatabaseStatus {
FAILED_ENCRYPT,
};
+/** Recursively list database paths in directory. */
+std::vector<fs::path> ListDatabases(const fs::path& path);
+
std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
+fs::path BDBDataFile(const fs::path& path);
+fs::path SQLiteDataFile(const fs::path& path);
+bool IsBDBFile(const fs::path& path);
+bool IsSQLiteFile(const fs::path& path);
+
#endif // BITCOIN_WALLET_DB_H
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index 7a8bc0b7f3..1bbfa197d7 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -215,7 +215,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
// We cannot source new unconfirmed inputs(bip125 rule 2)
new_coin_control.m_min_depth = 1;
- CTransactionRef tx_new = MakeTransactionRef();
+ CTransactionRef tx_new;
CAmount fee_ret;
int change_pos_in_out = -1; // No requested location for change
bilingual_str fail_reason;
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index e8dbc20e56..e4e8c50f4f 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -551,7 +551,7 @@ public:
std::vector<std::string> listWalletDir() override
{
std::vector<std::string> paths;
- for (auto& path : ListWalletDir()) {
+ for (auto& path : ListDatabases(GetWalletDir())) {
paths.push_back(path.string());
}
return paths;
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index e580c9a8ea..94a73b67df 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2537,7 +2537,7 @@ static RPCHelpMan listwalletdir()
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
UniValue wallets(UniValue::VARR);
- for (const auto& path : ListWalletDir()) {
+ for (const auto& path : ListDatabases(GetWalletDir())) {
UniValue wallet(UniValue::VOBJ);
wallet.pushKV("name", path.string());
wallets.push_back(wallet);
diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp
index da5ca7858f..09a9ec68cd 100644
--- a/src/wallet/salvage.cpp
+++ b/src/wallet/salvage.cpp
@@ -32,8 +32,9 @@ bool RecoverDatabaseFile(const fs::path& file_path, bilingual_str& error, std::v
std::unique_ptr<WalletDatabase> database = MakeDatabase(file_path, options, status, error);
if (!database) return false;
- std::string filename;
- std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename);
+ BerkeleyDatabase& berkeley_database = static_cast<BerkeleyDatabase&>(*database);
+ std::string filename = berkeley_database.Filename();
+ std::shared_ptr<BerkeleyEnvironment> env = berkeley_database.env;
if (!env->Open(error)) {
return false;
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index d278d96476..0fb3b1d3c4 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -17,7 +17,6 @@
#include <sqlite3.h>
#include <stdint.h>
-static const char* const DATABASE_FILENAME = "wallet.dat";
static constexpr int32_t WALLET_SCHEMA_VERSION = 0;
static Mutex g_sqlite_mutex;
@@ -568,17 +567,11 @@ bool SQLiteBatch::TxnAbort()
return res == SQLITE_OK;
}
-bool ExistsSQLiteDatabase(const fs::path& path)
-{
- const fs::path file = path / DATABASE_FILENAME;
- return fs::symlink_status(file).type() == fs::regular_file && IsSQLiteFile(file);
-}
-
std::unique_ptr<SQLiteDatabase> MakeSQLiteDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error)
{
- const fs::path file = path / DATABASE_FILENAME;
try {
- auto db = MakeUnique<SQLiteDatabase>(path, file);
+ fs::path data_file = SQLiteDataFile(path);
+ auto db = MakeUnique<SQLiteDatabase>(data_file.parent_path(), data_file);
if (options.verify && !db->Verify(error)) {
status = DatabaseStatus::FAILED_VERIFY;
return nullptr;
@@ -596,37 +589,3 @@ std::string SQLiteDatabaseVersion()
{
return std::string(sqlite3_libversion());
}
-
-bool IsSQLiteFile(const fs::path& path)
-{
- if (!fs::exists(path)) return false;
-
- // A SQLite Database file is at least 512 bytes.
- boost::system::error_code ec;
- auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
- if (size < 512) return false;
-
- fsbridge::ifstream file(path, std::ios::binary);
- if (!file.is_open()) return false;
-
- // Magic is at beginning and is 16 bytes long
- char magic[16];
- file.read(magic, 16);
-
- // Application id is at offset 68 and 4 bytes long
- file.seekg(68, std::ios::beg);
- char app_id[4];
- file.read(app_id, 4);
-
- file.close();
-
- // Check the magic, see https://sqlite.org/fileformat2.html
- std::string magic_str(magic, 16);
- if (magic_str != std::string("SQLite format 3", 16)) {
- return false;
- }
-
- // Check the application id matches our network magic
- return memcmp(Params().MessageStart(), app_id, 4) == 0;
-}
diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h
index 693a2ef55a..442563184e 100644
--- a/src/wallet/sqlite.h
+++ b/src/wallet/sqlite.h
@@ -113,10 +113,8 @@ public:
sqlite3* m_db{nullptr};
};
-bool ExistsSQLiteDatabase(const fs::path& path);
std::unique_ptr<SQLiteDatabase> MakeSQLiteDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
std::string SQLiteDatabaseVersion();
-bool IsSQLiteFile(const fs::path& path);
#endif // BITCOIN_WALLET_SQLITE_H
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index 8f0083cd2e..1a28852a6b 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -13,6 +13,13 @@
BOOST_FIXTURE_TEST_SUITE(db_tests, BasicTestingSetup)
+static std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& path, std::string& database_filename)
+{
+ fs::path data_file = BDBDataFile(path);
+ database_filename = data_file.filename().string();
+ return GetBerkeleyEnv(data_file.parent_path());
+}
+
BOOST_AUTO_TEST_CASE(getwalletenv_file)
{
std::string test_name = "test_name.dat";
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index eb0bbb6ccc..a6db261914 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -28,6 +28,8 @@ RPCHelpMan importmulti();
RPCHelpMan dumpwallet();
RPCHelpMan importwallet();
+extern RecursiveMutex cs_wallets;
+
// Ensure that fee levels defined in the wallet are at least as high
// as the default levels for node policy.
static_assert(DEFAULT_TRANSACTION_MINFEE >= DEFAULT_MIN_RELAY_TX_FEE, "wallet minimum fee is smaller than default relay fee");
@@ -761,16 +763,18 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
// deadlock during the sync and simulates a new block notification happening
// as soon as possible.
addtx_count = 0;
- auto handler = HandleLoadWallet([&](std::unique_ptr<interfaces::Wallet> wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet->wallet()->cs_wallet) {
+ auto handler = HandleLoadWallet([&](std::unique_ptr<interfaces::Wallet> wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet->wallet()->cs_wallet, cs_wallets) {
BOOST_CHECK(rescan_completed);
m_coinbase_txns.push_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
block_tx = TestSimpleSpend(*m_coinbase_txns[2], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
m_coinbase_txns.push_back(CreateAndProcessBlock({block_tx}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
mempool_tx = TestSimpleSpend(*m_coinbase_txns[3], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
BOOST_CHECK(m_node.chain->broadcastTransaction(MakeTransactionRef(mempool_tx), DEFAULT_TRANSACTION_MAXFEE, false, error));
+ LEAVE_CRITICAL_SECTION(cs_wallets);
LEAVE_CRITICAL_SECTION(wallet->wallet()->cs_wallet);
SyncWithValidationInterfaceQueue();
ENTER_CRITICAL_SECTION(wallet->wallet()->cs_wallet);
+ ENTER_CRITICAL_SECTION(cs_wallets);
});
wallet = TestLoadWallet(*m_node.chain);
BOOST_CHECK_EQUAL(addtx_count, 4);
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 8350d66fa7..3e37491a23 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -52,7 +52,7 @@ const std::map<uint64_t,std::string> WALLET_FLAG_CAVEATS{
static const size_t OUTPUT_GROUP_MAX_ENTRIES = 10;
-static RecursiveMutex cs_wallets;
+RecursiveMutex cs_wallets;
static std::vector<std::shared_ptr<CWallet>> vpwallets GUARDED_BY(cs_wallets);
static std::list<LoadWalletFn> g_load_wallet_fns GUARDED_BY(cs_wallets);
@@ -3113,13 +3113,14 @@ bool CWallet::CreateTransaction(
bool sign)
{
int nChangePosIn = nChangePosInOut;
- CTransactionRef tx2 = tx;
+ Assert(!tx); // tx is an out-param. TODO change the return type from bool to tx (or nullptr)
bool res = CreateTransactionInternal(vecSend, tx, nFeeRet, nChangePosInOut, error, coin_control, fee_calc_out, sign);
// try with avoidpartialspends unless it's enabled already
if (res && nFeeRet > 0 /* 0 means non-functional fee rate estimation */ && m_max_aps_fee > -1 && !coin_control.m_avoid_partial_spends) {
CCoinControl tmp_cc = coin_control;
tmp_cc.m_avoid_partial_spends = true;
CAmount nFeeRet2;
+ CTransactionRef tx2;
int nChangePosInOut2 = nChangePosIn;
bilingual_str error2; // fired and forgotten; if an error occurs, we discard the results
if (CreateTransactionInternal(vecSend, tx2, nFeeRet2, nChangePosInOut2, error2, tmp_cc, fee_calc_out, sign)) {
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index c0521d3386..5b72a01939 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -1013,13 +1013,10 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
Optional<DatabaseFormat> format;
if (exists) {
-#ifdef USE_BDB
- if (ExistsBerkeleyDatabase(path)) {
+ if (IsBDBFile(BDBDataFile(path))) {
format = DatabaseFormat::BERKELEY;
}
-#endif
-#ifdef USE_SQLITE
- if (ExistsSQLiteDatabase(path)) {
+ if (IsSQLiteFile(SQLiteDataFile(path))) {
if (format) {
error = Untranslated(strprintf("Failed to load database path '%s'. Data is in ambiguous format.", path.string()));
status = DatabaseStatus::FAILED_BAD_FORMAT;
@@ -1027,7 +1024,6 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
}
format = DatabaseFormat::SQLITE;
}
-#endif
} else if (options.require_existing) {
error = Untranslated(strprintf("Failed to load database path '%s'. Path does not exist.", path.string()));
status = DatabaseStatus::FAILED_NOT_FOUND;
diff --git a/src/wallet/walletutil.cpp b/src/wallet/walletutil.cpp
index d6e6f015db..16ddad3a84 100644
--- a/src/wallet/walletutil.cpp
+++ b/src/wallet/walletutil.cpp
@@ -7,17 +7,6 @@
#include <logging.h>
#include <util/system.h>
-#ifdef USE_BDB
-bool ExistsBerkeleyDatabase(const fs::path& path);
-#else
-# define ExistsBerkeleyDatabase(path) (false)
-#endif
-#ifdef USE_SQLITE
-bool ExistsSQLiteDatabase(const fs::path& path);
-#else
-# define ExistsSQLiteDatabase(path) (false)
-#endif
-
fs::path GetWalletDir()
{
fs::path path;
@@ -40,50 +29,6 @@ fs::path GetWalletDir()
return path;
}
-std::vector<fs::path> ListWalletDir()
-{
- const fs::path wallet_dir = GetWalletDir();
- const size_t offset = wallet_dir.string().size() + 1;
- std::vector<fs::path> paths;
- boost::system::error_code ec;
-
- for (auto it = fs::recursive_directory_iterator(wallet_dir, ec); it != fs::recursive_directory_iterator(); it.increment(ec)) {
- if (ec) {
- LogPrintf("%s: %s %s\n", __func__, ec.message(), it->path().string());
- continue;
- }
-
- try {
- // Get wallet path relative to walletdir by removing walletdir from the wallet path.
- // This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60.
- const fs::path path = it->path().string().substr(offset);
-
- if (it->status().type() == fs::directory_file &&
- (ExistsBerkeleyDatabase(it->path()) || ExistsSQLiteDatabase(it->path()))) {
- // Found a directory which contains wallet.dat btree file, add it as a wallet.
- paths.emplace_back(path);
- } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && ExistsBerkeleyDatabase(it->path())) {
- if (it->path().filename() == "wallet.dat") {
- // Found top-level wallet.dat btree file, add top level directory ""
- // as a wallet.
- paths.emplace_back();
- } else {
- // Found top-level btree file not called wallet.dat. Current bitcoin
- // software will never create these files but will allow them to be
- // opened in a shared database environment for backwards compatibility.
- // Add it to the list of available wallets.
- paths.emplace_back(path);
- }
- }
- } catch (const std::exception& e) {
- LogPrintf("%s: Error scanning %s: %s\n", __func__, it->path().string(), e.what());
- it.no_push();
- }
- }
-
- return paths;
-}
-
bool IsFeatureSupported(int wallet_version, int feature_version)
{
return wallet_version >= feature_version;
diff --git a/src/wallet/walletutil.h b/src/wallet/walletutil.h
index 27521abd81..d4143ceff4 100644
--- a/src/wallet/walletutil.h
+++ b/src/wallet/walletutil.h
@@ -65,9 +65,6 @@ enum WalletFlags : uint64_t {
//! Get the path of the wallet directory.
fs::path GetWalletDir();
-//! Get wallets in wallet directory.
-std::vector<fs::path> ListWalletDir();
-
/** Descriptor with some wallet metadata */
class WalletDescriptor
{
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index db72a361d9..c0b3c2cb12 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -18,6 +18,7 @@ from test_framework.messages import (
msg_inv,
msg_ping,
MSG_TX,
+ msg_version,
ser_string,
)
from test_framework.p2p import (
@@ -60,6 +61,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
def run_test(self):
self.test_buffer()
+ self.test_duplicate_version_msg()
self.test_magic_bytes()
self.test_checksum()
self.test_size()
@@ -92,6 +94,13 @@ class InvalidMessagesTest(BitcoinTestFramework):
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
+ def test_duplicate_version_msg(self):
+ self.log.info("Test duplicate version message is ignored")
+ conn = self.nodes[0].add_p2p_connection(P2PDataStore())
+ with self.nodes[0].assert_debug_log(['redundant version message from peer']):
+ conn.send_and_ping(msg_version())
+ self.nodes[0].disconnect_p2ps()
+
def test_magic_bytes(self):
self.log.info("Test message with invalid magic bytes disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
index 4b32d60db0..ca8bf908a9 100755
--- a/test/functional/p2p_leak.py
+++ b/test/functional/p2p_leak.py
@@ -24,8 +24,6 @@ from test_framework.util import (
assert_greater_than_or_equal,
)
-DISCOURAGEMENT_THRESHOLD = 100
-
class LazyPeer(P2PInterface):
def __init__(self):
@@ -93,27 +91,16 @@ class P2PLeakTest(BitcoinTestFramework):
self.num_nodes = 1
def run_test(self):
- # Peer that never sends a version. We will send a bunch of messages
- # from this peer anyway and verify eventual disconnection.
- no_version_disconnect_peer = self.nodes[0].add_p2p_connection(
- LazyPeer(), send_version=False, wait_for_verack=False)
-
# Another peer that never sends a version, nor any other messages. It shouldn't receive anything from the node.
no_version_idle_peer = self.nodes[0].add_p2p_connection(LazyPeer(), send_version=False, wait_for_verack=False)
# Peer that sends a version but not a verack.
no_verack_idle_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), wait_for_verack=False)
- # Send enough ping messages (any non-version message will do) prior to sending
- # version to reach the peer discouragement threshold. This should get us disconnected.
- for _ in range(DISCOURAGEMENT_THRESHOLD):
- no_version_disconnect_peer.send_message(msg_ping())
-
# Wait until we got the verack in response to the version. Though, don't wait for the node to receive the
# verack, since we never sent one
no_verack_idle_peer.wait_for_verack()
- no_version_disconnect_peer.wait_until(lambda: no_version_disconnect_peer.ever_connected, check_connected=False)
no_version_idle_peer.wait_until(lambda: no_version_idle_peer.ever_connected)
no_verack_idle_peer.wait_until(lambda: no_verack_idle_peer.version_received)
@@ -123,13 +110,9 @@ class P2PLeakTest(BitcoinTestFramework):
#Give the node enough time to possibly leak out a message
time.sleep(5)
- # Expect this peer to be disconnected for misbehavior
- assert not no_version_disconnect_peer.is_connected
-
self.nodes[0].disconnect_p2ps()
# Make sure no unexpected messages came in
- assert no_version_disconnect_peer.unexpected_msg == False
assert no_version_idle_peer.unexpected_msg == False
assert no_verack_idle_peer.unexpected_msg == False
@@ -148,7 +131,7 @@ class P2PLeakTest(BitcoinTestFramework):
p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
old_version_msg = msg_version()
old_version_msg.nVersion = 31799
- with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
+ with self.nodes[0].assert_debug_log(['peer=3 using obsolete version 31799; disconnecting']):
p2p_old_peer.send_message(old_version_msg)
p2p_old_peer.wait_for_disconnect()
diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py
index ce12ce26ce..47832b04bf 100755
--- a/test/functional/p2p_timeouts.py
+++ b/test/functional/p2p_timeouts.py
@@ -57,8 +57,10 @@ class TimeoutsTest(BitcoinTestFramework):
assert no_version_node.is_connected
assert no_send_node.is_connected
- no_verack_node.send_message(msg_ping())
- no_version_node.send_message(msg_ping())
+ with self.nodes[0].assert_debug_log(['Unsupported message "ping" prior to verack from peer=0']):
+ no_verack_node.send_message(msg_ping())
+ with self.nodes[0].assert_debug_log(['non-version message before version handshake. Message "ping" from peer=1']):
+ no_version_node.send_message(msg_ping())
sleep(1)
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 0a5b7f551c..a618706a77 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -482,11 +482,8 @@ class TestNode():
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
- self.wait_for_rpc_connection()
- self.stop_node()
- self.wait_until_stopped()
- except FailedToStartError as e:
- self.log.debug('bitcoind failed to start: %s', e)
+ ret = self.process.wait(timeout=self.rpc_timeout)
+ self.log.debug(self._node_msg(f'bitcoind exited with status {ret} during initialization'))
self.running = False
self.process = None
# Check stderr for expected message
@@ -505,11 +502,15 @@ class TestNode():
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
- else:
+ except subprocess.TimeoutExpired:
+ self.process.kill()
+ self.running = False
+ self.process = None
+ assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s '
if expected_msg is None:
- assert_msg = "bitcoind should have exited with an error"
+ assert_msg += "with an error"
else:
- assert_msg = "bitcoind should have exited with expected error " + expected_msg
+ assert_msg += "with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan
index 48f81f3dbf..986e096056 100644
--- a/test/sanitizer_suppressions/tsan
+++ b/test/sanitizer_suppressions/tsan
@@ -32,7 +32,6 @@ deadlock:CConnman::ForNode
deadlock:CConnman::GetNodeStats
deadlock:CChainState::ConnectTip
deadlock:UpdateTip
-deadlock:wallet_tests::CreateWallet
# WalletBatch (unidentified deadlock)
deadlock:WalletBatch