-rw-r--r--  Makefile.am                                           11
-rwxr-xr-x  ci/test/00_setup_env.sh                                1
-rw-r--r--  ci/test/00_setup_env_mac_host.sh                       1
-rw-r--r--  ci/test/00_setup_env_native_qt5.sh                     1
-rw-r--r--  ci/test/00_setup_env_win64.sh                          1
-rwxr-xr-x  ci/test/06_script_b.sh                                 6
-rw-r--r--  configure.ac                                           3
-rwxr-xr-x  contrib/devtools/test-security-check.py                7
-rw-r--r--  contrib/testgen/base58.py                              2
-rw-r--r--  src/Makefile.am                                        2
-rw-r--r--  src/chainparams.cpp                                    1
-rw-r--r--  src/fs.cpp                                             5
-rw-r--r--  src/interfaces/chain.h                                 1
-rw-r--r--  src/qt/coincontroldialog.cpp                           2
-rw-r--r--  src/qt/rpcconsole.cpp                                  1
-rw-r--r--  src/qt/transactionrecord.cpp                           1
-rw-r--r--  src/sync.h                                             6
-rw-r--r--  src/threadsafety.h                                     7
-rw-r--r--  src/wallet/bdb.cpp                                   805
-rw-r--r--  src/wallet/bdb.h                                     337
-rw-r--r--  src/wallet/db.cpp                                    740
-rw-r--r--  src/wallet/db.h                                      379
-rw-r--r--  src/wallet/test/db_tests.cpp                           2
-rw-r--r--  src/wallet/wallet.cpp                                  7
-rw-r--r--  src/wallet/walletdb.cpp                                5
-rw-r--r--  src/wallet/walletdb.h                                  4
-rwxr-xr-x  test/functional/feature_backwards_compatibility.py     3
-rwxr-xr-x  test/functional/mempool_compatibility.py               5
-rwxr-xr-x  test/functional/p2p_filter.py                          6
-rwxr-xr-x  test/functional/p2p_leak.py                            3
-rwxr-xr-x  test/functional/p2p_nobloomfilter_messages.py         17
-rwxr-xr-x  test/functional/p2p_node_network_limited.py            1
-rwxr-xr-x  test/functional/test_framework/test_framework.py      12
-rwxr-xr-x  test/functional/test_framework/test_node.py            6
-rw-r--r--  test/functional/test_framework/util.py                64
-rwxr-xr-x  test/functional/wallet_upgradewallet.py                4
36 files changed, 1289 insertions, 1170 deletions
diff --git a/Makefile.am b/Makefile.am
index 45dab3930d..75a164f49e 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -342,3 +342,14 @@ clean-local: clean-docs
rm -rf coverage_percent.txt test_bitcoin.coverage/ total.coverage/ fuzz.coverage/ test/tmp/ cache/ $(OSX_APP)
rm -rf test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__
rm -rf osx_volname dist/ dpi36.background.tiff dpi72.background.tiff
+
+test-security-check:
+if TARGET_DARWIN
+ $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
+endif
+if TARGET_WINDOWS
+ $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
+endif
+if TARGET_LINUX
+ $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
+endif
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 56b8fc976e..711f436630 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -33,6 +33,7 @@ export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")}
export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true}
export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true}
+export RUN_SECURITY_TESTS=${RUN_SECURITY_TESTS:-false}
export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-}
export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed}
diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh
index 982e38daee..b24dc7278d 100644
--- a/ci/test/00_setup_env_mac_host.sh
+++ b/ci/test/00_setup_env_mac_host.sh
@@ -11,6 +11,7 @@ export PIP_PACKAGES="zmq"
export GOAL="install"
export BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror"
export TEST_RUNNER_EXTRA="wallet_disable" # Only run wallet_disable as a smoke test, see https://github.com/bitcoin/bitcoin/pull/17240#issuecomment-546022121 why the other tests are disabled
+export RUN_SECURITY_TESTS="true"
# Run without depends
export NO_DEPENDS=1
export OSX_SDK=""
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index fa07990756..496251a125 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -10,6 +10,7 @@ export CONTAINER_NAME=ci_native_qt5
export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev"
export DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1"
export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
+export RUN_SECURITY_TESTS="true"
export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index 8f0c62a1a6..90c3ebd28e 100644
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -10,5 +10,6 @@ export CONTAINER_NAME=ci_win64
export HOST=x86_64-w64-mingw32
export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
export RUN_FUNCTIONAL_TESTS=false
+export RUN_SECURITY_TESTS="true"
export GOAL="deploy"
export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests"
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index 0d23d9eed2..96d44328b8 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -39,6 +39,12 @@ if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
END_FOLD
fi
+if [ "$RUN_SECURITY_TESTS" = "true" ]; then
+ BEGIN_FOLD security-tests
+ DOCKER_EXEC make test-security-check
+ END_FOLD
+fi
+
if [ "$RUN_FUZZ_TESTS" = "true" ]; then
BEGIN_FOLD fuzz-tests
DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} $MAKEJOBS -l DEBUG ${DIR_FUZZ_IN}
diff --git a/configure.ac b/configure.ac
index 0218bf3022..474d8a7919 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1600,6 +1600,7 @@ fi
AM_CONDITIONAL([TARGET_DARWIN], [test x$TARGET_OS = xdarwin])
AM_CONDITIONAL([BUILD_DARWIN], [test x$BUILD_OS = xdarwin])
+AM_CONDITIONAL([TARGET_LINUX], [test x$TARGET_OS = xlinux])
AM_CONDITIONAL([TARGET_WINDOWS], [test x$TARGET_OS = xwindows])
AM_CONDITIONAL([ENABLE_WALLET],[test x$enable_wallet = xyes])
AM_CONDITIONAL([ENABLE_TESTS],[test x$BUILD_TEST = xyes])
@@ -1688,6 +1689,8 @@ AC_SUBST(HAVE_WEAK_GETAUXVAL)
AC_CONFIG_FILES([Makefile src/Makefile doc/man/Makefile share/setup.nsi share/qt/Info.plist test/config.ini])
AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh])
AM_COND_IF([HAVE_DOXYGEN], [AC_CONFIG_FILES([doc/Doxyfile])])
+AC_CONFIG_LINKS([contrib/devtools/security-check.py:contrib/devtools/security-check.py])
+AC_CONFIG_LINKS([contrib/devtools/test-security-check.py:contrib/devtools/test-security-check.py])
AC_CONFIG_LINKS([contrib/filter-lcov.py:contrib/filter-lcov.py])
AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py])
AC_CONFIG_LINKS([test/fuzz/test_runner.py:test/fuzz/test_runner.py])
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index ea70b27941..629eba4f28 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -20,10 +20,9 @@ def write_testcode(filename):
''')
def call_security_check(cc, source, executable, options):
- subprocess.check_call([cc,source,'-o',executable] + options)
- p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
- (stdout, stderr) = p.communicate()
- return (p.returncode, stdout.rstrip())
+ subprocess.run([cc,source,'-o',executable] + options, check=True)
+ p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
+ return (p.returncode, p.stdout.rstrip())
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py
index da67cb2d90..c7ebac50d4 100644
--- a/contrib/testgen/base58.py
+++ b/contrib/testgen/base58.py
@@ -107,7 +107,7 @@ def get_bcaddress_version(strAddress):
if __name__ == '__main__':
# Test case (from http://gitorious.org/bitcoin/python-base58.git)
- assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0
+ assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
diff --git a/src/Makefile.am b/src/Makefile.am
index 7a280a67a7..a33ff8a461 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -240,6 +240,7 @@ BITCOIN_CORE_H = \
versionbits.h \
versionbitsinfo.h \
walletinitinterface.h \
+ wallet/bdb.h \
wallet/coincontrol.h \
wallet/context.h \
wallet/crypter.h \
@@ -350,6 +351,7 @@ libbitcoin_wallet_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_wallet_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_wallet_a_SOURCES = \
interfaces/wallet.cpp \
+ wallet/bdb.cpp \
wallet/coincontrol.cpp \
wallet/context.cpp \
wallet/crypter.cpp \
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 799474fae2..092c45e4ce 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -121,6 +121,7 @@ public:
vSeeds.emplace_back("seed.btc.petertodd.org"); // Peter Todd, only supports x1, x5, x9, and xd
vSeeds.emplace_back("seed.bitcoin.sprovoost.nl"); // Sjors Provoost
vSeeds.emplace_back("dnsseed.emzy.de"); // Stephan Oeste
+ vSeeds.emplace_back("seed.bitcoin.wiz.biz"); // Jason Maurice
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5);
diff --git a/src/fs.cpp b/src/fs.cpp
index e68c97b3ca..eef9c81de9 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -5,10 +5,12 @@
#include <fs.h>
#ifndef WIN32
+#include <cstring>
#include <fcntl.h>
#include <string>
#include <sys/file.h>
#include <sys/utsname.h>
+#include <unistd.h>
#else
#ifndef NOMINMAX
#define NOMINMAX
@@ -31,7 +33,8 @@ FILE *fopen(const fs::path& p, const char *mode)
#ifndef WIN32
-static std::string GetErrorReason() {
+static std::string GetErrorReason()
+{
return std::strerror(errno);
}
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 61d7ddb934..65695707f7 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -8,6 +8,7 @@
#include <optional.h> // For Optional and nullopt
#include <primitives/transaction.h> // For CTransactionRef
+#include <functional>
#include <memory>
#include <stddef.h>
#include <stdint.h>
diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp
index db77c17df0..f44a9f285a 100644
--- a/src/qt/coincontroldialog.cpp
+++ b/src/qt/coincontroldialog.cpp
@@ -400,7 +400,6 @@ void CoinControlDialog::updateLabels(CCoinControl& m_coin_control, WalletModel *
// nPayAmount
CAmount nPayAmount = 0;
bool fDust = false;
- CMutableTransaction txDummy;
for (const CAmount &amount : CoinControlDialog::payAmounts)
{
nPayAmount += amount;
@@ -409,7 +408,6 @@ void CoinControlDialog::updateLabels(CCoinControl& m_coin_control, WalletModel *
{
// Assumes a p2pkh script size
CTxOut txout(amount, CScript() << std::vector<unsigned char>(24, 0));
- txDummy.vout.push_back(txout);
fDust |= IsDust(txout, model->node().getDustRelayFee());
}
}
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 0f89d4e6fe..dafd517ca8 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -24,6 +24,7 @@
#include <univalue.h>
#ifdef ENABLE_WALLET
+#include <wallet/bdb.h>
#include <wallet/db.h>
#include <wallet/wallet.h>
#endif
diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp
index 01dff8069c..52007ef350 100644
--- a/src/qt/transactionrecord.cpp
+++ b/src/qt/transactionrecord.cpp
@@ -47,7 +47,6 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const interface
if(mine)
{
TransactionRecord sub(hash, nTime);
- CTxDestination address;
sub.idx = i; // vout index
sub.credit = txout.nValue;
sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY;
diff --git a/src/sync.h b/src/sync.h
index 60e5a87aec..77327d8bfe 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -103,6 +103,12 @@ public:
}
using UniqueLock = std::unique_lock<PARENT>;
+#ifdef __clang__
+ //! For negative capabilities in the Clang Thread Safety Analysis.
+ //! A negative requirement uses the EXCLUSIVE_LOCKS_REQUIRED attribute, in conjunction
+ //! with the ! operator, to indicate that a mutex should not be held.
+ const AnnotatedMixin& operator!() const { return *this; }
+#endif // __clang__
};
/**
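
A note on the operator! annotation added to AnnotatedMixin above: it exists so that "!mutex" can be written inside Clang Thread Safety Analysis attributes as a negative requirement ("this function must be called without the mutex held"). Below is a minimal standalone sketch of that idea, not the project's code: the LOCKABLE and EXCLUSIVE_LOCKS_REQUIRED macros are local stand-ins for the ones in threadsafety.h, and whether the analyzer actually reports violations depends on compiling with clang and -Wthread-safety / -Wthread-safety-negative and on the locking functions being annotated.

#include <mutex>

#if defined(__clang__)
#define LOCKABLE __attribute__((lockable))
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#else
#define LOCKABLE
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#endif

class LOCKABLE Mutex : public std::mutex
{
public:
    // Allows "!g_mutex" to appear in the capability expression below.
    const Mutex& operator!() const { return *this; }
};

Mutex g_mutex;

// Negative requirement: the caller must NOT already hold g_mutex,
// because this function acquires it itself.
void LockAndWork() EXCLUSIVE_LOCKS_REQUIRED(!g_mutex)
{
    std::lock_guard<std::mutex> lock(g_mutex);
    // ... work under the lock ...
}
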
diff --git a/src/threadsafety.h b/src/threadsafety.h
index 942aa3fdcd..5f2c40bac6 100644
--- a/src/threadsafety.h
+++ b/src/threadsafety.h
@@ -60,6 +60,13 @@
// and should only be used when sync.h Mutex/LOCK/etc are not usable.
class LOCKABLE StdMutex : public std::mutex
{
+public:
+#ifdef __clang__
+ //! For negative capabilities in the Clang Thread Safety Analysis.
+ //! A negative requirement uses the EXCLUSIVE_LOCKS_REQUIRED attribute, in conjunction
+ //! with the ! operator, to indicate that a mutex should not be held.
+ const StdMutex& operator!() const { return *this; }
+#endif // __clang__
};
// StdLockGuard provides an annotated version of std::lock_guard for us,
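
Since the comment above refers to StdLockGuard as an annotated version of std::lock_guard, here is a minimal standalone sketch of how such a scoped guard is typically annotated so the analysis can follow it (acquire in the constructor, release in the destructor). ScopedLock and AnnotatedMutex are hypothetical names using the raw Clang attribute spellings, not the project's StdLockGuard.

#include <mutex>

#if defined(__clang__)
#define LOCKABLE __attribute__((lockable))
#define SCOPED_LOCKABLE __attribute__((scoped_lockable))
#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
#else
#define LOCKABLE
#define SCOPED_LOCKABLE
#define EXCLUSIVE_LOCK_FUNCTION(...)
#define UNLOCK_FUNCTION(...)
#endif

class LOCKABLE AnnotatedMutex : public std::mutex {};

// A lock_guard look-alike the analysis can track: the attributes tell it that
// the constructor acquires the mutex and the destructor releases it.
class SCOPED_LOCKABLE ScopedLock
{
    AnnotatedMutex& m_mutex;

public:
    explicit ScopedLock(AnnotatedMutex& mutex) EXCLUSIVE_LOCK_FUNCTION(mutex) : m_mutex(mutex) { m_mutex.lock(); }
    ~ScopedLock() UNLOCK_FUNCTION() { m_mutex.unlock(); }
};
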
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
new file mode 100644
index 0000000000..7ed9c88122
--- /dev/null
+++ b/src/wallet/bdb.cpp
@@ -0,0 +1,805 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <wallet/bdb.h>
+#include <wallet/db.h>
+
+#include <util/strencodings.h>
+#include <util/translation.h>
+
+#include <stdint.h>
+
+#ifndef WIN32
+#include <sys/stat.h>
+#endif
+
+namespace {
+
+//! Make sure database has a unique fileid within the environment. If it
+//! doesn't, throw an error. BDB caches do not work properly when more than one
+//! open database has the same fileid (values written to one database may show
+//! up in reads to other databases).
+//!
+//! BerkeleyDB generates unique fileids by default
+//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
+//! so bitcoin should never create different databases with the same fileid, but
+//! this error can be triggered if users manually copy database files.
+void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filename, Db& db, WalletDatabaseFileId& fileid)
+{
+ if (env.IsMock()) return;
+
+ int ret = db.get_mpf()->get_fileid(fileid.value);
+ if (ret != 0) {
+ throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (get_fileid failed with %d)", filename, ret));
+ }
+
+ for (const auto& item : env.m_fileids) {
+ if (fileid == item.second && &fileid != &item.second) {
+ throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (duplicates fileid %s from %s)", filename,
+ HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first));
+ }
+ }
+}
+
+RecursiveMutex cs_db;
+std::map<std::string, std::weak_ptr<BerkeleyEnvironment>> g_dbenvs GUARDED_BY(cs_db); //!< Map from directory name to db environment.
+} // namespace
+
+bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId& rhs) const
+{
+ return memcmp(value, &rhs.value, sizeof(value)) == 0;
+}
+
+bool IsBDBWalletLoaded(const fs::path& wallet_path)
+{
+ fs::path env_directory;
+ std::string database_filename;
+ SplitWalletPath(wallet_path, env_directory, database_filename);
+ LOCK(cs_db);
+ auto env = g_dbenvs.find(env_directory.string());
+ if (env == g_dbenvs.end()) return false;
+ auto database = env->second.lock();
+ return database && database->IsDatabaseLoaded(database_filename);
+}
+
+/**
+ * @param[in] wallet_path Path to wallet directory. Or (for backwards compatibility only) a path to a berkeley btree data file inside a wallet directory.
+ * @param[out] database_filename Filename of berkeley btree data file inside the wallet directory.
+ * @return A shared pointer to the BerkeleyEnvironment object for the wallet directory, never empty because ~BerkeleyEnvironment
+ * erases the weak pointer from the g_dbenvs map.
+ * @post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map.
+ */
+std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename)
+{
+ fs::path env_directory;
+ SplitWalletPath(wallet_path, env_directory, database_filename);
+ LOCK(cs_db);
+ auto inserted = g_dbenvs.emplace(env_directory.string(), std::weak_ptr<BerkeleyEnvironment>());
+ if (inserted.second) {
+ auto env = std::make_shared<BerkeleyEnvironment>(env_directory.string());
+ inserted.first->second = env;
+ return env;
+ }
+ return inserted.first->second.lock();
+}
+
+//
+// BerkeleyBatch
+//
+
+void BerkeleyEnvironment::Close()
+{
+ if (!fDbEnvInit)
+ return;
+
+ fDbEnvInit = false;
+
+ for (auto& db : m_databases) {
+ auto count = mapFileUseCount.find(db.first);
+ assert(count == mapFileUseCount.end() || count->second == 0);
+ BerkeleyDatabase& database = db.second.get();
+ if (database.m_db) {
+ database.m_db->close(0);
+ database.m_db.reset();
+ }
+ }
+
+ FILE* error_file = nullptr;
+ dbenv->get_errfile(&error_file);
+
+ int ret = dbenv->close(0);
+ if (ret != 0)
+ LogPrintf("BerkeleyEnvironment::Close: Error %d closing database environment: %s\n", ret, DbEnv::strerror(ret));
+ if (!fMockDb)
+ DbEnv((u_int32_t)0).remove(strPath.c_str(), 0);
+
+ if (error_file) fclose(error_file);
+
+ UnlockDirectory(strPath, ".walletlock");
+}
+
+void BerkeleyEnvironment::Reset()
+{
+ dbenv.reset(new DbEnv(DB_CXX_NO_EXCEPTIONS));
+ fDbEnvInit = false;
+ fMockDb = false;
+}
+
+BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path) : strPath(dir_path.string())
+{
+ Reset();
+}
+
+BerkeleyEnvironment::~BerkeleyEnvironment()
+{
+ LOCK(cs_db);
+ g_dbenvs.erase(strPath);
+ Close();
+}
+
+bool BerkeleyEnvironment::Open(bool retry)
+{
+ if (fDbEnvInit) {
+ return true;
+ }
+
+ fs::path pathIn = strPath;
+ TryCreateDirectories(pathIn);
+ if (!LockDirectory(pathIn, ".walletlock")) {
+ LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance of bitcoin may be using it.\n", strPath);
+ return false;
+ }
+
+ fs::path pathLogDir = pathIn / "database";
+ TryCreateDirectories(pathLogDir);
+ fs::path pathErrorFile = pathIn / "db.log";
+ LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string());
+
+ unsigned int nEnvFlags = 0;
+ if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB))
+ nEnvFlags |= DB_PRIVATE;
+
+ dbenv->set_lg_dir(pathLogDir.string().c_str());
+ dbenv->set_cachesize(0, 0x100000, 1); // 1 MiB should be enough for just the wallet
+ dbenv->set_lg_bsize(0x10000);
+ dbenv->set_lg_max(1048576);
+ dbenv->set_lk_max_locks(40000);
+ dbenv->set_lk_max_objects(40000);
+ dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); /// debug
+ dbenv->set_flags(DB_AUTO_COMMIT, 1);
+ dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1);
+ dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1);
+ int ret = dbenv->open(strPath.c_str(),
+ DB_CREATE |
+ DB_INIT_LOCK |
+ DB_INIT_LOG |
+ DB_INIT_MPOOL |
+ DB_INIT_TXN |
+ DB_THREAD |
+ DB_RECOVER |
+ nEnvFlags,
+ S_IRUSR | S_IWUSR);
+ if (ret != 0) {
+ LogPrintf("BerkeleyEnvironment::Open: Error %d opening database environment: %s\n", ret, DbEnv::strerror(ret));
+ int ret2 = dbenv->close(0);
+ if (ret2 != 0) {
+ LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2));
+ }
+ Reset();
+ if (retry) {
+ // try moving the database env out of the way
+ fs::path pathDatabaseBak = pathIn / strprintf("database.%d.bak", GetTime());
+ try {
+ fs::rename(pathLogDir, pathDatabaseBak);
+ LogPrintf("Moved old %s to %s. Retrying.\n", pathLogDir.string(), pathDatabaseBak.string());
+ } catch (const fs::filesystem_error&) {
+ // failure is ok (well, not really, but it's not worse than what we started with)
+ }
+ // try opening it again one more time
+ if (!Open(false /* retry */)) {
+ // if it still fails, it probably means we can't even create the database env
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+
+ fDbEnvInit = true;
+ fMockDb = false;
+ return true;
+}
+
+//! Construct an in-memory mock Berkeley environment for testing
+BerkeleyEnvironment::BerkeleyEnvironment()
+{
+ Reset();
+
+ LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::MakeMock\n");
+
+ dbenv->set_cachesize(1, 0, 1);
+ dbenv->set_lg_bsize(10485760 * 4);
+ dbenv->set_lg_max(10485760);
+ dbenv->set_lk_max_locks(10000);
+ dbenv->set_lk_max_objects(10000);
+ dbenv->set_flags(DB_AUTO_COMMIT, 1);
+ dbenv->log_set_config(DB_LOG_IN_MEMORY, 1);
+ int ret = dbenv->open(nullptr,
+ DB_CREATE |
+ DB_INIT_LOCK |
+ DB_INIT_LOG |
+ DB_INIT_MPOOL |
+ DB_INIT_TXN |
+ DB_THREAD |
+ DB_PRIVATE,
+ S_IRUSR | S_IWUSR);
+ if (ret > 0) {
+ throw std::runtime_error(strprintf("BerkeleyEnvironment::MakeMock: Error %d opening database environment.", ret));
+ }
+
+ fDbEnvInit = true;
+ fMockDb = true;
+}
+
+bool BerkeleyEnvironment::Verify(const std::string& strFile)
+{
+ LOCK(cs_db);
+ assert(mapFileUseCount.count(strFile) == 0);
+
+ Db db(dbenv.get(), 0);
+ int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
+ return result == 0;
+}
+
+BerkeleyBatch::SafeDbt::SafeDbt()
+{
+ m_dbt.set_flags(DB_DBT_MALLOC);
+}
+
+BerkeleyBatch::SafeDbt::SafeDbt(void* data, size_t size)
+ : m_dbt(data, size)
+{
+}
+
+BerkeleyBatch::SafeDbt::~SafeDbt()
+{
+ if (m_dbt.get_data() != nullptr) {
+ // Clear memory, e.g. in case it was a private key
+ memory_cleanse(m_dbt.get_data(), m_dbt.get_size());
+ // under DB_DBT_MALLOC, data is malloced by the Dbt, but must be
+ // freed by the caller.
+ // https://docs.oracle.com/cd/E17275_01/html/api_reference/C/dbt.html
+ if (m_dbt.get_flags() & DB_DBT_MALLOC) {
+ free(m_dbt.get_data());
+ }
+ }
+}
+
+const void* BerkeleyBatch::SafeDbt::get_data() const
+{
+ return m_dbt.get_data();
+}
+
+u_int32_t BerkeleyBatch::SafeDbt::get_size() const
+{
+ return m_dbt.get_size();
+}
+
+BerkeleyBatch::SafeDbt::operator Dbt*()
+{
+ return &m_dbt;
+}
+
+bool BerkeleyBatch::VerifyEnvironment(const fs::path& file_path, bilingual_str& errorStr)
+{
+ std::string walletFile;
+ std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, walletFile);
+ fs::path walletDir = env->Directory();
+
+ LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion());
+ LogPrintf("Using wallet %s\n", file_path.string());
+
+ if (!env->Open(true /* retry */)) {
+ errorStr = strprintf(_("Error initializing wallet database environment %s!"), walletDir);
+ return false;
+ }
+
+ return true;
+}
+
+bool BerkeleyBatch::VerifyDatabaseFile(const fs::path& file_path, bilingual_str& errorStr)
+{
+ std::string walletFile;
+ std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, walletFile);
+ fs::path walletDir = env->Directory();
+
+ if (fs::exists(walletDir / walletFile))
+ {
+ if (!env->Verify(walletFile)) {
+ errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), walletFile);
+ return false;
+ }
+ }
+ // also return true if the file does not exist
+ return true;
+}
+
+void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile)
+{
+ dbenv->txn_checkpoint(0, 0, 0);
+ if (fMockDb)
+ return;
+ dbenv->lsn_reset(strFile.c_str(), 0);
+}
+
+
+BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr)
+{
+ fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w'));
+ fFlushOnClose = fFlushOnCloseIn;
+ env = database.env.get();
+ if (database.IsDummy()) {
+ return;
+ }
+ const std::string &strFilename = database.strFile;
+
+ bool fCreate = strchr(pszMode, 'c') != nullptr;
+ unsigned int nFlags = DB_THREAD;
+ if (fCreate)
+ nFlags |= DB_CREATE;
+
+ {
+ LOCK(cs_db);
+ if (!env->Open(false /* retry */))
+ throw std::runtime_error("BerkeleyBatch: Failed to open database environment.");
+
+ pdb = database.m_db.get();
+ if (pdb == nullptr) {
+ int ret;
+ std::unique_ptr<Db> pdb_temp = MakeUnique<Db>(env->dbenv.get(), 0);
+
+ bool fMockDb = env->IsMock();
+ if (fMockDb) {
+ DbMpoolFile* mpf = pdb_temp->get_mpf();
+ ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
+ if (ret != 0) {
+ throw std::runtime_error(strprintf("BerkeleyBatch: Failed to configure for no temp file backing for database %s", strFilename));
+ }
+ }
+
+ ret = pdb_temp->open(nullptr, // Txn pointer
+ fMockDb ? nullptr : strFilename.c_str(), // Filename
+ fMockDb ? strFilename.c_str() : "main", // Logical db name
+ DB_BTREE, // Database type
+ nFlags, // Flags
+ 0);
+
+ if (ret != 0) {
+ throw std::runtime_error(strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename));
+ }
+
+ // Call CheckUniqueFileid on the containing BDB environment to
+ // avoid BDB data consistency bugs that happen when different data
+ // files in the same environment have the same fileid.
+ //
+ // Also call CheckUniqueFileid on all the other g_dbenvs to prevent
+ // bitcoin from opening the same data file through another
+ // environment when the file is referenced through equivalent but
+ // not obviously identical symlinked or hard linked or bind mounted
+ // paths. In the future a more relaxed check for equal inode and
+ // device ids could be done instead, which would allow opening
+ // different backup copies of a wallet at the same time. Maybe even
+ // more ideally, an exclusive lock for accessing the database could
+ // be implemented, so no equality checks are needed at all. (Newer
+ // versions of BDB have a set_lk_exclusive method for this
+ // purpose, but the older version we use does not.)
+ for (const auto& env : g_dbenvs) {
+ CheckUniqueFileid(*env.second.lock().get(), strFilename, *pdb_temp, this->env->m_fileids[strFilename]);
+ }
+
+ pdb = pdb_temp.release();
+ database.m_db.reset(pdb);
+
+ if (fCreate && !Exists(std::string("version"))) {
+ bool fTmp = fReadOnly;
+ fReadOnly = false;
+ Write(std::string("version"), CLIENT_VERSION);
+ fReadOnly = fTmp;
+ }
+ }
+ ++env->mapFileUseCount[strFilename];
+ strFile = strFilename;
+ }
+}
+
+void BerkeleyBatch::Flush()
+{
+ if (activeTxn)
+ return;
+
+ // Flush database activity from memory pool to disk log
+ unsigned int nMinutes = 0;
+ if (fReadOnly)
+ nMinutes = 1;
+
+ if (env) { // env is nullptr for dummy databases (i.e. in tests). Don't actually flush if env is nullptr so we don't segfault
+ env->dbenv->txn_checkpoint(nMinutes ? gArgs.GetArg("-dblogsize", DEFAULT_WALLET_DBLOGSIZE) * 1024 : 0, nMinutes, 0);
+ }
+}
+
+void BerkeleyDatabase::IncrementUpdateCounter()
+{
+ ++nUpdateCounter;
+}
+
+void BerkeleyBatch::Close()
+{
+ if (!pdb)
+ return;
+ if (activeTxn)
+ activeTxn->abort();
+ activeTxn = nullptr;
+ pdb = nullptr;
+
+ if (fFlushOnClose)
+ Flush();
+
+ {
+ LOCK(cs_db);
+ --env->mapFileUseCount[strFile];
+ }
+ env->m_db_in_use.notify_all();
+}
+
+void BerkeleyEnvironment::CloseDb(const std::string& strFile)
+{
+ {
+ LOCK(cs_db);
+ auto it = m_databases.find(strFile);
+ assert(it != m_databases.end());
+ BerkeleyDatabase& database = it->second.get();
+ if (database.m_db) {
+ // Close the database handle
+ database.m_db->close(0);
+ database.m_db.reset();
+ }
+ }
+}
+
+void BerkeleyEnvironment::ReloadDbEnv()
+{
+ // Make sure that no Db's are in use
+ AssertLockNotHeld(cs_db);
+ std::unique_lock<RecursiveMutex> lock(cs_db);
+ m_db_in_use.wait(lock, [this](){
+ for (auto& count : mapFileUseCount) {
+ if (count.second > 0) return false;
+ }
+ return true;
+ });
+
+ std::vector<std::string> filenames;
+ for (auto it : m_databases) {
+ filenames.push_back(it.first);
+ }
+ // Close the individual Db's
+ for (const std::string& filename : filenames) {
+ CloseDb(filename);
+ }
+ // Reset the environment
+ Flush(true); // This will flush and close the environment
+ Reset();
+ Open(true);
+}
+
+bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip)
+{
+ if (database.IsDummy()) {
+ return true;
+ }
+ BerkeleyEnvironment *env = database.env.get();
+ const std::string& strFile = database.strFile;
+ while (true) {
+ {
+ LOCK(cs_db);
+ if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) {
+ // Flush log data to the dat file
+ env->CloseDb(strFile);
+ env->CheckpointLSN(strFile);
+ env->mapFileUseCount.erase(strFile);
+
+ bool fSuccess = true;
+ LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
+ std::string strFileRes = strFile + ".rewrite";
+ { // surround usage of db with extra {}
+ BerkeleyBatch db(database, "r");
+ std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0);
+
+ int ret = pdbCopy->open(nullptr, // Txn pointer
+ strFileRes.c_str(), // Filename
+ "main", // Logical db name
+ DB_BTREE, // Database type
+ DB_CREATE, // Flags
+ 0);
+ if (ret > 0) {
+ LogPrintf("BerkeleyBatch::Rewrite: Can't create database file %s\n", strFileRes);
+ fSuccess = false;
+ }
+
+ Dbc* pcursor = db.GetCursor();
+ if (pcursor)
+ while (fSuccess) {
+ CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ CDataStream ssValue(SER_DISK, CLIENT_VERSION);
+ int ret1 = db.ReadAtCursor(pcursor, ssKey, ssValue);
+ if (ret1 == DB_NOTFOUND) {
+ pcursor->close();
+ break;
+ } else if (ret1 != 0) {
+ pcursor->close();
+ fSuccess = false;
+ break;
+ }
+ if (pszSkip &&
+ strncmp(ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0)
+ continue;
+ if (strncmp(ssKey.data(), "\x07version", 8) == 0) {
+ // Update version:
+ ssValue.clear();
+ ssValue << CLIENT_VERSION;
+ }
+ Dbt datKey(ssKey.data(), ssKey.size());
+ Dbt datValue(ssValue.data(), ssValue.size());
+ int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE);
+ if (ret2 > 0)
+ fSuccess = false;
+ }
+ if (fSuccess) {
+ db.Close();
+ env->CloseDb(strFile);
+ if (pdbCopy->close(0))
+ fSuccess = false;
+ } else {
+ pdbCopy->close(0);
+ }
+ }
+ if (fSuccess) {
+ Db dbA(env->dbenv.get(), 0);
+ if (dbA.remove(strFile.c_str(), nullptr, 0))
+ fSuccess = false;
+ Db dbB(env->dbenv.get(), 0);
+ if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0))
+ fSuccess = false;
+ }
+ if (!fSuccess)
+ LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite database file %s\n", strFileRes);
+ return fSuccess;
+ }
+ }
+ UninterruptibleSleep(std::chrono::milliseconds{100});
+ }
+}
+
+
+void BerkeleyEnvironment::Flush(bool fShutdown)
+{
+ int64_t nStart = GetTimeMillis();
+ // Flush log data to the actual data file on all files that are not in use
+ LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: [%s] Flush(%s)%s\n", strPath, fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
+ if (!fDbEnvInit)
+ return;
+ {
+ LOCK(cs_db);
+ std::map<std::string, int>::iterator mi = mapFileUseCount.begin();
+ while (mi != mapFileUseCount.end()) {
+ std::string strFile = (*mi).first;
+ int nRefCount = (*mi).second;
+ LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
+ if (nRefCount == 0) {
+ // Move log data to the dat file
+ CloseDb(strFile);
+ LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile);
+ dbenv->txn_checkpoint(0, 0, 0);
+ LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s detach\n", strFile);
+ if (!fMockDb)
+ dbenv->lsn_reset(strFile.c_str(), 0);
+ LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile);
+ mapFileUseCount.erase(mi++);
+ } else
+ mi++;
+ }
+ LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
+ if (fShutdown) {
+ char** listp;
+ if (mapFileUseCount.empty()) {
+ dbenv->log_archive(&listp, DB_ARCH_REMOVE);
+ Close();
+ if (!fMockDb) {
+ fs::remove_all(fs::path(strPath) / "database");
+ }
+ }
+ }
+ }
+}
+
+bool BerkeleyBatch::PeriodicFlush(BerkeleyDatabase& database)
+{
+ if (database.IsDummy()) {
+ return true;
+ }
+ bool ret = false;
+ BerkeleyEnvironment *env = database.env.get();
+ const std::string& strFile = database.strFile;
+ TRY_LOCK(cs_db, lockDb);
+ if (lockDb)
+ {
+ // Don't do this if any databases are in use
+ int nRefCount = 0;
+ std::map<std::string, int>::iterator mit = env->mapFileUseCount.begin();
+ while (mit != env->mapFileUseCount.end())
+ {
+ nRefCount += (*mit).second;
+ mit++;
+ }
+
+ if (nRefCount == 0)
+ {
+ std::map<std::string, int>::iterator mi = env->mapFileUseCount.find(strFile);
+ if (mi != env->mapFileUseCount.end())
+ {
+ LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile);
+ int64_t nStart = GetTimeMillis();
+
+ // Flush wallet file so it's self contained
+ env->CloseDb(strFile);
+ env->CheckpointLSN(strFile);
+
+ env->mapFileUseCount.erase(mi++);
+ LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
+ ret = true;
+ }
+ }
+ }
+
+ return ret;
+}
+
+bool BerkeleyDatabase::Rewrite(const char* pszSkip)
+{
+ return BerkeleyBatch::Rewrite(*this, pszSkip);
+}
+
+bool BerkeleyDatabase::Backup(const std::string& strDest) const
+{
+ if (IsDummy()) {
+ return false;
+ }
+ while (true)
+ {
+ {
+ LOCK(cs_db);
+ if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0)
+ {
+ // Flush log data to the dat file
+ env->CloseDb(strFile);
+ env->CheckpointLSN(strFile);
+ env->mapFileUseCount.erase(strFile);
+
+ // Copy wallet file
+ fs::path pathSrc = env->Directory() / strFile;
+ fs::path pathDest(strDest);
+ if (fs::is_directory(pathDest))
+ pathDest /= strFile;
+
+ try {
+ if (fs::equivalent(pathSrc, pathDest)) {
+ LogPrintf("cannot backup to wallet source file %s\n", pathDest.string());
+ return false;
+ }
+
+ fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists);
+ LogPrintf("copied %s to %s\n", strFile, pathDest.string());
+ return true;
+ } catch (const fs::filesystem_error& e) {
+ LogPrintf("error copying %s to %s - %s\n", strFile, pathDest.string(), fsbridge::get_filesystem_error_message(e));
+ return false;
+ }
+ }
+ }
+ UninterruptibleSleep(std::chrono::milliseconds{100});
+ }
+}
+
+void BerkeleyDatabase::Flush(bool shutdown)
+{
+ if (!IsDummy()) {
+ env->Flush(shutdown);
+ if (shutdown) {
+ LOCK(cs_db);
+ g_dbenvs.erase(env->Directory().string());
+ env = nullptr;
+ } else {
+ // TODO: To avoid g_dbenvs.erase erasing the environment prematurely after the
+ // first database shutdown when multiple databases are open in the same
+ // environment, should replace raw database `env` pointers with shared or weak
+ // pointers, or else separate the database and environment shutdowns so
+ // environments can be shut down after databases.
+ env->m_fileids.erase(strFile);
+ }
+ }
+}
+
+void BerkeleyDatabase::ReloadDbEnv()
+{
+ if (!IsDummy()) {
+ env->ReloadDbEnv();
+ }
+}
+
+Dbc* BerkeleyBatch::GetCursor()
+{
+ if (!pdb)
+ return nullptr;
+ Dbc* pcursor = nullptr;
+ int ret = pdb->cursor(nullptr, &pcursor, 0);
+ if (ret != 0)
+ return nullptr;
+ return pcursor;
+}
+
+int BerkeleyBatch::ReadAtCursor(Dbc* pcursor, CDataStream& ssKey, CDataStream& ssValue)
+{
+ // Read at cursor
+ SafeDbt datKey;
+ SafeDbt datValue;
+ int ret = pcursor->get(datKey, datValue, DB_NEXT);
+ if (ret != 0)
+ return ret;
+ else if (datKey.get_data() == nullptr || datValue.get_data() == nullptr)
+ return 99999;
+
+ // Convert to streams
+ ssKey.SetType(SER_DISK);
+ ssKey.clear();
+ ssKey.write((char*)datKey.get_data(), datKey.get_size());
+ ssValue.SetType(SER_DISK);
+ ssValue.clear();
+ ssValue.write((char*)datValue.get_data(), datValue.get_size());
+ return 0;
+}
+
+bool BerkeleyBatch::TxnBegin()
+{
+ if (!pdb || activeTxn)
+ return false;
+ DbTxn* ptxn = env->TxnBegin();
+ if (!ptxn)
+ return false;
+ activeTxn = ptxn;
+ return true;
+}
+
+bool BerkeleyBatch::TxnCommit()
+{
+ if (!pdb || !activeTxn)
+ return false;
+ int ret = activeTxn->commit(0);
+ activeTxn = nullptr;
+ return (ret == 0);
+}
+
+bool BerkeleyBatch::TxnAbort()
+{
+ if (!pdb || !activeTxn)
+ return false;
+ int ret = activeTxn->abort();
+ activeTxn = nullptr;
+ return (ret == 0);
+}
+
+std::string BerkeleyDatabaseVersion()
+{
+ return DbEnv::version(nullptr, nullptr, nullptr);
+}
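
To make the GetWalletEnv()/g_dbenvs scheme documented above easier to follow: it is a registry of weak_ptrs keyed by directory, and each environment erases its own entry on destruction, which is why the returned shared pointer is described as never empty for an existing key. A minimal standalone sketch of that pattern follows; Environment and GetEnv are hypothetical names, and the cs_db locking and BerkeleyDB specifics are deliberately left out.

#include <map>
#include <memory>
#include <string>

struct Environment;
std::map<std::string, std::weak_ptr<Environment>> g_envs; // directory -> environment

struct Environment {
    std::string m_dir;
    explicit Environment(std::string dir) : m_dir(std::move(dir)) {}
    // Self-deregistration: once the last shared_ptr goes away, the stale
    // weak_ptr entry is removed, so lookups below never find a dead key.
    ~Environment() { g_envs.erase(m_dir); }
};

std::shared_ptr<Environment> GetEnv(const std::string& dir)
{
    auto inserted = g_envs.emplace(dir, std::weak_ptr<Environment>());
    if (inserted.second) {
        // First database in this directory: create the shared environment.
        auto env = std::make_shared<Environment>(dir);
        inserted.first->second = env;
        return env;
    }
    // Another database already opened this directory: share its environment.
    return inserted.first->second.lock();
}

Two wallet data files in the same directory therefore end up sharing one environment object, which is what makes the per-file use counting and flushing above workable.
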
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
new file mode 100644
index 0000000000..5ed364344b
--- /dev/null
+++ b/src/wallet/bdb.h
@@ -0,0 +1,337 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_WALLET_BDB_H
+#define BITCOIN_WALLET_BDB_H
+
+#include <clientversion.h>
+#include <fs.h>
+#include <serialize.h>
+#include <streams.h>
+#include <util/system.h>
+#include <wallet/db.h>
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
+#include <db_cxx.h>
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
+struct bilingual_str;
+
+static const unsigned int DEFAULT_WALLET_DBLOGSIZE = 100;
+static const bool DEFAULT_WALLET_PRIVDB = true;
+
+struct WalletDatabaseFileId {
+ u_int8_t value[DB_FILE_ID_LEN];
+ bool operator==(const WalletDatabaseFileId& rhs) const;
+};
+
+class BerkeleyDatabase;
+
+class BerkeleyEnvironment
+{
+private:
+ bool fDbEnvInit;
+ bool fMockDb;
+ // Don't change into fs::path, as that can result in
+ // shutdown problems/crashes caused by a static initialized internal pointer.
+ std::string strPath;
+
+public:
+ std::unique_ptr<DbEnv> dbenv;
+ std::map<std::string, int> mapFileUseCount;
+ std::map<std::string, std::reference_wrapper<BerkeleyDatabase>> m_databases;
+ std::unordered_map<std::string, WalletDatabaseFileId> m_fileids;
+ std::condition_variable_any m_db_in_use;
+
+ BerkeleyEnvironment(const fs::path& env_directory);
+ BerkeleyEnvironment();
+ ~BerkeleyEnvironment();
+ void Reset();
+
+ bool IsMock() const { return fMockDb; }
+ bool IsInitialized() const { return fDbEnvInit; }
+ bool IsDatabaseLoaded(const std::string& db_filename) const { return m_databases.find(db_filename) != m_databases.end(); }
+ fs::path Directory() const { return strPath; }
+
+ bool Verify(const std::string& strFile);
+
+ bool Open(bool retry);
+ void Close();
+ void Flush(bool fShutdown);
+ void CheckpointLSN(const std::string& strFile);
+
+ void CloseDb(const std::string& strFile);
+ void ReloadDbEnv();
+
+ DbTxn* TxnBegin(int flags = DB_TXN_WRITE_NOSYNC)
+ {
+ DbTxn* ptxn = nullptr;
+ int ret = dbenv->txn_begin(nullptr, &ptxn, flags);
+ if (!ptxn || ret != 0)
+ return nullptr;
+ return ptxn;
+ }
+};
+
+/** Get BerkeleyEnvironment and database filename given a wallet path. */
+std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename);
+
+/** Return whether a BDB wallet database is currently loaded. */
+bool IsBDBWalletLoaded(const fs::path& wallet_path);
+
+/** An instance of this class represents one database.
+ * For BerkeleyDB this is just a (env, strFile) tuple.
+ **/
+class BerkeleyDatabase
+{
+ friend class BerkeleyBatch;
+public:
+ /** Create dummy DB handle */
+ BerkeleyDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(nullptr)
+ {
+ }
+
+ /** Create DB handle to real database */
+ BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, std::string filename) :
+ nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(std::move(env)), strFile(std::move(filename))
+ {
+ auto inserted = this->env->m_databases.emplace(strFile, std::ref(*this));
+ assert(inserted.second);
+ }
+
+ ~BerkeleyDatabase() {
+ if (env) {
+ size_t erased = env->m_databases.erase(strFile);
+ assert(erased == 1);
+ }
+ }
+
+ /** Return object for accessing database at specified path. */
+ static std::unique_ptr<BerkeleyDatabase> Create(const fs::path& path)
+ {
+ std::string filename;
+ return MakeUnique<BerkeleyDatabase>(GetWalletEnv(path, filename), std::move(filename));
+ }
+
+ /** Return object for accessing dummy database with no read/write capabilities. */
+ static std::unique_ptr<BerkeleyDatabase> CreateDummy()
+ {
+ return MakeUnique<BerkeleyDatabase>();
+ }
+
+ /** Return object for accessing temporary in-memory database. */
+ static std::unique_ptr<BerkeleyDatabase> CreateMock()
+ {
+ return MakeUnique<BerkeleyDatabase>(std::make_shared<BerkeleyEnvironment>(), "");
+ }
+
+ /** Rewrite the entire database on disk, with the exception of key pszSkip if non-zero
+ */
+ bool Rewrite(const char* pszSkip=nullptr);
+
+ /** Back up the entire database to a file.
+ */
+ bool Backup(const std::string& strDest) const;
+
+ /** Make sure all changes are flushed to disk.
+ */
+ void Flush(bool shutdown);
+
+ void IncrementUpdateCounter();
+
+ void ReloadDbEnv();
+
+ std::atomic<unsigned int> nUpdateCounter;
+ unsigned int nLastSeen;
+ unsigned int nLastFlushed;
+ int64_t nLastWalletUpdate;
+
+ /**
+ * Pointer to shared database environment.
+ *
+ * Normally there is only one BerkeleyDatabase object per
+ * BerkeleyEnvironment, but in the special, backwards compatible case where
+ * multiple wallet BDB data files are loaded from the same directory, this
+ * will point to a shared instance that gets freed when the last data file
+ * is closed.
+ */
+ std::shared_ptr<BerkeleyEnvironment> env;
+
+ /** Database pointer. This is initialized lazily and reset during flushes, so it can be null. */
+ std::unique_ptr<Db> m_db;
+
+private:
+ std::string strFile;
+
+ /** Return whether this database handle is a dummy for testing.
+ * Only to be used at a low level, application should ideally not care
+ * about this.
+ */
+ bool IsDummy() const { return env == nullptr; }
+};
+
+/** RAII class that provides access to a Berkeley database */
+class BerkeleyBatch
+{
+ /** RAII class that automatically cleanses its data on destruction */
+ class SafeDbt final
+ {
+ Dbt m_dbt;
+
+ public:
+ // construct Dbt with internally-managed data
+ SafeDbt();
+ // construct Dbt with provided data
+ SafeDbt(void* data, size_t size);
+ ~SafeDbt();
+
+ // delegate to Dbt
+ const void* get_data() const;
+ u_int32_t get_size() const;
+
+ // conversion operator to access the underlying Dbt
+ operator Dbt*();
+ };
+
+protected:
+ Db* pdb;
+ std::string strFile;
+ DbTxn* activeTxn;
+ bool fReadOnly;
+ bool fFlushOnClose;
+ BerkeleyEnvironment *env;
+
+public:
+ explicit BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode = "r+", bool fFlushOnCloseIn=true);
+ ~BerkeleyBatch() { Close(); }
+
+ BerkeleyBatch(const BerkeleyBatch&) = delete;
+ BerkeleyBatch& operator=(const BerkeleyBatch&) = delete;
+
+ void Flush();
+ void Close();
+
+ /* flush the wallet passively (TRY_LOCK)
+ ideal to be called periodically */
+ static bool PeriodicFlush(BerkeleyDatabase& database);
+ /* verifies the database environment */
+ static bool VerifyEnvironment(const fs::path& file_path, bilingual_str& errorStr);
+ /* verifies the database file */
+ static bool VerifyDatabaseFile(const fs::path& file_path, bilingual_str& errorStr);
+
+ template <typename K, typename T>
+ bool Read(const K& key, T& value)
+ {
+ if (!pdb)
+ return false;
+
+ // Key
+ CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ ssKey.reserve(1000);
+ ssKey << key;
+ SafeDbt datKey(ssKey.data(), ssKey.size());
+
+ // Read
+ SafeDbt datValue;
+ int ret = pdb->get(activeTxn, datKey, datValue, 0);
+ bool success = false;
+ if (datValue.get_data() != nullptr) {
+ // Unserialize value
+ try {
+ CDataStream ssValue((char*)datValue.get_data(), (char*)datValue.get_data() + datValue.get_size(), SER_DISK, CLIENT_VERSION);
+ ssValue >> value;
+ success = true;
+ } catch (const std::exception&) {
+ // In this case success remains 'false'
+ }
+ }
+ return ret == 0 && success;
+ }
+
+ template <typename K, typename T>
+ bool Write(const K& key, const T& value, bool fOverwrite = true)
+ {
+ if (!pdb)
+ return true;
+ if (fReadOnly)
+ assert(!"Write called on database in read-only mode");
+
+ // Key
+ CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ ssKey.reserve(1000);
+ ssKey << key;
+ SafeDbt datKey(ssKey.data(), ssKey.size());
+
+ // Value
+ CDataStream ssValue(SER_DISK, CLIENT_VERSION);
+ ssValue.reserve(10000);
+ ssValue << value;
+ SafeDbt datValue(ssValue.data(), ssValue.size());
+
+ // Write
+ int ret = pdb->put(activeTxn, datKey, datValue, (fOverwrite ? 0 : DB_NOOVERWRITE));
+ return (ret == 0);
+ }
+
+ template <typename K>
+ bool Erase(const K& key)
+ {
+ if (!pdb)
+ return false;
+ if (fReadOnly)
+ assert(!"Erase called on database in read-only mode");
+
+ // Key
+ CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ ssKey.reserve(1000);
+ ssKey << key;
+ SafeDbt datKey(ssKey.data(), ssKey.size());
+
+ // Erase
+ int ret = pdb->del(activeTxn, datKey, 0);
+ return (ret == 0 || ret == DB_NOTFOUND);
+ }
+
+ template <typename K>
+ bool Exists(const K& key)
+ {
+ if (!pdb)
+ return false;
+
+ // Key
+ CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ ssKey.reserve(1000);
+ ssKey << key;
+ SafeDbt datKey(ssKey.data(), ssKey.size());
+
+ // Exists
+ int ret = pdb->exists(activeTxn, datKey, 0);
+ return (ret == 0);
+ }
+
+ Dbc* GetCursor();
+ int ReadAtCursor(Dbc* pcursor, CDataStream& ssKey, CDataStream& ssValue);
+ bool TxnBegin();
+ bool TxnCommit();
+ bool TxnAbort();
+
+ bool static Rewrite(BerkeleyDatabase& database, const char* pszSkip = nullptr);
+};
+
+std::string BerkeleyDatabaseVersion();
+
+#endif // BITCOIN_WALLET_BDB_H
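
The SafeDbt member class above is described as an RAII wrapper that cleanses its data on destruction. Here is a minimal standalone sketch of that pattern with no BerkeleyDB dependency; CleansingBuffer and CleanseMemory are illustrative names only, and the volatile-pointer loop stands in for the project's memory_cleanse().

#include <cstddef>
#include <cstdlib>

// Overwrite a buffer through a volatile pointer so the zeroing is less
// likely to be optimized away (stand-in for memory_cleanse()).
static void CleanseMemory(void* ptr, std::size_t len)
{
    volatile unsigned char* p = static_cast<volatile unsigned char*>(ptr);
    while (len--) *p++ = 0;
}

class CleansingBuffer
{
    void* m_data;
    std::size_t m_size;

public:
    explicit CleansingBuffer(std::size_t size) : m_data(std::malloc(size)), m_size(size) {}
    CleansingBuffer(const CleansingBuffer&) = delete;
    CleansingBuffer& operator=(const CleansingBuffer&) = delete;
    ~CleansingBuffer()
    {
        if (m_data != nullptr) {
            CleanseMemory(m_data, m_size); // wipe first, e.g. in case it held key material
            std::free(m_data);
        }
    }
    void* data() { return m_data; }
    std::size_t size() const { return m_size; }
};

The real SafeDbt has one extra wrinkle shown in bdb.cpp: when BDB filled the Dbt under DB_DBT_MALLOC, the buffer must also be freed by the caller, which is why its destructor checks get_flags() before calling free().
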
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index d90e8e6433..1eb82a03c7 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -3,55 +3,12 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <fs.h>
#include <wallet/db.h>
-#include <util/strencodings.h>
-#include <util/translation.h>
+#include <string>
-#include <stdint.h>
-
-#ifndef WIN32
-#include <sys/stat.h>
-#endif
-
-namespace {
-
-//! Make sure database has a unique fileid within the environment. If it
-//! doesn't, throw an error. BDB caches do not work properly when more than one
-//! open database has the same fileid (values written to one database may show
-//! up in reads to other databases).
-//!
-//! BerkeleyDB generates unique fileids by default
-//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
-//! so bitcoin should never create different databases with the same fileid, but
-//! this error can be triggered if users manually copy database files.
-void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filename, Db& db, WalletDatabaseFileId& fileid)
-{
- if (env.IsMock()) return;
-
- int ret = db.get_mpf()->get_fileid(fileid.value);
- if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (get_fileid failed with %d)", filename, ret));
- }
-
- for (const auto& item : env.m_fileids) {
- if (fileid == item.second && &fileid != &item.second) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (duplicates fileid %s from %s)", filename,
- HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first));
- }
- }
-}
-
-RecursiveMutex cs_db;
-std::map<std::string, std::weak_ptr<BerkeleyEnvironment>> g_dbenvs GUARDED_BY(cs_db); //!< Map from directory name to db environment.
-} // namespace
-
-bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId& rhs) const
-{
- return memcmp(value, &rhs.value, sizeof(value)) == 0;
-}
-
-static void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename)
+void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename)
{
if (fs::is_regular_file(wallet_path)) {
// Special case for backwards compatibility: if wallet path points to an
@@ -67,18 +24,6 @@ static void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory
}
}
-bool IsWalletLoaded(const fs::path& wallet_path)
-{
- fs::path env_directory;
- std::string database_filename;
- SplitWalletPath(wallet_path, env_directory, database_filename);
- LOCK(cs_db);
- auto env = g_dbenvs.find(env_directory.string());
- if (env == g_dbenvs.end()) return false;
- auto database = env->second.lock();
- return database && database->IsDatabaseLoaded(database_filename);
-}
-
fs::path WalletDataFilePath(const fs::path& wallet_path)
{
fs::path env_directory;
@@ -86,682 +31,3 @@ fs::path WalletDataFilePath(const fs::path& wallet_path)
SplitWalletPath(wallet_path, env_directory, database_filename);
return env_directory / database_filename;
}
-
-/**
- * @param[in] wallet_path Path to wallet directory. Or (for backwards compatibility only) a path to a berkeley btree data file inside a wallet directory.
- * @param[out] database_filename Filename of berkeley btree data file inside the wallet directory.
- * @return A shared pointer to the BerkeleyEnvironment object for the wallet directory, never empty because ~BerkeleyEnvironment
- * erases the weak pointer from the g_dbenvs map.
- * @post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map.
- */
-std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename)
-{
- fs::path env_directory;
- SplitWalletPath(wallet_path, env_directory, database_filename);
- LOCK(cs_db);
- auto inserted = g_dbenvs.emplace(env_directory.string(), std::weak_ptr<BerkeleyEnvironment>());
- if (inserted.second) {
- auto env = std::make_shared<BerkeleyEnvironment>(env_directory.string());
- inserted.first->second = env;
- return env;
- }
- return inserted.first->second.lock();
-}
-
-//
-// BerkeleyBatch
-//
-
-void BerkeleyEnvironment::Close()
-{
- if (!fDbEnvInit)
- return;
-
- fDbEnvInit = false;
-
- for (auto& db : m_databases) {
- auto count = mapFileUseCount.find(db.first);
- assert(count == mapFileUseCount.end() || count->second == 0);
- BerkeleyDatabase& database = db.second.get();
- if (database.m_db) {
- database.m_db->close(0);
- database.m_db.reset();
- }
- }
-
- FILE* error_file = nullptr;
- dbenv->get_errfile(&error_file);
-
- int ret = dbenv->close(0);
- if (ret != 0)
- LogPrintf("BerkeleyEnvironment::Close: Error %d closing database environment: %s\n", ret, DbEnv::strerror(ret));
- if (!fMockDb)
- DbEnv((u_int32_t)0).remove(strPath.c_str(), 0);
-
- if (error_file) fclose(error_file);
-
- UnlockDirectory(strPath, ".walletlock");
-}
-
-void BerkeleyEnvironment::Reset()
-{
- dbenv.reset(new DbEnv(DB_CXX_NO_EXCEPTIONS));
- fDbEnvInit = false;
- fMockDb = false;
-}
-
-BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path) : strPath(dir_path.string())
-{
- Reset();
-}
-
-BerkeleyEnvironment::~BerkeleyEnvironment()
-{
- LOCK(cs_db);
- g_dbenvs.erase(strPath);
- Close();
-}
-
-bool BerkeleyEnvironment::Open(bool retry)
-{
- if (fDbEnvInit) {
- return true;
- }
-
- fs::path pathIn = strPath;
- TryCreateDirectories(pathIn);
- if (!LockDirectory(pathIn, ".walletlock")) {
- LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance of bitcoin may be using it.\n", strPath);
- return false;
- }
-
- fs::path pathLogDir = pathIn / "database";
- TryCreateDirectories(pathLogDir);
- fs::path pathErrorFile = pathIn / "db.log";
- LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string());
-
- unsigned int nEnvFlags = 0;
- if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB))
- nEnvFlags |= DB_PRIVATE;
-
- dbenv->set_lg_dir(pathLogDir.string().c_str());
- dbenv->set_cachesize(0, 0x100000, 1); // 1 MiB should be enough for just the wallet
- dbenv->set_lg_bsize(0x10000);
- dbenv->set_lg_max(1048576);
- dbenv->set_lk_max_locks(40000);
- dbenv->set_lk_max_objects(40000);
- dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); /// debug
- dbenv->set_flags(DB_AUTO_COMMIT, 1);
- dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1);
- dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1);
- int ret = dbenv->open(strPath.c_str(),
- DB_CREATE |
- DB_INIT_LOCK |
- DB_INIT_LOG |
- DB_INIT_MPOOL |
- DB_INIT_TXN |
- DB_THREAD |
- DB_RECOVER |
- nEnvFlags,
- S_IRUSR | S_IWUSR);
- if (ret != 0) {
- LogPrintf("BerkeleyEnvironment::Open: Error %d opening database environment: %s\n", ret, DbEnv::strerror(ret));
- int ret2 = dbenv->close(0);
- if (ret2 != 0) {
- LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2));
- }
- Reset();
- if (retry) {
- // try moving the database env out of the way
- fs::path pathDatabaseBak = pathIn / strprintf("database.%d.bak", GetTime());
- try {
- fs::rename(pathLogDir, pathDatabaseBak);
- LogPrintf("Moved old %s to %s. Retrying.\n", pathLogDir.string(), pathDatabaseBak.string());
- } catch (const fs::filesystem_error&) {
- // failure is ok (well, not really, but it's not worse than what we started with)
- }
- // try opening it again one more time
- if (!Open(false /* retry */)) {
- // if it still fails, it probably means we can't even create the database env
- return false;
- }
- } else {
- return false;
- }
- }
-
- fDbEnvInit = true;
- fMockDb = false;
- return true;
-}
-
-//! Construct an in-memory mock Berkeley environment for testing
-BerkeleyEnvironment::BerkeleyEnvironment()
-{
- Reset();
-
- LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::MakeMock\n");
-
- dbenv->set_cachesize(1, 0, 1);
- dbenv->set_lg_bsize(10485760 * 4);
- dbenv->set_lg_max(10485760);
- dbenv->set_lk_max_locks(10000);
- dbenv->set_lk_max_objects(10000);
- dbenv->set_flags(DB_AUTO_COMMIT, 1);
- dbenv->log_set_config(DB_LOG_IN_MEMORY, 1);
- int ret = dbenv->open(nullptr,
- DB_CREATE |
- DB_INIT_LOCK |
- DB_INIT_LOG |
- DB_INIT_MPOOL |
- DB_INIT_TXN |
- DB_THREAD |
- DB_PRIVATE,
- S_IRUSR | S_IWUSR);
- if (ret > 0) {
- throw std::runtime_error(strprintf("BerkeleyEnvironment::MakeMock: Error %d opening database environment.", ret));
- }
-
- fDbEnvInit = true;
- fMockDb = true;
-}
-
-bool BerkeleyEnvironment::Verify(const std::string& strFile)
-{
- LOCK(cs_db);
- assert(mapFileUseCount.count(strFile) == 0);
-
- Db db(dbenv.get(), 0);
- int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
- return result == 0;
-}
-
-BerkeleyBatch::SafeDbt::SafeDbt()
-{
- m_dbt.set_flags(DB_DBT_MALLOC);
-}
-
-BerkeleyBatch::SafeDbt::SafeDbt(void* data, size_t size)
- : m_dbt(data, size)
-{
-}
-
-BerkeleyBatch::SafeDbt::~SafeDbt()
-{
- if (m_dbt.get_data() != nullptr) {
- // Clear memory, e.g. in case it was a private key
- memory_cleanse(m_dbt.get_data(), m_dbt.get_size());
- // under DB_DBT_MALLOC, data is malloced by the Dbt, but must be
- // freed by the caller.
- // https://docs.oracle.com/cd/E17275_01/html/api_reference/C/dbt.html
- if (m_dbt.get_flags() & DB_DBT_MALLOC) {
- free(m_dbt.get_data());
- }
- }
-}
-
-const void* BerkeleyBatch::SafeDbt::get_data() const
-{
- return m_dbt.get_data();
-}
-
-u_int32_t BerkeleyBatch::SafeDbt::get_size() const
-{
- return m_dbt.get_size();
-}
-
-BerkeleyBatch::SafeDbt::operator Dbt*()
-{
- return &m_dbt;
-}
-
-bool BerkeleyBatch::VerifyEnvironment(const fs::path& file_path, bilingual_str& errorStr)
-{
- std::string walletFile;
- std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, walletFile);
- fs::path walletDir = env->Directory();
-
- LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion());
- LogPrintf("Using wallet %s\n", file_path.string());
-
- if (!env->Open(true /* retry */)) {
- errorStr = strprintf(_("Error initializing wallet database environment %s!"), walletDir);
- return false;
- }
-
- return true;
-}
-
-bool BerkeleyBatch::VerifyDatabaseFile(const fs::path& file_path, bilingual_str& errorStr)
-{
- std::string walletFile;
- std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, walletFile);
- fs::path walletDir = env->Directory();
-
- if (fs::exists(walletDir / walletFile))
- {
- if (!env->Verify(walletFile)) {
- errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), walletFile);
- return false;
- }
- }
- // also return true if files does not exists
- return true;
-}
-
-void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile)
-{
- dbenv->txn_checkpoint(0, 0, 0);
- if (fMockDb)
- return;
- dbenv->lsn_reset(strFile.c_str(), 0);
-}
-
-
-BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr)
-{
- fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w'));
- fFlushOnClose = fFlushOnCloseIn;
- env = database.env.get();
- if (database.IsDummy()) {
- return;
- }
- const std::string &strFilename = database.strFile;
-
- bool fCreate = strchr(pszMode, 'c') != nullptr;
- unsigned int nFlags = DB_THREAD;
- if (fCreate)
- nFlags |= DB_CREATE;
-
- {
- LOCK(cs_db);
- if (!env->Open(false /* retry */))
- throw std::runtime_error("BerkeleyBatch: Failed to open database environment.");
-
- pdb = database.m_db.get();
- if (pdb == nullptr) {
- int ret;
- std::unique_ptr<Db> pdb_temp = MakeUnique<Db>(env->dbenv.get(), 0);
-
- bool fMockDb = env->IsMock();
- if (fMockDb) {
- DbMpoolFile* mpf = pdb_temp->get_mpf();
- ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
- if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Failed to configure for no temp file backing for database %s", strFilename));
- }
- }
-
- ret = pdb_temp->open(nullptr, // Txn pointer
- fMockDb ? nullptr : strFilename.c_str(), // Filename
- fMockDb ? strFilename.c_str() : "main", // Logical db name
- DB_BTREE, // Database type
- nFlags, // Flags
- 0);
-
- if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename));
- }
-
- // Call CheckUniqueFileid on the containing BDB environment to
- // avoid BDB data consistency bugs that happen when different data
- // files in the same environment have the same fileid.
- //
- // Also call CheckUniqueFileid on all the other g_dbenvs to prevent
- // bitcoin from opening the same data file through another
- // environment when the file is referenced through equivalent but
- // not obviously identical symlinked or hard linked or bind mounted
- // paths. In the future a more relaxed check for equal inode and
- // device ids could be done instead, which would allow opening
- // different backup copies of a wallet at the same time. Maybe even
- // more ideally, an exclusive lock for accessing the database could
- // be implemented, so no equality checks are needed at all. (Newer
-            // versions of BDB have a set_lk_exclusive method for this
- // purpose, but the older version we use does not.)
- for (const auto& env : g_dbenvs) {
- CheckUniqueFileid(*env.second.lock().get(), strFilename, *pdb_temp, this->env->m_fileids[strFilename]);
- }
-
- pdb = pdb_temp.release();
- database.m_db.reset(pdb);
-
- if (fCreate && !Exists(std::string("version"))) {
- bool fTmp = fReadOnly;
- fReadOnly = false;
- Write(std::string("version"), CLIENT_VERSION);
- fReadOnly = fTmp;
- }
- }
- ++env->mapFileUseCount[strFilename];
- strFile = strFilename;
- }
-}
-
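Note on the constructor above: it derives its read-only and create flags from an fopen-style mode string ("r", "r+", "w", "cr+", ...). A small standalone sketch of just that flag derivation; ParseMode and DbOpenMode are illustrative names, not part of the codebase.

#include <cassert>
#include <cstring>

struct DbOpenMode {
    bool read_only;
    bool create;
};

// Mirror of the flag derivation in the BerkeleyBatch constructor:
// '+' or 'w' means writable, 'c' means create-if-missing.
static DbOpenMode ParseMode(const char* pszMode)
{
    DbOpenMode mode;
    mode.read_only = (std::strchr(pszMode, '+') == nullptr && std::strchr(pszMode, 'w') == nullptr);
    mode.create = (std::strchr(pszMode, 'c') != nullptr);
    return mode;
}

int main()
{
    assert(ParseMode("r").read_only && !ParseMode("r").create);
    assert(!ParseMode("cr+").read_only && ParseMode("cr+").create);
}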
-void BerkeleyBatch::Flush()
-{
- if (activeTxn)
- return;
-
- // Flush database activity from memory pool to disk log
- unsigned int nMinutes = 0;
- if (fReadOnly)
- nMinutes = 1;
-
- if (env) { // env is nullptr for dummy databases (i.e. in tests). Don't actually flush if env is nullptr so we don't segfault
- env->dbenv->txn_checkpoint(nMinutes ? gArgs.GetArg("-dblogsize", DEFAULT_WALLET_DBLOGSIZE) * 1024 : 0, nMinutes, 0);
- }
-}
-
-void BerkeleyDatabase::IncrementUpdateCounter()
-{
- ++nUpdateCounter;
-}
-
-void BerkeleyBatch::Close()
-{
- if (!pdb)
- return;
- if (activeTxn)
- activeTxn->abort();
- activeTxn = nullptr;
- pdb = nullptr;
-
- if (fFlushOnClose)
- Flush();
-
- {
- LOCK(cs_db);
- --env->mapFileUseCount[strFile];
- }
- env->m_db_in_use.notify_all();
-}
-
-void BerkeleyEnvironment::CloseDb(const std::string& strFile)
-{
- {
- LOCK(cs_db);
- auto it = m_databases.find(strFile);
- assert(it != m_databases.end());
- BerkeleyDatabase& database = it->second.get();
- if (database.m_db) {
- // Close the database handle
- database.m_db->close(0);
- database.m_db.reset();
- }
- }
-}
-
-void BerkeleyEnvironment::ReloadDbEnv()
-{
- // Make sure that no Db's are in use
- AssertLockNotHeld(cs_db);
- std::unique_lock<RecursiveMutex> lock(cs_db);
- m_db_in_use.wait(lock, [this](){
- for (auto& count : mapFileUseCount) {
- if (count.second > 0) return false;
- }
- return true;
- });
-
- std::vector<std::string> filenames;
- for (auto it : m_databases) {
- filenames.push_back(it.first);
- }
- // Close the individual Db's
- for (const std::string& filename : filenames) {
- CloseDb(filename);
- }
- // Reset the environment
- Flush(true); // This will flush and close the environment
- Reset();
- Open(true);
-}
-
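Note on ReloadDbEnv above: it blocks on m_db_in_use until every entry in mapFileUseCount drops to zero before closing and reopening the environment. A condensed sketch of that wait/notify handshake, with std::recursive_mutex and std::condition_variable_any standing in for the RecursiveMutex cs_db used in the real code; DbUseTracker is an illustrative name.

#include <condition_variable>
#include <map>
#include <mutex>
#include <string>

struct DbUseTracker {
    std::recursive_mutex cs_db;
    std::condition_variable_any m_db_in_use;
    std::map<std::string, int> mapFileUseCount;

    // Called when a batch closes (cf. BerkeleyBatch::Close()).
    void Release(const std::string& file)
    {
        {
            std::lock_guard<std::recursive_mutex> lock(cs_db);
            --mapFileUseCount[file];
        }
        m_db_in_use.notify_all();
    }

    // Called before reloading the environment (cf. ReloadDbEnv()).
    void WaitForNoUsers()
    {
        std::unique_lock<std::recursive_mutex> lock(cs_db);
        m_db_in_use.wait(lock, [this] {
            for (const auto& count : mapFileUseCount) {
                if (count.second > 0) return false;
            }
            return true;
        });
    }
};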
-bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip)
-{
- if (database.IsDummy()) {
- return true;
- }
- BerkeleyEnvironment *env = database.env.get();
- const std::string& strFile = database.strFile;
- while (true) {
- {
- LOCK(cs_db);
- if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) {
- // Flush log data to the dat file
- env->CloseDb(strFile);
- env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(strFile);
-
- bool fSuccess = true;
- LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
- std::string strFileRes = strFile + ".rewrite";
- { // surround usage of db with extra {}
- BerkeleyBatch db(database, "r");
- std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0);
-
- int ret = pdbCopy->open(nullptr, // Txn pointer
- strFileRes.c_str(), // Filename
- "main", // Logical db name
- DB_BTREE, // Database type
- DB_CREATE, // Flags
- 0);
- if (ret > 0) {
- LogPrintf("BerkeleyBatch::Rewrite: Can't create database file %s\n", strFileRes);
- fSuccess = false;
- }
-
- Dbc* pcursor = db.GetCursor();
- if (pcursor)
- while (fSuccess) {
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
- int ret1 = db.ReadAtCursor(pcursor, ssKey, ssValue);
- if (ret1 == DB_NOTFOUND) {
- pcursor->close();
- break;
- } else if (ret1 != 0) {
- pcursor->close();
- fSuccess = false;
- break;
- }
- if (pszSkip &&
- strncmp(ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0)
- continue;
- if (strncmp(ssKey.data(), "\x07version", 8) == 0) {
- // Update version:
- ssValue.clear();
- ssValue << CLIENT_VERSION;
- }
- Dbt datKey(ssKey.data(), ssKey.size());
- Dbt datValue(ssValue.data(), ssValue.size());
- int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE);
- if (ret2 > 0)
- fSuccess = false;
- }
- if (fSuccess) {
- db.Close();
- env->CloseDb(strFile);
- if (pdbCopy->close(0))
- fSuccess = false;
- } else {
- pdbCopy->close(0);
- }
- }
- if (fSuccess) {
- Db dbA(env->dbenv.get(), 0);
- if (dbA.remove(strFile.c_str(), nullptr, 0))
- fSuccess = false;
- Db dbB(env->dbenv.get(), 0);
- if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0))
- fSuccess = false;
- }
- if (!fSuccess)
- LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite database file %s\n", strFileRes);
- return fSuccess;
- }
- }
- UninterruptibleSleep(std::chrono::milliseconds{100});
- }
-}
-
-
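Note on Rewrite above: it copies every record into a fresh ".rewrite" file, optionally skipping keys that begin with pszSkip, refreshing the stored "version" record, and finally swapping the files. A simplified in-memory sketch of just the copy/skip/update step, with std::map standing in for the BDB cursor loop; RewriteRecords is an illustrative name.

#include <cstring>
#include <map>
#include <string>

// Copy src into a new map, dropping keys that begin with skip_prefix
// (if given) and refreshing the "version" record, mirroring the loop
// over ReadAtCursor() in BerkeleyBatch::Rewrite().
std::map<std::string, std::string> RewriteRecords(
    const std::map<std::string, std::string>& src,
    const char* skip_prefix,
    const std::string& new_version)
{
    std::map<std::string, std::string> dst;
    for (const auto& [key, value] : src) {
        if (skip_prefix && key.compare(0, std::strlen(skip_prefix), skip_prefix) == 0) {
            continue; // caller asked for this key family to be dropped
        }
        if (key == "version") {
            dst[key] = new_version; // update version, as the real loop does
            continue;
        }
        dst[key] = value;
    }
    return dst;
}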
-void BerkeleyEnvironment::Flush(bool fShutdown)
-{
- int64_t nStart = GetTimeMillis();
-    // Flush log data to the actual data file for all files that are not in use
- LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: [%s] Flush(%s)%s\n", strPath, fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
- if (!fDbEnvInit)
- return;
- {
- LOCK(cs_db);
- std::map<std::string, int>::iterator mi = mapFileUseCount.begin();
- while (mi != mapFileUseCount.end()) {
- std::string strFile = (*mi).first;
- int nRefCount = (*mi).second;
- LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
- if (nRefCount == 0) {
- // Move log data to the dat file
- CloseDb(strFile);
- LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile);
- dbenv->txn_checkpoint(0, 0, 0);
- LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s detach\n", strFile);
- if (!fMockDb)
- dbenv->lsn_reset(strFile.c_str(), 0);
- LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile);
- mapFileUseCount.erase(mi++);
- } else
- mi++;
- }
- LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
- if (fShutdown) {
- char** listp;
- if (mapFileUseCount.empty()) {
- dbenv->log_archive(&listp, DB_ARCH_REMOVE);
- Close();
- if (!fMockDb) {
- fs::remove_all(fs::path(strPath) / "database");
- }
- }
- }
- }
-}
-
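Note on BerkeleyEnvironment::Flush above: it removes zero-refcount entries from mapFileUseCount while iterating, relying on the map.erase(it++) idiom to keep the iterator valid. A minimal sketch of that pattern; PruneUnusedFiles is an illustrative name.

#include <map>
#include <string>

// Remove all zero-refcount files from the use-count map in one pass.
// erase() invalidates only the erased iterator, so a copy is advanced
// first (the map.erase(it++) idiom used in Flush()).
void PruneUnusedFiles(std::map<std::string, int>& use_counts)
{
    for (auto it = use_counts.begin(); it != use_counts.end();) {
        if (it->second == 0) {
            // In the real code this is where CloseDb()/txn_checkpoint()
            // run for the file before it is dropped from the map.
            use_counts.erase(it++);
        } else {
            ++it;
        }
    }
}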
-bool BerkeleyBatch::PeriodicFlush(BerkeleyDatabase& database)
-{
- if (database.IsDummy()) {
- return true;
- }
- bool ret = false;
- BerkeleyEnvironment *env = database.env.get();
- const std::string& strFile = database.strFile;
- TRY_LOCK(cs_db, lockDb);
- if (lockDb)
- {
- // Don't do this if any databases are in use
- int nRefCount = 0;
- std::map<std::string, int>::iterator mit = env->mapFileUseCount.begin();
- while (mit != env->mapFileUseCount.end())
- {
- nRefCount += (*mit).second;
- mit++;
- }
-
- if (nRefCount == 0)
- {
- std::map<std::string, int>::iterator mi = env->mapFileUseCount.find(strFile);
- if (mi != env->mapFileUseCount.end())
- {
- LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile);
- int64_t nStart = GetTimeMillis();
-
- // Flush wallet file so it's self contained
- env->CloseDb(strFile);
- env->CheckpointLSN(strFile);
-
- env->mapFileUseCount.erase(mi++);
- LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
- ret = true;
- }
- }
- }
-
- return ret;
-}
-
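Note on PeriodicFlush above: it is meant to run opportunistically from a background timer, so it takes cs_db with TRY_LOCK and simply gives up if another thread holds the lock or any batch is open. A sketch of the same shape with a plain std::mutex; TryPeriodicFlush is an illustrative name.

#include <mutex>

// Attempt a periodic flush without ever blocking the caller.
// Returns true only if the lock was free and no batches were open,
// mirroring the TRY_LOCK(cs_db, lockDb) pattern in PeriodicFlush().
bool TryPeriodicFlush(std::mutex& cs_db, int open_batch_count)
{
    std::unique_lock<std::mutex> lock(cs_db, std::try_to_lock);
    if (!lock.owns_lock()) return false;     // someone else is busy; skip this round
    if (open_batch_count != 0) return false; // a batch is open; don't checkpoint now
    // ... close, checkpoint and detach the file here ...
    return true;
}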
-bool BerkeleyDatabase::Rewrite(const char* pszSkip)
-{
- return BerkeleyBatch::Rewrite(*this, pszSkip);
-}
-
-bool BerkeleyDatabase::Backup(const std::string& strDest) const
-{
- if (IsDummy()) {
- return false;
- }
- while (true)
- {
- {
- LOCK(cs_db);
- if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0)
- {
- // Flush log data to the dat file
- env->CloseDb(strFile);
- env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(strFile);
-
- // Copy wallet file
- fs::path pathSrc = env->Directory() / strFile;
- fs::path pathDest(strDest);
- if (fs::is_directory(pathDest))
- pathDest /= strFile;
-
- try {
- if (fs::equivalent(pathSrc, pathDest)) {
- LogPrintf("cannot backup to wallet source file %s\n", pathDest.string());
- return false;
- }
-
- fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists);
- LogPrintf("copied %s to %s\n", strFile, pathDest.string());
- return true;
- } catch (const fs::filesystem_error& e) {
- LogPrintf("error copying %s to %s - %s\n", strFile, pathDest.string(), fsbridge::get_filesystem_error_message(e));
- return false;
- }
- }
- }
- UninterruptibleSleep(std::chrono::milliseconds{100});
- }
-}
-
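Note on Backup above: it refuses to copy the wallet onto itself (fs::equivalent) and otherwise overwrites any existing destination. A sketch of that check using std::filesystem directly, rather than the fs/fsbridge wrappers used in the real code; BackupFile is an illustrative name.

#include <filesystem>
#include <iostream>
#include <system_error>

namespace stdfs = std::filesystem;

// Copy a wallet file to a destination path or directory, refusing to
// overwrite the source itself (cf. BerkeleyDatabase::Backup()).
bool BackupFile(const stdfs::path& src, stdfs::path dest)
{
    if (stdfs::is_directory(dest)) dest /= src.filename();
    std::error_code ec;
    if (stdfs::equivalent(src, dest, ec)) {
        std::cerr << "cannot backup to wallet source file " << dest << '\n';
        return false;
    }
    return stdfs::copy_file(src, dest, stdfs::copy_options::overwrite_existing, ec) && !ec;
}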
-void BerkeleyDatabase::Flush(bool shutdown)
-{
- if (!IsDummy()) {
- env->Flush(shutdown);
- if (shutdown) {
- LOCK(cs_db);
- g_dbenvs.erase(env->Directory().string());
- env = nullptr;
- } else {
- // TODO: To avoid g_dbenvs.erase erasing the environment prematurely after the
- // first database shutdown when multiple databases are open in the same
- // environment, should replace raw database `env` pointers with shared or weak
- // pointers, or else separate the database and environment shutdowns so
- // environments can be shut down after databases.
- env->m_fileids.erase(strFile);
- }
- }
-}
-
-void BerkeleyDatabase::ReloadDbEnv()
-{
- if (!IsDummy()) {
- env->ReloadDbEnv();
- }
-}
-
-std::string BerkeleyDatabaseVersion()
-{
- return DbEnv::version(nullptr, nullptr, nullptr);
-}
diff --git a/src/wallet/db.h b/src/wallet/db.h
index 54ce144ffc..1322bf54fa 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -6,389 +6,12 @@
#ifndef BITCOIN_WALLET_DB_H
#define BITCOIN_WALLET_DB_H
-#include <clientversion.h>
#include <fs.h>
-#include <serialize.h>
-#include <streams.h>
-#include <util/system.h>
-#include <atomic>
-#include <map>
-#include <memory>
#include <string>
-#include <unordered_map>
-#include <vector>
-
-#if defined(__GNUC__) && !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsuggest-override"
-#endif
-#include <db_cxx.h>
-#if defined(__GNUC__) && !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
-
-struct bilingual_str;
-
-static const unsigned int DEFAULT_WALLET_DBLOGSIZE = 100;
-static const bool DEFAULT_WALLET_PRIVDB = true;
-
-struct WalletDatabaseFileId {
- u_int8_t value[DB_FILE_ID_LEN];
- bool operator==(const WalletDatabaseFileId& rhs) const;
-};
-
-class BerkeleyDatabase;
-
-class BerkeleyEnvironment
-{
-private:
- bool fDbEnvInit;
- bool fMockDb;
- // Don't change into fs::path, as that can result in
-    // shutdown problems/crashes caused by a statically initialized internal pointer.
- std::string strPath;
-
-public:
- std::unique_ptr<DbEnv> dbenv;
- std::map<std::string, int> mapFileUseCount;
- std::map<std::string, std::reference_wrapper<BerkeleyDatabase>> m_databases;
- std::unordered_map<std::string, WalletDatabaseFileId> m_fileids;
- std::condition_variable_any m_db_in_use;
-
- BerkeleyEnvironment(const fs::path& env_directory);
- BerkeleyEnvironment();
- ~BerkeleyEnvironment();
- void Reset();
-
- bool IsMock() const { return fMockDb; }
- bool IsInitialized() const { return fDbEnvInit; }
- bool IsDatabaseLoaded(const std::string& db_filename) const { return m_databases.find(db_filename) != m_databases.end(); }
- fs::path Directory() const { return strPath; }
-
- bool Verify(const std::string& strFile);
-
- bool Open(bool retry);
- void Close();
- void Flush(bool fShutdown);
- void CheckpointLSN(const std::string& strFile);
-
- void CloseDb(const std::string& strFile);
- void ReloadDbEnv();
-
- DbTxn* TxnBegin(int flags = DB_TXN_WRITE_NOSYNC)
- {
- DbTxn* ptxn = nullptr;
- int ret = dbenv->txn_begin(nullptr, &ptxn, flags);
- if (!ptxn || ret != 0)
- return nullptr;
- return ptxn;
- }
-};
-
-/** Return whether a wallet database is currently loaded. */
-bool IsWalletLoaded(const fs::path& wallet_path);
/** Given a wallet directory path or legacy file path, return path to main data file in the wallet database. */
fs::path WalletDataFilePath(const fs::path& wallet_path);
-
-/** Get BerkeleyEnvironment and database filename given a wallet path. */
-std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename);
-
-/** An instance of this class represents one database.
- * For BerkeleyDB this is just a (env, strFile) tuple.
- **/
-class BerkeleyDatabase
-{
- friend class BerkeleyBatch;
-public:
- /** Create dummy DB handle */
- BerkeleyDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(nullptr)
- {
- }
-
- /** Create DB handle to real database */
- BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, std::string filename) :
- nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(std::move(env)), strFile(std::move(filename))
- {
- auto inserted = this->env->m_databases.emplace(strFile, std::ref(*this));
- assert(inserted.second);
- }
-
- ~BerkeleyDatabase() {
- if (env) {
- size_t erased = env->m_databases.erase(strFile);
- assert(erased == 1);
- }
- }
-
- /** Return object for accessing database at specified path. */
- static std::unique_ptr<BerkeleyDatabase> Create(const fs::path& path)
- {
- std::string filename;
- return MakeUnique<BerkeleyDatabase>(GetWalletEnv(path, filename), std::move(filename));
- }
-
- /** Return object for accessing dummy database with no read/write capabilities. */
- static std::unique_ptr<BerkeleyDatabase> CreateDummy()
- {
- return MakeUnique<BerkeleyDatabase>();
- }
-
- /** Return object for accessing temporary in-memory database. */
- static std::unique_ptr<BerkeleyDatabase> CreateMock()
- {
- return MakeUnique<BerkeleyDatabase>(std::make_shared<BerkeleyEnvironment>(), "");
- }
-
- /** Rewrite the entire database on disk, with the exception of key pszSkip if non-zero
- */
- bool Rewrite(const char* pszSkip=nullptr);
-
- /** Back up the entire database to a file.
- */
- bool Backup(const std::string& strDest) const;
-
- /** Make sure all changes are flushed to disk.
- */
- void Flush(bool shutdown);
-
- void IncrementUpdateCounter();
-
- void ReloadDbEnv();
-
- std::atomic<unsigned int> nUpdateCounter;
- unsigned int nLastSeen;
- unsigned int nLastFlushed;
- int64_t nLastWalletUpdate;
-
- /**
- * Pointer to shared database environment.
- *
- * Normally there is only one BerkeleyDatabase object per
-     * BerkeleyEnvironment, but in the special, backwards compatible case where
- * multiple wallet BDB data files are loaded from the same directory, this
- * will point to a shared instance that gets freed when the last data file
- * is closed.
- */
- std::shared_ptr<BerkeleyEnvironment> env;
-
- /** Database pointer. This is initialized lazily and reset during flushes, so it can be null. */
- std::unique_ptr<Db> m_db;
-
-private:
- std::string strFile;
-
- /** Return whether this database handle is a dummy for testing.
-     * Only to be used at a low level, applications should ideally not care
- * about this.
- */
- bool IsDummy() const { return env == nullptr; }
-};
-
-/** RAII class that provides access to a Berkeley database */
-class BerkeleyBatch
-{
- /** RAII class that automatically cleanses its data on destruction */
- class SafeDbt final
- {
- Dbt m_dbt;
-
- public:
- // construct Dbt with internally-managed data
- SafeDbt();
- // construct Dbt with provided data
- SafeDbt(void* data, size_t size);
- ~SafeDbt();
-
- // delegate to Dbt
- const void* get_data() const;
- u_int32_t get_size() const;
-
- // conversion operator to access the underlying Dbt
- operator Dbt*();
- };
-
-protected:
- Db* pdb;
- std::string strFile;
- DbTxn* activeTxn;
- bool fReadOnly;
- bool fFlushOnClose;
- BerkeleyEnvironment *env;
-
-public:
- explicit BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode = "r+", bool fFlushOnCloseIn=true);
- ~BerkeleyBatch() { Close(); }
-
- BerkeleyBatch(const BerkeleyBatch&) = delete;
- BerkeleyBatch& operator=(const BerkeleyBatch&) = delete;
-
- void Flush();
- void Close();
-
- /* flush the wallet passively (TRY_LOCK)
-       ideally called periodically */
- static bool PeriodicFlush(BerkeleyDatabase& database);
- /* verifies the database environment */
- static bool VerifyEnvironment(const fs::path& file_path, bilingual_str& errorStr);
- /* verifies the database file */
- static bool VerifyDatabaseFile(const fs::path& file_path, bilingual_str& errorStr);
-
- template <typename K, typename T>
- bool Read(const K& key, T& value)
- {
- if (!pdb)
- return false;
-
- // Key
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(1000);
- ssKey << key;
- SafeDbt datKey(ssKey.data(), ssKey.size());
-
- // Read
- SafeDbt datValue;
- int ret = pdb->get(activeTxn, datKey, datValue, 0);
- bool success = false;
- if (datValue.get_data() != nullptr) {
- // Unserialize value
- try {
- CDataStream ssValue((char*)datValue.get_data(), (char*)datValue.get_data() + datValue.get_size(), SER_DISK, CLIENT_VERSION);
- ssValue >> value;
- success = true;
- } catch (const std::exception&) {
- // In this case success remains 'false'
- }
- }
- return ret == 0 && success;
- }
-
- template <typename K, typename T>
- bool Write(const K& key, const T& value, bool fOverwrite = true)
- {
- if (!pdb)
- return true;
- if (fReadOnly)
- assert(!"Write called on database in read-only mode");
-
- // Key
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(1000);
- ssKey << key;
- SafeDbt datKey(ssKey.data(), ssKey.size());
-
- // Value
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
- ssValue.reserve(10000);
- ssValue << value;
- SafeDbt datValue(ssValue.data(), ssValue.size());
-
- // Write
- int ret = pdb->put(activeTxn, datKey, datValue, (fOverwrite ? 0 : DB_NOOVERWRITE));
- return (ret == 0);
- }
-
- template <typename K>
- bool Erase(const K& key)
- {
- if (!pdb)
- return false;
- if (fReadOnly)
- assert(!"Erase called on database in read-only mode");
-
- // Key
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(1000);
- ssKey << key;
- SafeDbt datKey(ssKey.data(), ssKey.size());
-
- // Erase
- int ret = pdb->del(activeTxn, datKey, 0);
- return (ret == 0 || ret == DB_NOTFOUND);
- }
-
- template <typename K>
- bool Exists(const K& key)
- {
- if (!pdb)
- return false;
-
- // Key
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(1000);
- ssKey << key;
- SafeDbt datKey(ssKey.data(), ssKey.size());
-
- // Exists
- int ret = pdb->exists(activeTxn, datKey, 0);
- return (ret == 0);
- }
-
- Dbc* GetCursor()
- {
- if (!pdb)
- return nullptr;
- Dbc* pcursor = nullptr;
- int ret = pdb->cursor(nullptr, &pcursor, 0);
- if (ret != 0)
- return nullptr;
- return pcursor;
- }
-
- int ReadAtCursor(Dbc* pcursor, CDataStream& ssKey, CDataStream& ssValue)
- {
- // Read at cursor
- SafeDbt datKey;
- SafeDbt datValue;
- int ret = pcursor->get(datKey, datValue, DB_NEXT);
- if (ret != 0)
- return ret;
- else if (datKey.get_data() == nullptr || datValue.get_data() == nullptr)
- return 99999;
-
- // Convert to streams
- ssKey.SetType(SER_DISK);
- ssKey.clear();
- ssKey.write((char*)datKey.get_data(), datKey.get_size());
- ssValue.SetType(SER_DISK);
- ssValue.clear();
- ssValue.write((char*)datValue.get_data(), datValue.get_size());
- return 0;
- }
-
- bool TxnBegin()
- {
- if (!pdb || activeTxn)
- return false;
- DbTxn* ptxn = env->TxnBegin();
- if (!ptxn)
- return false;
- activeTxn = ptxn;
- return true;
- }
-
- bool TxnCommit()
- {
- if (!pdb || !activeTxn)
- return false;
- int ret = activeTxn->commit(0);
- activeTxn = nullptr;
- return (ret == 0);
- }
-
- bool TxnAbort()
- {
- if (!pdb || !activeTxn)
- return false;
- int ret = activeTxn->abort();
- activeTxn = nullptr;
- return (ret == 0);
- }
-
- bool static Rewrite(BerkeleyDatabase& database, const char* pszSkip = nullptr);
-};
-
-std::string BerkeleyDatabaseVersion();
+void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename);
#endif // BITCOIN_WALLET_DB_H
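Note on the declarations removed above (relocated to wallet/bdb.h): they expose a small key/value API, Read/Write/Erase/Exists plus TxnBegin/TxnCommit/TxnAbort. A hedged sketch of how a caller typically drives it, assuming a BerkeleyDatabase obtained elsewhere (e.g. via BerkeleyDatabase::Create()) and the interfaces exactly as declared here; WriteExampleRecord and the "examplekey" record are hypothetical, and this fragment is not a standalone program since it needs the Bitcoin Core tree to build.

#include <string>
#include <wallet/bdb.h>

// Open a batch, write one serialized record inside a transaction, and
// commit; keys and values are serialized through CDataStream by the
// Write() template declared above.
bool WriteExampleRecord(BerkeleyDatabase& database, const std::string& value)
{
    BerkeleyBatch batch(database, "cr+"); // create if missing, read/write
    if (!batch.TxnBegin()) return false;
    if (!batch.Write(std::string("examplekey"), value)) {
        batch.TxnAbort();
        return false;
    }
    return batch.TxnCommit();
}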
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index f4a4c9fa7c..8f0083cd2e 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -8,7 +8,7 @@
#include <fs.h>
#include <test/util/setup_common.h>
-#include <wallet/db.h>
+#include <wallet/bdb.h>
BOOST_FIXTURE_TEST_SUITE(db_tests, BasicTestingSetup)
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 5f318f92ad..cf42c1cb3c 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -746,7 +746,6 @@ void CWallet::SetSpentKeyState(WalletBatch& batch, const uint256& hash, unsigned
bool CWallet::IsSpentKey(const uint256& hash, unsigned int n) const
{
AssertLockHeld(cs_wallet);
- CTxDestination dst;
const CWalletTx* srctx = GetWalletTx(hash);
if (srctx) {
assert(srctx->tx->vout.size() > n);
@@ -2742,6 +2741,12 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransac
// Get the fee rate to use effective values in coin selection
CFeeRate nFeeRateNeeded = GetMinimumFeeRate(*this, coin_control, &feeCalc);
+ // Do not, ever, assume that it's fine to change the fee rate if the user has explicitly
+ // provided one
+ if (coin_control.m_feerate && nFeeRateNeeded > *coin_control.m_feerate) {
+ error = strprintf(_("Fee rate (%s) is lower than the minimum fee rate setting (%s)"), coin_control.m_feerate->ToString(), nFeeRateNeeded.ToString());
+ return false;
+ }
nFeeRet = 0;
bool pick_new_inputs = true;
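Note on the new check added above: when the user has pinned a fee rate via coin control and it is below what GetMinimumFeeRate requires, transaction creation now fails with an error instead of silently bumping the rate. A tiny standalone sketch of that guard with fee rates as plain sat/kvB integers; CheckExplicitFeeRate and FeeRatePerKvB are illustrative stand-ins for the CFeeRate comparison in the real code.

#include <optional>
#include <string>

// Fee rates expressed in satoshis per 1000 vbytes, as in CFeeRate.
using FeeRatePerKvB = long long;

// Mirror of the added guard: never raise a fee rate the user set explicitly.
bool CheckExplicitFeeRate(std::optional<FeeRatePerKvB> user_feerate,
                          FeeRatePerKvB required_feerate,
                          std::string& error)
{
    if (user_feerate && required_feerate > *user_feerate) {
        error = "Fee rate (" + std::to_string(*user_feerate) +
                ") is lower than the minimum fee rate setting (" +
                std::to_string(required_feerate) + ")";
        return false;
    }
    return true;
}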
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index cb516f70f0..2f6c2b883a 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -1018,3 +1018,8 @@ bool WalletBatch::TxnAbort()
{
return m_batch.TxnAbort();
}
+
+bool IsWalletLoaded(const fs::path& wallet_path)
+{
+ return IsBDBWalletLoaded(wallet_path);
+}
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index b95ed24d12..1037bd579f 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -8,6 +8,7 @@
#include <amount.h>
#include <script/sign.h>
+#include <wallet/bdb.h>
#include <wallet/db.h>
#include <wallet/walletutil.h>
#include <key.h>
@@ -289,4 +290,7 @@ void MaybeCompactWalletDB();
//! Unserialize a given Key-Value pair and load it into the wallet
bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr);
+/** Return whether a wallet database is currently loaded. */
+bool IsWalletLoaded(const fs::path& wallet_path);
+
#endif // BITCOIN_WALLET_WALLETDB_H
diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py
index 596ff206f2..0ab309f9b9 100755
--- a/test/functional/feature_backwards_compatibility.py
+++ b/test/functional/feature_backwards_compatibility.py
@@ -26,7 +26,6 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
- adjust_bitcoin_conf_for_pre_17,
assert_equal,
sync_blocks,
sync_mempools,
@@ -60,8 +59,6 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
170100,
160300,
])
- # adapt bitcoin.conf, because older bitcoind's don't recognize config sections
- adjust_bitcoin_conf_for_pre_17(self.nodes[5].bitcoinconf)
self.start_nodes()
diff --git a/test/functional/mempool_compatibility.py b/test/functional/mempool_compatibility.py
index eb2fee11ae..999399dec0 100755
--- a/test/functional/mempool_compatibility.py
+++ b/test/functional/mempool_compatibility.py
@@ -16,9 +16,7 @@ Only v0.15.2 is required by this test. The rest is used in other backwards compa
import os
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
- adjust_bitcoin_conf_for_pre_17
-)
+
class MempoolCompatibilityTest(BitcoinTestFramework):
def set_test_params(self):
@@ -33,7 +31,6 @@ class MempoolCompatibilityTest(BitcoinTestFramework):
150200, # oldest version supported by the test framework
None,
])
- adjust_bitcoin_conf_for_pre_17(self.nodes[0].bitcoinconf)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py
index 5726a73e40..741da3be31 100755
--- a/test/functional/p2p_filter.py
+++ b/test/functional/p2p_filter.py
@@ -124,11 +124,11 @@ class FilterTest(BitcoinTestFramework):
self.log.info("Check that a node with bloom filters enabled services p2p mempool messages")
filter_peer = P2PBloomFilter()
- self.log.info("Create a tx relevant to the peer before connecting")
+ self.log.debug("Create a tx relevant to the peer before connecting")
filter_address = self.nodes[0].decodescript(filter_peer.watch_script_pubkey)['addresses'][0]
txid = self.nodes[0].sendtoaddress(filter_address, 90)
- self.log.info("Send a mempool msg after connecting and check that the tx is received")
+ self.log.debug("Send a mempool msg after connecting and check that the tx is received")
self.nodes[0].add_p2p_connection(filter_peer)
filter_peer.send_and_ping(filter_peer.watch_filter_init)
self.nodes[0].p2p.send_message(msg_mempool())
@@ -227,8 +227,8 @@ class FilterTest(BitcoinTestFramework):
self.test_frelay_false(filter_peer_without_nrelay)
self.test_filter(filter_peer_without_nrelay)
- self.log.info('Test msg_mempool')
self.test_msg_mempool()
+
if __name__ == '__main__':
FilterTest().main()
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
index 157af68203..3b3dbd08f2 100755
--- a/test/functional/p2p_leak.py
+++ b/test/functional/p2p_leak.py
@@ -132,9 +132,6 @@ class P2PLeakTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
- # Wait until all connections are closed
- wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
-
# Make sure no unexpected messages came in
assert no_version_bannode.unexpected_msg == False
assert no_version_idlenode.unexpected_msg == False
diff --git a/test/functional/p2p_nobloomfilter_messages.py b/test/functional/p2p_nobloomfilter_messages.py
index 8478a752e7..accc5dc23c 100755
--- a/test/functional/p2p_nobloomfilter_messages.py
+++ b/test/functional/p2p_nobloomfilter_messages.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test invalid p2p messages for nodes with bloom filters disabled.
-Test that, when bloom filters are not enabled, nodes are disconnected if:
+Test that, when bloom filters are not enabled, peers are disconnected if:
1. They send a p2p mempool message
2. They send a p2p filterload message
3. They send a p2p filteradd message
@@ -17,31 +17,32 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
-class P2PNobloomfilterMessages(BitcoinTestFramework):
+class P2PNoBloomFilterMessages(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def test_message_causes_disconnect(self, message):
- # Add a p2p connection that sends a message and check that it disconnects
+ """Add a p2p connection that sends a message and check that it disconnects."""
peer = self.nodes[0].add_p2p_connection(P2PInterface())
peer.send_message(message)
peer.wait_for_disconnect()
- assert_equal(len(self.nodes[0].getpeerinfo()), 0)
+ assert_equal(self.nodes[0].getconnectioncount(), 0)
def run_test(self):
- self.log.info("Test that node is disconnected if it sends mempool message")
+ self.log.info("Test that peer is disconnected if it sends mempool message")
self.test_message_causes_disconnect(msg_mempool())
- self.log.info("Test that node is disconnected if it sends filterload message")
+ self.log.info("Test that peer is disconnected if it sends filterload message")
self.test_message_causes_disconnect(msg_filterload())
- self.log.info("Test that node is disconnected if it sends filteradd message")
+ self.log.info("Test that peer is disconnected if it sends filteradd message")
self.test_message_causes_disconnect(msg_filteradd(data=b'\xcc'))
self.log.info("Test that peer is disconnected if it sends a filterclear message")
self.test_message_causes_disconnect(msg_filterclear())
+
if __name__ == '__main__':
- P2PNobloomfilterMessages().main()
+ P2PNoBloomFilterMessages().main()
diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py
index ed3429a037..a2f6ea538c 100755
--- a/test/functional/p2p_node_network_limited.py
+++ b/test/functional/p2p_node_network_limited.py
@@ -83,7 +83,6 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
assert_equal(node1.firstAddrnServices, expected_services)
self.nodes[0].disconnect_p2ps()
- node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 9f5e9e5f0d..c9fad91481 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -452,7 +452,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
assert_equal(len(binary), num_nodes)
assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
- self.nodes.append(TestNode(
+ test_node_i = TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
@@ -470,7 +470,15 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
descriptors=self.options.descriptors,
- ))
+ )
+ self.nodes.append(test_node_i)
+ if not test_node_i.version_is_at_least(170000):
+                # adjust bitcoin.conf, because pre-0.17 bitcoind does not recognize config sections
+ conf_file = test_node_i.bitcoinconf
+ with open(conf_file, 'r', encoding='utf8') as conf:
+ conf_data = conf.read()
+ with open(conf_file, 'w', encoding='utf8') as conf:
+ conf.write(conf_data.replace('[regtest]', ''))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index ebc0501e11..66bb2c89b5 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -23,6 +23,7 @@ import sys
from .authproxy import JSONRPCException
from .descriptors import descsum_create
+from .messages import MY_SUBVERSION
from .util import (
MAX_NODES,
append_config,
@@ -549,11 +550,16 @@ class TestNode():
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
+ def num_connected_mininodes(self):
+ """Return number of test framework p2p connections to the node."""
+ return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION])
+
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
+ wait_until(lambda: self.num_connected_mininodes() == 0)
class TestNodeCLIAttr:
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 52306c8c3d..17b2cbb971 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -25,6 +25,7 @@ logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
+
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
@@ -32,6 +33,7 @@ def assert_approx(v, vexp, vspan=0.00001):
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
+
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
@@ -41,21 +43,26 @@ def assert_fee_amount(fee, tx_size, fee_per_kB):
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
+
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
+
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
+
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
+
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
+
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
@@ -71,6 +78,7 @@ def assert_raises_message(exc, message, fun, *args, **kwds):
else:
raise AssertionError("No exception raised")
+
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
@@ -95,6 +103,7 @@ def assert_raises_process_error(returncode, output, fun, *args, **kwds):
else:
raise AssertionError("No exception raised")
+
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
@@ -113,6 +122,7 @@ def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
+
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
@@ -134,22 +144,22 @@ def try_rpc(code, message, fun, *args, **kwds):
else:
return False
+
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
- raise AssertionError(
- "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
+ raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
+
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
- raise AssertionError(
- "String of length %d expected; got %d" % (length, len(string)))
+ raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
- raise AssertionError(
- "String %r contains invalid characters for a hash." % string)
+ raise AssertionError("String %r contains invalid characters for a hash." % string)
+
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
@@ -180,9 +190,11 @@ def assert_array_result(object_array, to_match, expected, should_not_find=False)
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
+
# Utility functions
###################
+
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
@@ -190,11 +202,13 @@ def check_json_precision():
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
+
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
+
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
@@ -202,12 +216,15 @@ def count_bytes(hex_string):
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
+
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
+
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
+
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
@@ -235,6 +252,7 @@ def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=N
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
+
# RPC/P2P connection constants and functions
############################################
@@ -250,6 +268,7 @@ class PortSeed:
# Must be initialized with a unique integer for each process
n = None
+
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
@@ -271,18 +290,20 @@ def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
- coverage_logfile = coverage.get_filename(
- coveragedir, node_number) if coveragedir else None
+ coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
+
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
+
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
+
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
@@ -295,9 +316,11 @@ def rpc_url(datadir, i, chain, rpchost):
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
+
# Node functions
################
+
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
@@ -327,21 +350,17 @@ def initialize_datadir(dirname, n, chain):
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
-def adjust_bitcoin_conf_for_pre_17(conf_file):
- with open(conf_file,'r', encoding='utf8') as conf:
- conf_data = conf.read()
- with open(conf_file, 'w', encoding='utf8') as conf:
- conf_data_changed = conf_data.replace('[regtest]', '')
- conf.write(conf_data_changed)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
+
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
+
def get_auth_cookie(datadir, chain):
user = None
password = None
@@ -366,20 +385,24 @@ def get_auth_cookie(datadir, chain):
raise ValueError("No RPC credentials")
return user, password
+
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
+
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
+
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
+
def disconnect_nodes(from_connection, node_num):
def get_peer_ids():
result = []
@@ -392,7 +415,7 @@ def disconnect_nodes(from_connection, node_num):
if not peer_ids:
logger.warning("disconnect_nodes: {} and {} were not connected".format(
from_connection.index,
- node_num
+ node_num,
))
return
for peer_id in peer_ids:
@@ -402,12 +425,13 @@ def disconnect_nodes(from_connection, node_num):
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
- if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
+ if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: not get_peer_ids(), timeout=5)
+
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
@@ -479,6 +503,7 @@ def find_output(node, txid, amount, *, blockhash=None):
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
+
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
@@ -496,6 +521,7 @@ def gather_inputs(from_node, amount_needed, confirmations_required=1):
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
+
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
@@ -513,6 +539,7 @@ def make_change(from_node, amount_in, amount_out, fee):
outputs[from_node.getnewaddress()] = change
return outputs
+
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
@@ -532,6 +559,7 @@ def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
return (txid, signresult["hex"], fee)
+
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
@@ -564,6 +592,7 @@ def create_confirmed_utxos(fee, node, count):
assert len(utxos) >= count
return utxos
+
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
@@ -583,6 +612,7 @@ def gen_return_txouts():
txouts.append(txout)
return txouts
+
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
@@ -606,6 +636,7 @@ def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
txids.append(txid)
return txids
+
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
@@ -619,6 +650,7 @@ def mine_large_block(node, utxos=None):
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
+
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py
index bb81746715..cc2139a027 100755
--- a/test/functional/wallet_upgradewallet.py
+++ b/test/functional/wallet_upgradewallet.py
@@ -16,7 +16,6 @@ import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
- adjust_bitcoin_conf_for_pre_17,
assert_equal,
assert_greater_than,
assert_is_hex_string,
@@ -46,9 +45,6 @@ class UpgradeWalletTest(BitcoinTestFramework):
160300,
150200,
])
- # adapt bitcoin.conf, because older bitcoind's don't recognize config sections
- adjust_bitcoin_conf_for_pre_17(self.nodes[1].bitcoinconf)
- adjust_bitcoin_conf_for_pre_17(self.nodes[2].bitcoinconf)
self.start_nodes()
def dumb_sync_blocks(self):