Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.am | 9
-rw-r--r--  src/Makefile.leveldb.include | 1
-rw-r--r--  src/bench/wallet_loading.cpp | 76
-rw-r--r--  src/coins.h | 1
-rw-r--r--  src/consensus/consensus.h | 2
-rw-r--r--  src/dbwrapper.cpp | 2
-rw-r--r--  src/dbwrapper.h | 21
-rw-r--r--  src/init.cpp | 40
-rw-r--r--  src/kernel/mempool_limits.h | 30
-rw-r--r--  src/kernel/mempool_options.h | 38
-rw-r--r--  src/mempool_args.cpp | 37
-rw-r--r--  src/mempool_args.h | 22
-rw-r--r--  src/minisketch/README.md | 4
-rw-r--r--  src/minisketch/include/minisketch.h | 3
-rw-r--r--  src/net.cpp | 15
-rw-r--r--  src/net_processing.cpp | 538
-rw-r--r--  src/net_processing.h | 8
-rw-r--r--  src/node/interfaces.cpp | 19
-rw-r--r--  src/policy/fees.cpp | 16
-rw-r--r--  src/policy/fees.h | 4
-rw-r--r--  src/policy/fees_args.cpp | 12
-rw-r--r--  src/policy/fees_args.h | 15
-rw-r--r--  src/policy/packages.h | 4
-rw-r--r--  src/policy/policy.h | 9
-rw-r--r--  src/psbt.cpp | 69
-rw-r--r--  src/psbt.h | 288
-rw-r--r--  src/pubkey.h | 3
-rw-r--r--  src/qt/bitcoin.cpp | 23
-rw-r--r--  src/qt/bitcoingui.cpp | 1
-rw-r--r--  src/qt/optionsdialog.cpp | 15
-rw-r--r--  src/qt/optionsdialog.h | 3
-rw-r--r--  src/rpc/blockchain.cpp | 10
-rw-r--r--  src/rpc/client.cpp | 1
-rw-r--r--  src/rpc/fees.cpp | 2
-rw-r--r--  src/rpc/mempool.cpp | 157
-rw-r--r--  src/rpc/net.cpp | 6
-rw-r--r--  src/rpc/rawtransaction.cpp | 169
-rw-r--r--  src/rpc/server.cpp | 12
-rw-r--r--  src/script/descriptor.cpp | 2
-rw-r--r--  src/script/sign.cpp | 26
-rw-r--r--  src/script/sign.h | 2
-rw-r--r--  src/script/signingprovider.cpp | 21
-rw-r--r--  src/script/signingprovider.h | 5
-rw-r--r--  src/script/standard.cpp | 17
-rw-r--r--  src/script/standard.h | 2
-rw-r--r--  src/test/denialofservice_tests.cpp | 10
-rw-r--r--  src/test/fuzz/policy_estimator.cpp | 8
-rw-r--r--  src/test/fuzz/policy_estimator_io.cpp | 8
-rw-r--r--  src/test/fuzz/rbf.cpp | 21
-rw-r--r--  src/test/fuzz/rpc.cpp | 1
-rw-r--r--  src/test/fuzz/tx_pool.cpp | 20
-rw-r--r--  src/test/fuzz/util.cpp | 53
-rw-r--r--  src/test/fuzz/util.h | 6
-rw-r--r--  src/test/fuzz/validation_load_mempool.cpp | 4
-rw-r--r--  src/test/mempool_tests.cpp | 8
-rw-r--r--  src/test/miner_tests.cpp | 4
-rw-r--r--  src/test/util/net.h | 10
-rw-r--r--  src/test/util/setup_common.cpp | 26
-rw-r--r--  src/test/util/setup_common.h | 3
-rw-r--r--  src/txdb.cpp | 6
-rw-r--r--  src/txmempool.cpp | 40
-rw-r--r--  src/txmempool.h | 46
-rw-r--r--  src/util/error.cpp | 2
-rw-r--r--  src/util/error.h | 1
-rw-r--r--  src/util/sock.cpp | 15
-rw-r--r--  src/util/sock.h | 19
-rw-r--r--  src/util/system.cpp | 59
-rw-r--r--  src/util/system.h | 8
-rw-r--r--  src/util/time.h | 12
-rw-r--r--  src/validation.cpp | 46
-rw-r--r--  src/validation.h | 2
-rw-r--r--  src/wallet/bdb.cpp | 6
-rw-r--r--  src/wallet/coinselection.cpp | 3
-rw-r--r--  src/wallet/scriptpubkeyman.cpp | 13
-rw-r--r--  src/wallet/spend.cpp | 14
-rw-r--r--  src/wallet/test/coinselector_tests.cpp | 7
-rw-r--r--  src/wallet/wallet.cpp | 29
-rw-r--r--  src/wallet/walletdb.cpp | 41
-rw-r--r--  src/wallet/walletdb.h | 1
79 files changed, 1816 insertions, 496 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
index a3b975809b..bc0982f74d 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -173,11 +173,14 @@ BITCOIN_CORE_H = \
kernel/checks.h \
kernel/coinstats.h \
kernel/context.h \
+ kernel/mempool_limits.h \
+ kernel/mempool_options.h \
key.h \
key_io.h \
logging.h \
logging/timer.h \
mapport.h \
+ mempool_args.h \
memusage.h \
merkleblock.h \
net.h \
@@ -203,6 +206,7 @@ BITCOIN_CORE_H = \
outputtype.h \
policy/feerate.h \
policy/fees.h \
+ policy/fees_args.h \
policy/packages.h \
policy/policy.h \
policy/rbf.h \
@@ -361,6 +365,7 @@ libbitcoin_node_a_SOURCES = \
kernel/coinstats.cpp \
kernel/context.cpp \
mapport.cpp \
+ mempool_args.cpp \
net.cpp \
netgroup.cpp \
net_processing.cpp \
@@ -377,6 +382,7 @@ libbitcoin_node_a_SOURCES = \
node/interface_ui.cpp \
noui.cpp \
policy/fees.cpp \
+ policy/fees_args.cpp \
policy/packages.cpp \
policy/rbf.cpp \
policy/settings.cpp \
@@ -848,8 +854,7 @@ endif
# TODO: libbitcoinkernel is a work in progress consensus engine library, as more
# and more modules are decoupled from the consensus engine, this list will
-# shrink to only those which are absolutely necessary. For example, things
-# like index/*.cpp will be removed.
+# shrink to only those which are absolutely necessary.
libbitcoinkernel_la_SOURCES = \
kernel/bitcoinkernel.cpp \
arith_uint256.cpp \
diff --git a/src/Makefile.leveldb.include b/src/Makefile.leveldb.include
index 066f8940c5..bf14fe206b 100644
--- a/src/Makefile.leveldb.include
+++ b/src/Makefile.leveldb.include
@@ -13,7 +13,6 @@ LIBMEMENV = $(LIBMEMENV_INT)
LEVELDB_CPPFLAGS =
LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/include
-LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/helpers/memenv
LEVELDB_CPPFLAGS_INT =
LEVELDB_CPPFLAGS_INT += -I$(srcdir)/leveldb
diff --git a/src/bench/wallet_loading.cpp b/src/bench/wallet_loading.cpp
index 38d3460001..f611383788 100644
--- a/src/bench/wallet_loading.cpp
+++ b/src/bench/wallet_loading.cpp
@@ -17,20 +17,19 @@
#include <optional>
using wallet::CWallet;
+using wallet::DatabaseFormat;
using wallet::DatabaseOptions;
-using wallet::DatabaseStatus;
using wallet::ISMINE_SPENDABLE;
using wallet::MakeWalletDatabase;
+using wallet::TxStateInactive;
using wallet::WALLET_FLAG_DESCRIPTORS;
using wallet::WalletContext;
+using wallet::WalletDatabase;
-static const std::shared_ptr<CWallet> BenchLoadWallet(WalletContext& context, DatabaseOptions& options)
+static const std::shared_ptr<CWallet> BenchLoadWallet(std::unique_ptr<WalletDatabase> database, WalletContext& context, DatabaseOptions& options)
{
- DatabaseStatus status;
bilingual_str error;
std::vector<bilingual_str> warnings;
- auto database = MakeWalletDatabase("", options, status, error);
- assert(database);
auto wallet = CWallet::Create(context, "", std::move(database), options.create_flags, error, warnings);
NotifyWalletLoaded(context, wallet);
if (context.chain) {
@@ -46,9 +45,47 @@ static void BenchUnloadWallet(std::shared_ptr<CWallet>&& wallet)
UnloadWallet(std::move(wallet));
}
+static void AddTx(CWallet& wallet)
+{
+ bilingual_str error;
+ CTxDestination dest;
+ wallet.GetNewDestination(OutputType::BECH32, "", dest, error);
+
+ CMutableTransaction mtx;
+ mtx.vout.push_back({COIN, GetScriptForDestination(dest)});
+ mtx.vin.push_back(CTxIn());
+
+ wallet.AddToWallet(MakeTransactionRef(mtx), TxStateInactive{});
+}
+
+static std::unique_ptr<WalletDatabase> DuplicateMockDatabase(WalletDatabase& database, DatabaseOptions& options)
+{
+ auto new_database = CreateMockWalletDatabase(options);
+
+ // Get a cursor to the original database
+ auto batch = database.MakeBatch();
+ batch->StartCursor();
+
+ // Get a batch for the new database
+ auto new_batch = new_database->MakeBatch();
+
+ // Read all records from the original database and write them to the new one
+ while (true) {
+ CDataStream key(SER_DISK, CLIENT_VERSION);
+ CDataStream value(SER_DISK, CLIENT_VERSION);
+ bool complete;
+ batch->ReadAtCursor(key, value, complete);
+ if (complete) break;
+ new_batch->Write(key, value);
+ }
+
+ return new_database;
+}
+
static void WalletLoading(benchmark::Bench& bench, bool legacy_wallet)
{
const auto test_setup = MakeNoLogFileContext<TestingSetup>();
+ test_setup->m_args.ForceSetArg("-unsafesqlitesync", "1");
WalletContext context;
context.args = &test_setup->m_args;
@@ -57,27 +94,40 @@ static void WalletLoading(benchmark::Bench& bench, bool legacy_wallet)
// Setup the wallet
// Loading the wallet will also create it
DatabaseOptions options;
- if (!legacy_wallet) options.create_flags = WALLET_FLAG_DESCRIPTORS;
- auto wallet = BenchLoadWallet(context, options);
+ if (legacy_wallet) {
+ options.require_format = DatabaseFormat::BERKELEY;
+ } else {
+ options.create_flags = WALLET_FLAG_DESCRIPTORS;
+ options.require_format = DatabaseFormat::SQLITE;
+ }
+ auto database = CreateMockWalletDatabase(options);
+ auto wallet = BenchLoadWallet(std::move(database), context, options);
// Generate a bunch of transactions and addresses to put into the wallet
- for (int i = 0; i < 5000; ++i) {
- generatetoaddress(test_setup->m_node, getnewaddress(*wallet));
+ for (int i = 0; i < 1000; ++i) {
+ AddTx(*wallet);
}
+ database = DuplicateMockDatabase(wallet->GetDatabase(), options);
+
// reload the wallet for the actual benchmark
BenchUnloadWallet(std::move(wallet));
- bench.minEpochIterations(10).run([&] {
- wallet = BenchLoadWallet(context, options);
+ bench.epochs(5).run([&] {
+ wallet = BenchLoadWallet(std::move(database), context, options);
// Cleanup
+ database = DuplicateMockDatabase(wallet->GetDatabase(), options);
BenchUnloadWallet(std::move(wallet));
});
}
+#ifdef USE_BDB
static void WalletLoadingLegacy(benchmark::Bench& bench) { WalletLoading(bench, /*legacy_wallet=*/true); }
-static void WalletLoadingDescriptors(benchmark::Bench& bench) { WalletLoading(bench, /*legacy_wallet=*/false); }
-
BENCHMARK(WalletLoadingLegacy);
+#endif
+
+#ifdef USE_SQLITE
+static void WalletLoadingDescriptors(benchmark::Bench& bench) { WalletLoading(bench, /*legacy_wallet=*/false); }
BENCHMARK(WalletLoadingDescriptors);
+#endif
diff --git a/src/coins.h b/src/coins.h
index de297dd427..67fecc9785 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -142,7 +142,6 @@ public:
virtual bool GetKey(COutPoint &key) const = 0;
virtual bool GetValue(Coin &coin) const = 0;
- virtual unsigned int GetValueSize() const = 0;
virtual bool Valid() const = 0;
virtual void Next() = 0;
diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h
index 788fa4e55b..b2a31e3ba4 100644
--- a/src/consensus/consensus.h
+++ b/src/consensus/consensus.h
@@ -26,7 +26,5 @@ static const size_t MIN_SERIALIZABLE_TRANSACTION_WEIGHT = WITNESS_SCALE_FACTOR *
/** Flags for nSequence and nLockTime locks */
/** Interpret sequence numbers as relative lock-time constraints. */
static constexpr unsigned int LOCKTIME_VERIFY_SEQUENCE = (1 << 0);
-/** Use GetMedianTimePast() instead of nTime for end point timestamp. */
-static constexpr unsigned int LOCKTIME_MEDIAN_TIME_PAST = (1 << 1);
#endif // BITCOIN_CONSENSUS_CONSENSUS_H
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index a2f1f32780..d4a8e4f35a 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -10,7 +10,7 @@
#include <leveldb/cache.h>
#include <leveldb/env.h>
#include <leveldb/filter_policy.h>
-#include <memenv.h>
+#include <leveldb/helpers/memenv/memenv.h>
#include <stdint.h>
#include <algorithm>
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index 1109cb5888..cef8426d61 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -166,11 +166,6 @@ public:
}
return true;
}
-
- unsigned int GetValueSize() {
- return piter->value().size();
- }
-
};
class CDBWrapper
@@ -318,22 +313,6 @@ public:
pdb->GetApproximateSizes(&range, 1, &size);
return size;
}
-
- /**
- * Compact a certain range of keys in the database.
- */
- template<typename K>
- void CompactRange(const K& key_begin, const K& key_end) const
- {
- CDataStream ssKey1(SER_DISK, CLIENT_VERSION), ssKey2(SER_DISK, CLIENT_VERSION);
- ssKey1.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
- ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
- ssKey1 << key_begin;
- ssKey2 << key_end;
- leveldb::Slice slKey1((const char*)ssKey1.data(), ssKey1.size());
- leveldb::Slice slKey2((const char*)ssKey2.data(), ssKey2.size());
- pdb->CompactRange(&slKey1, &slKey2);
- }
};
#endif // BITCOIN_DBWRAPPER_H
diff --git a/src/init.cpp b/src/init.cpp
index f20c55dcb1..d844e9b169 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -30,6 +30,7 @@
#include <interfaces/init.h>
#include <interfaces/node.h>
#include <mapport.h>
+#include <mempool_args.h>
#include <net.h>
#include <net_permissions.h>
#include <net_processing.h>
@@ -39,10 +40,11 @@
#include <node/caches.h>
#include <node/chainstate.h>
#include <node/context.h>
-#include <node/miner.h>
#include <node/interface_ui.h>
+#include <node/miner.h>
#include <policy/feerate.h>
#include <policy/fees.h>
+#include <policy/fees_args.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <protocol.h>
@@ -62,6 +64,7 @@
#include <txorphanage.h>
#include <util/asmap.h>
#include <util/check.h>
+#include <util/designator.h>
#include <util/moneystr.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -413,9 +416,9 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE_MB), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY_HOURS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s, signet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex(), signetChainParams->GetConsensus().nMinimumChainWork.GetHex()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
argsman.AddArg("-par=<n>", strprintf("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)",
-GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -536,9 +539,9 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- argsman.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-capturemessages", "Capture all P2P messages to disk", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
@@ -928,11 +931,6 @@ bool AppInitParameterInteraction(const ArgsManager& args, bool use_syscall_sandb
LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainparams.GetConsensus().nMinimumChainWork.GetHex());
}
- // mempool limits
- int64_t nMempoolSizeMax = args.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t nMempoolSizeMin = args.GetIntArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40;
- if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin)
- return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0)));
// incremental relay fee sets the minimum feerate increase necessary for BIP 125 replacement in the mempool
// and the amount the mempool min fee increases above the feerate of txs evicted due to mempool limiting.
if (args.IsArgSet("-incrementalrelayfee")) {
@@ -968,7 +966,7 @@ bool AppInitParameterInteraction(const ArgsManager& args, bool use_syscall_sandb
peer_connect_timeout = args.GetIntArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
if (peer_connect_timeout <= 0) {
- return InitError(Untranslated("peertimeout cannot be configured with a negative value."));
+ return InitError(Untranslated("peertimeout must be a positive integer."));
}
if (args.IsArgSet("-minrelaytxfee")) {
@@ -1294,7 +1292,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
assert(!node.fee_estimator);
// Don't initialize fee estimation with old data if we don't relay transactions,
// as they would never get updated.
- if (!ignores_incoming_txs) node.fee_estimator = std::make_unique<CBlockPolicyEstimator>();
+ if (!ignores_incoming_txs) node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(args));
// sanitize comments per BIP-0014, format user agent and check total size
std::vector<std::string> uacomments;
@@ -1411,7 +1409,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// cache size calculations
CacheSizes cache_sizes = CalculateCacheSizes(args, g_enabled_filter_types.size());
- int64_t nMempoolSizeMax = args.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
LogPrintf("Cache configuration:\n");
LogPrintf("* Using %.1f MiB for block index database\n", cache_sizes.block_tree_db * (1.0 / 1024 / 1024));
if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
@@ -1422,14 +1419,25 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
cache_sizes.filter_index * (1.0 / 1024 / 1024), BlockFilterTypeName(filter_type));
}
LogPrintf("* Using %.1f MiB for chain state database\n", cache_sizes.coins_db * (1.0 / 1024 / 1024));
- LogPrintf("* Using %.1f MiB for in-memory UTXO set (plus up to %.1f MiB of unused mempool space)\n", cache_sizes.coins * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024));
assert(!node.mempool);
assert(!node.chainman);
- const int mempool_check_ratio = std::clamp<int>(args.GetIntArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0, 1000000);
+
+ CTxMemPool::Options mempool_opts{
+ Desig(estimator) node.fee_estimator.get(),
+ Desig(check_ratio) chainparams.DefaultConsistencyChecks() ? 1 : 0,
+ };
+ ApplyArgsManOptions(args, mempool_opts);
+ mempool_opts.check_ratio = std::clamp<int>(mempool_opts.check_ratio, 0, 1'000'000);
+
+ int64_t descendant_limit_bytes = mempool_opts.limits.descendant_size_vbytes * 40;
+ if (mempool_opts.max_size_bytes < 0 || mempool_opts.max_size_bytes < descendant_limit_bytes) {
+ return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(descendant_limit_bytes / 1'000'000.0)));
+ }
+ LogPrintf("* Using %.1f MiB for in-memory UTXO set (plus up to %.1f MiB of unused mempool space)\n", cache_sizes.coins * (1.0 / 1024 / 1024), mempool_opts.max_size_bytes * (1.0 / 1024 / 1024));
for (bool fLoaded = false; !fLoaded && !ShutdownRequested();) {
- node.mempool = std::make_unique<CTxMemPool>(node.fee_estimator.get(), mempool_check_ratio);
+ node.mempool = std::make_unique<CTxMemPool>(mempool_opts);
const ChainstateManager::Options chainman_opts{
chainparams,
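
Note on the -maxmempool floor enforced in the hunk above: mempool_opts.limits.descendant_size_vbytes * 40 is the same heuristic as the old DEFAULT_DESCENDANT_SIZE_LIMIT * 1000 * 40 check removed from AppInitParameterInteraction. As a rough worked example (assuming the default descendant package limit of 101 kvB), the floor is 101'000 vB * 40 = 4'040'000 bytes, and std::ceil(4'040'000 / 1'000'000.0) = 5, i.e. values below that trigger the familiar "-maxmempool must be at least 5 MB" error.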
diff --git a/src/kernel/mempool_limits.h b/src/kernel/mempool_limits.h
new file mode 100644
index 0000000000..e192e7e6cd
--- /dev/null
+++ b/src/kernel/mempool_limits.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#ifndef BITCOIN_KERNEL_MEMPOOL_LIMITS_H
+#define BITCOIN_KERNEL_MEMPOOL_LIMITS_H
+
+#include <policy/policy.h>
+
+#include <cstdint>
+
+namespace kernel {
+/**
+ * Options struct containing limit options for a CTxMemPool. Default constructor
+ * populates the struct with sane default values which can be modified.
+ *
+ * Most of the time, this struct should be referenced as CTxMemPool::Limits.
+ */
+struct MemPoolLimits {
+ //! The maximum allowed number of transactions in a package including the entry and its ancestors.
+ int64_t ancestor_count{DEFAULT_ANCESTOR_LIMIT};
+ //! The maximum allowed size in virtual bytes of an entry and its ancestors within a package.
+ int64_t ancestor_size_vbytes{DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1'000};
+ //! The maximum allowed number of transactions in a package including the entry and its descendants.
+ int64_t descendant_count{DEFAULT_DESCENDANT_LIMIT};
+ //! The maximum allowed size in virtual bytes of an entry and its descendants within a package.
+ int64_t descendant_size_vbytes{DEFAULT_DESCENDANT_SIZE_LIMIT_KVB * 1'000};
+};
+} // namespace kernel
+
+#endif // BITCOIN_KERNEL_MEMPOOL_LIMITS_H
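
As a usage illustration (not part of this diff), the defaults above can be overridden field by field after value-initialization; the kvB-to-vbyte scaling mirrors the struct's own initializers, and all values below are hypothetical:

#include <kernel/mempool_limits.h>

// Hypothetical sketch: start from the policy.h defaults and tighten the
// ancestor package limits. Field names are the ones declared above.
kernel::MemPoolLimits TighterLimits()
{
    kernel::MemPoolLimits limits{};            // default-initialized limits
    limits.ancestor_count = 10;                // hypothetical: at most 10 in-mempool ancestors
    limits.ancestor_size_vbytes = 50 * 1'000;  // hypothetical: 50 kvB ancestor package size
    return limits;
}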
diff --git a/src/kernel/mempool_options.h b/src/kernel/mempool_options.h
new file mode 100644
index 0000000000..a14abb6628
--- /dev/null
+++ b/src/kernel/mempool_options.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#ifndef BITCOIN_KERNEL_MEMPOOL_OPTIONS_H
+#define BITCOIN_KERNEL_MEMPOOL_OPTIONS_H
+
+#include <kernel/mempool_limits.h>
+
+#include <chrono>
+#include <cstdint>
+
+class CBlockPolicyEstimator;
+
+/** Default for -maxmempool, maximum megabytes of mempool memory usage */
+static constexpr unsigned int DEFAULT_MAX_MEMPOOL_SIZE_MB{300};
+/** Default for -mempoolexpiry, expiration time for mempool transactions in hours */
+static constexpr unsigned int DEFAULT_MEMPOOL_EXPIRY_HOURS{336};
+
+namespace kernel {
+/**
+ * Options struct containing options for constructing a CTxMemPool. Default
+ * constructor populates the struct with sane default values which can be
+ * modified.
+ *
+ * Most of the time, this struct should be referenced as CTxMemPool::Options.
+ */
+struct MemPoolOptions {
+ /* Used to estimate appropriate transaction fees. */
+ CBlockPolicyEstimator* estimator{nullptr};
+ /* The ratio used to determine how often sanity checks will run. */
+ int check_ratio{0};
+ int64_t max_size_bytes{DEFAULT_MAX_MEMPOOL_SIZE_MB * 1'000'000};
+ std::chrono::seconds expiry{std::chrono::hours{DEFAULT_MEMPOOL_EXPIRY_HOURS}};
+ MemPoolLimits limits{};
+};
+} // namespace kernel
+
+#endif // BITCOIN_KERNEL_MEMPOOL_OPTIONS_H
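
For illustration only, direct construction of the options struct can use the same Desig() helper (util/designator.h) that the init.cpp hunk above uses; the values below are hypothetical and omitted fields keep their defaults:

#include <kernel/mempool_options.h>
#include <util/designator.h>

#include <chrono>
#include <cstdint>

// Sketch: a 100 MB mempool with a one-week expiry, no fee estimator and no
// sanity checking. The limits member is left at its defaults.
kernel::MemPoolOptions MakeExampleMemPoolOptions()
{
    return kernel::MemPoolOptions{
        Desig(estimator) nullptr,
        Desig(check_ratio) 0,
        Desig(max_size_bytes) int64_t{100} * 1'000'000,
        Desig(expiry) std::chrono::hours{168},
    };
}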
diff --git a/src/mempool_args.cpp b/src/mempool_args.cpp
new file mode 100644
index 0000000000..e26cbe0275
--- /dev/null
+++ b/src/mempool_args.cpp
@@ -0,0 +1,37 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <mempool_args.h>
+
+#include <kernel/mempool_limits.h>
+#include <kernel/mempool_options.h>
+
+#include <util/system.h>
+
+using kernel::MemPoolLimits;
+using kernel::MemPoolOptions;
+
+namespace {
+void ApplyArgsManOptions(const ArgsManager& argsman, MemPoolLimits& mempool_limits)
+{
+ mempool_limits.ancestor_count = argsman.GetIntArg("-limitancestorcount", mempool_limits.ancestor_count);
+
+ if (auto vkb = argsman.GetIntArg("-limitancestorsize")) mempool_limits.ancestor_size_vbytes = *vkb * 1'000;
+
+ mempool_limits.descendant_count = argsman.GetIntArg("-limitdescendantcount", mempool_limits.descendant_count);
+
+ if (auto vkb = argsman.GetIntArg("-limitdescendantsize")) mempool_limits.descendant_size_vbytes = *vkb * 1'000;
+}
+}
+
+void ApplyArgsManOptions(const ArgsManager& argsman, MemPoolOptions& mempool_opts)
+{
+ mempool_opts.check_ratio = argsman.GetIntArg("-checkmempool", mempool_opts.check_ratio);
+
+ if (auto mb = argsman.GetIntArg("-maxmempool")) mempool_opts.max_size_bytes = *mb * 1'000'000;
+
+ if (auto hours = argsman.GetIntArg("-mempoolexpiry")) mempool_opts.expiry = std::chrono::hours{*hours};
+
+ ApplyArgsManOptions(argsman, mempool_opts.limits);
+}
diff --git a/src/mempool_args.h b/src/mempool_args.h
new file mode 100644
index 0000000000..9a4abe6618
--- /dev/null
+++ b/src/mempool_args.h
@@ -0,0 +1,22 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_MEMPOOL_ARGS_H
+#define BITCOIN_MEMPOOL_ARGS_H
+
+class ArgsManager;
+namespace kernel {
+struct MemPoolOptions;
+};
+
+/**
+ * Overlay the options set in \p argsman on top of corresponding members in \p mempool_opts.
+ *
+ * @param[in] argsman The ArgsManager in which to check set options.
+ * @param[in,out] mempool_opts The MemPoolOptions to modify according to \p argsman.
+ */
+void ApplyArgsManOptions(const ArgsManager& argsman, kernel::MemPoolOptions& mempool_opts);
+
+
+#endif // BITCOIN_MEMPOOL_ARGS_H
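
Putting the pieces together, the intended call order is defaults, then command-line overlay, then construction. The helper below is hypothetical, but it mirrors the init.cpp hunk earlier in this diff:

#include <kernel/mempool_options.h>
#include <mempool_args.h>
#include <txmempool.h>
#include <util/system.h>

#include <memory>

// Hypothetical helper: build a mempool from user configuration. Any of
// -checkmempool, -maxmempool, -mempoolexpiry and the -limit* options set in
// the ArgsManager override the defaults; everything else keeps its default.
std::unique_ptr<CTxMemPool> MakeMemPoolFromArgs(const ArgsManager& args)
{
    kernel::MemPoolOptions opts{};
    ApplyArgsManOptions(args, opts);
    return std::make_unique<CTxMemPool>(opts);
}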
diff --git a/src/minisketch/README.md b/src/minisketch/README.md
index c0cfdc1623..f8b89ff33e 100644
--- a/src/minisketch/README.md
+++ b/src/minisketch/README.md
@@ -203,8 +203,8 @@ Some improvements that are still TODO:
* <a name="myfootnote4">[4]</a> Bhaskar Biswas, Vincent Herbert. *Efficient Root Finding of Polynomials over Fields of Characteristic 2.* 2009. hal-00626997. [[URL]](https://hal.archives-ouvertes.fr/hal-00626997) [[PDF]](https://hal.archives-ouvertes.fr/hal-00626997/document)
* <a name="myfootnote6">[6]</a> Eppstein, David, Michael T. Goodrich, Frank Uyeda, and George Varghese. *What's the difference?: efficient set reconciliation without prior context.* ACM SIGCOMM Computer Communication Review, vol. 41, no. 4, pp. 218-229. ACM, 2011. [[PDF]](https://www.ics.uci.edu/~eppstein/pubs/EppGooUye-SIGCOMM-11.pdf)
* <a name="myfootnote7">[7]</a> Goodrich, Michael T. and Michael Mitzenmacher. *Invertible bloom lookup tables.* 2011 49th Annual Allerton Conference on Communication, Control, and Computing (Allerton) (2011): 792-799. [[PDF]](https://arxiv.org/pdf/1101.2245.pdf)
-* <a name="myfootnote8">[8]</a> Maxwell, Gregory F. *[Blocksonly mode BW savings, the limits of efficient block xfer, and better relay](https://bitcointalk.org/index.php?topic=1377345.0)* Bitcointalk 2016, *[Technical notes on mempool synchronizing relay](https://people.xiph.org/~greg/mempool_sync_relay.txt)* #bitcoin-wizards 2016.
-* <a name="myfootnote9">[9]</a> Maxwell, Gregory F. *[Block network coding](https://en.bitcoin.it/wiki/User:Gmaxwell/block_network_coding)* Bitcoin Wiki 2014, *[Technical notes on efficient block xfer](https://people.xiph.org/~greg/efficient.block.xfer.txt)* #bitcoin-wizards 2015.
+* <a name="myfootnote8">[8]</a> Maxwell, Gregory F. *[Blocksonly mode BW savings, the limits of efficient block xfer, and better relay](https://bitcointalk.org/index.php?topic=1377345.0)* Bitcointalk 2016, *[Technical notes on mempool synchronizing relay](https://nt4tn.net/tech-notes/2016.mempool_sync_relay.txt)* #bitcoin-wizards 2016.
+* <a name="myfootnote9">[9]</a> Maxwell, Gregory F. *[Block network coding](https://en.bitcoin.it/wiki/User:Gmaxwell/block_network_coding)* Bitcoin Wiki 2014, *[Technical notes on efficient block xfer](https://nt4tn.net/tech-notes/201512.efficient.block.xfer.txt)* #bitcoin-wizards 2015.
* <a name="myfootnote10">[10]</a> Ruffing, Tim, Moreno-Sanchez, Pedro, Aniket, Kate, *P2P Mixing and Unlinkable Bitcoin Transactions* NDSS Symposium 2017 [[URL]](https://eprint.iacr.org/2016/824) [[PDF]](https://eprint.iacr.org/2016/824.pdf)
* <a name="myfootnote11">[11]</a> Y. Misky, A. Trachtenberg, R. Zippel. *Set Reconciliation with Nearly Optimal Communication Complexity.* Cornell University, 2000. [[URL]](https://ecommons.cornell.edu/handle/1813/5803) [[PDF]](https://ecommons.cornell.edu/bitstream/handle/1813/5803/2000-1813.pdf)
* <a name="myfootnote12">[12]</a> Itoh, Toshiya, and Shigeo Tsujii. "A fast algorithm for computing multiplicative inverses in GF (2m) using normal bases." Information and computation 78, no. 3 (1988): 171-177. [[URL]](https://www.sciencedirect.com/science/article/pii/0890540188900247)
diff --git a/src/minisketch/include/minisketch.h b/src/minisketch/include/minisketch.h
index 0b5d8372e8..24d6b4e1c0 100644
--- a/src/minisketch/include/minisketch.h
+++ b/src/minisketch/include/minisketch.h
@@ -5,7 +5,8 @@
#include <stdlib.h>
#ifdef _MSC_VER
-# include <compat.h>
+# include <BaseTsd.h>
+ typedef SSIZE_T ssize_t;
#else
# include <unistd.h>
#endif
diff --git a/src/net.cpp b/src/net.cpp
index c05fa771ef..7f4e571c8d 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -422,13 +422,13 @@ bool CConnman::CheckIncomingNonce(uint64_t nonce)
}
/** Get the bind address for a socket as CAddress */
-static CAddress GetBindAddress(SOCKET sock)
+static CAddress GetBindAddress(const Sock& sock)
{
CAddress addr_bind;
struct sockaddr_storage sockaddr_bind;
socklen_t sockaddr_bind_len = sizeof(sockaddr_bind);
- if (sock != INVALID_SOCKET) {
- if (!getsockname(sock, (struct sockaddr*)&sockaddr_bind, &sockaddr_bind_len)) {
+ if (sock.Get() != INVALID_SOCKET) {
+ if (!sock.GetSockName((struct sockaddr*)&sockaddr_bind, &sockaddr_bind_len)) {
addr_bind.SetSockAddr((const struct sockaddr*)&sockaddr_bind);
} else {
LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "getsockname failed\n");
@@ -540,7 +540,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
if (!addr_bind.IsValid()) {
- addr_bind = GetBindAddress(sock->Get());
+ addr_bind = GetBindAddress(*sock);
}
CNode* pnode = new CNode(id,
nLocalServices,
@@ -1154,7 +1154,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
addr = CAddress{MaybeFlipIPv6toCJDNS(addr), NODE_NONE};
}
- const CAddress addr_bind{MaybeFlipIPv6toCJDNS(GetBindAddress(sock->Get())), NODE_NONE};
+ const CAddress addr_bind{MaybeFlipIPv6toCJDNS(GetBindAddress(*sock)), NODE_NONE};
NetPermissionFlags permissionFlags = NetPermissionFlags::None;
hListenSocket.AddSocketPermissionFlags(permissionFlags);
@@ -2323,8 +2323,7 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError,
#endif
}
- if (::bind(sock->Get(), (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR)
- {
+ if (sock->Bind(reinterpret_cast<struct sockaddr*>(&sockaddr), len) == SOCKET_ERROR) {
int nErr = WSAGetLastError();
if (nErr == WSAEADDRINUSE)
strError = strprintf(_("Unable to bind to %s on this computer. %s is probably already running."), addrBind.ToString(), PACKAGE_NAME);
@@ -2336,7 +2335,7 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError,
LogPrintf("Bound to %s\n", addrBind.ToString());
// Listen for incoming connections
- if (listen(sock->Get(), SOMAXCONN) == SOCKET_ERROR)
+ if (sock->Listen(SOMAXCONN) == SOCKET_ERROR)
{
strError = strprintf(_("Listening for incoming connections failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError()));
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
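
The net.cpp hunks above route bind/listen/getsockname through the Sock wrapper instead of operating on a raw SOCKET. A rough sketch of the resulting pattern for a listening socket follows; CreateSock() is assumed from netbase, error reporting is elided, and only the Sock methods exercised by this diff are used:

#include <netaddress.h>
#include <netbase.h>
#include <util/sock.h>

#include <memory>

// Sketch only: bind to addr_bind and start listening via the Sock wrapper.
bool BindAndListen(const CService& addr_bind)
{
    std::unique_ptr<Sock> sock{CreateSock(addr_bind)};
    if (!sock) return false;

    struct sockaddr_storage storage;
    socklen_t len = sizeof(storage);
    if (!addr_bind.GetSockAddr(reinterpret_cast<struct sockaddr*>(&storage), &len)) return false;

    if (sock->Bind(reinterpret_cast<struct sockaddr*>(&storage), len) == SOCKET_ERROR) return false;
    if (sock->Listen(SOMAXCONN) == SOCKET_ERROR) return false;
    return true;
}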
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 751a03f01c..c33dd29923 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -61,6 +61,8 @@ static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
* Timeout = base + per_header * (expected number of headers) */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
+/** How long to wait for a peer to respond to a getheaders request */
+static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
@@ -355,6 +357,9 @@ struct Peer {
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
+ /** Time of the last getheaders message to this peer */
+ std::atomic<NodeClock::time_point> m_last_getheaders_timestamp{NodeSeconds{}};
+
Peer(NodeId id)
: m_id{id}
{}
@@ -493,7 +498,7 @@ public:
void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void SetBestHeight(int height) override { m_best_height = height; };
- void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+ void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); };
void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex);
@@ -501,7 +506,7 @@ public:
private:
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
- void ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -518,6 +523,12 @@ private:
PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
+ * Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node
+ * to be discouraged, meaning the peer might be disconnected and added to the discouragement filter.
+ */
+ void Misbehaving(Peer& peer, int howmuch, const std::string& message);
+
+ /**
* Potentially mark a node discouraged based on the contents of a BlockValidationState object
*
* @param[in] via_compact_block this bool is passed in because net_processing should
@@ -550,13 +561,28 @@ private:
void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Process a single headers message from a peer. */
- void ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
+ void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
const std::vector<CBlockHeader>& headers,
bool via_compact_block)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+ /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */
+ /** Deal with state tracking and headers sync for peers that send the
+ * occasional non-connecting header (this can happen due to BIP 130 headers
+ * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */
+ void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers);
+ /** Return true if the headers connect to each other, false otherwise */
+ bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
+ /** Request further headers from this peer with a given locator.
+ * We don't issue a getheaders message if we have a recent one outstanding.
+ * This returns true if a getheaders is actually sent, and false otherwise.
+ */
+ bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer);
+ /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
+ void HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast);
+ /** Update peer state based on received headers message */
+ void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers);
- void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req)
- EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+ void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
/** Register with TxRequestTracker that an INV has been received from a
* peer. The announcement parameters are decided in PeerManager and then
@@ -1444,33 +1470,31 @@ void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}
-void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message)
+void PeerManagerImpl::Misbehaving(Peer& peer, int howmuch, const std::string& message)
{
assert(howmuch > 0);
- PeerRef peer = GetPeerRef(pnode);
- if (peer == nullptr) return;
-
- LOCK(peer->m_misbehavior_mutex);
- const int score_before{peer->m_misbehavior_score};
- peer->m_misbehavior_score += howmuch;
- const int score_now{peer->m_misbehavior_score};
+ LOCK(peer.m_misbehavior_mutex);
+ const int score_before{peer.m_misbehavior_score};
+ peer.m_misbehavior_score += howmuch;
+ const int score_now{peer.m_misbehavior_score};
const std::string message_prefixed = message.empty() ? "" : (": " + message);
std::string warning;
if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) {
warning = " DISCOURAGE THRESHOLD EXCEEDED";
- peer->m_should_discourage = true;
+ peer.m_should_discourage = true;
}
LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n",
- pnode, score_before, score_now, warning, message_prefixed);
+ peer.m_id, score_before, score_now, warning, message_prefixed);
}
bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
bool via_compact_block, const std::string& message)
{
+ PeerRef peer{GetPeerRef(nodeid)};
switch (state.GetResult()) {
case BlockValidationResult::BLOCK_RESULT_UNSET:
break;
@@ -1478,7 +1502,7 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
case BlockValidationResult::BLOCK_CONSENSUS:
case BlockValidationResult::BLOCK_MUTATED:
if (!via_compact_block) {
- Misbehaving(nodeid, 100, message);
+ if (peer) Misbehaving(*peer, 100, message);
return true;
}
break;
@@ -1493,7 +1517,7 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
// Discourage outbound (but not inbound) peers if on an invalid chain.
// Exempt HB compact block peers. Manual connections are always protected from discouragement.
if (!via_compact_block && !node_state->m_is_inbound) {
- Misbehaving(nodeid, 100, message);
+ if (peer) Misbehaving(*peer, 100, message);
return true;
}
break;
@@ -1501,12 +1525,12 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
case BlockValidationResult::BLOCK_INVALID_HEADER:
case BlockValidationResult::BLOCK_CHECKPOINT:
case BlockValidationResult::BLOCK_INVALID_PREV:
- Misbehaving(nodeid, 100, message);
+ if (peer) Misbehaving(*peer, 100, message);
return true;
// Conflicting (but not necessarily invalid) data or different policy:
case BlockValidationResult::BLOCK_MISSING_PREV:
// TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
- Misbehaving(nodeid, 10, message);
+ if (peer) Misbehaving(*peer, 10, message);
return true;
case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
case BlockValidationResult::BLOCK_TIME_FUTURE:
@@ -1520,12 +1544,13 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message)
{
+ PeerRef peer{GetPeerRef(nodeid)};
switch (state.GetResult()) {
case TxValidationResult::TX_RESULT_UNSET:
break;
// The node is providing invalid data:
case TxValidationResult::TX_CONSENSUS:
- Misbehaving(nodeid, 100, message);
+ if (peer) Misbehaving(*peer, 100, message);
return true;
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
@@ -2175,12 +2200,12 @@ uint32_t PeerManagerImpl::GetFetchFlags(const CNode& pfrom) const EXCLUSIVE_LOCK
return nFetchFlags;
}
-void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req)
+void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
{
BlockTransactions resp(req);
for (size_t i = 0; i < req.indexes.size(); i++) {
if (req.indexes[i] >= block.vtx.size()) {
- Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices");
+ Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
return;
}
resp.txn[i] = block.vtx[req.indexes[i]];
@@ -2190,7 +2215,204 @@ void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, const CBlock& block, c
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
}
-void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
+/**
+ * Special handling for unconnecting headers that might be part of a block
+ * announcement.
+ *
+ * We'll send a getheaders message in response to try to connect the chain.
+ *
+ * The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+ * don't connect before given DoS points.
+ *
+ * Once a headers message is received that is valid and does connect,
+ * nUnconnectingHeaders gets reset back to 0.
+ */
+void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
+ const std::vector<CBlockHeader>& headers)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+
+ nodestate->nUnconnectingHeaders++;
+ // Try to fill in the missing headers.
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), peer)) {
+ LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+ headers[0].GetHash().ToString(),
+ headers[0].hashPrevBlock.ToString(),
+ m_chainman.m_best_header->nHeight,
+ pfrom.GetId(), nodestate->nUnconnectingHeaders);
+ }
+ // Set hashLastUnknownBlock for this peer, so that if we
+ // eventually get the headers - even from a different peer -
+ // we can use this peer to download.
+ UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
+
+ // The peer may just be broken, so periodically assign DoS points if this
+ // condition persists.
+ if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
+ Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
+ }
+}
+
+bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
+{
+ uint256 hashLastBlock;
+ for (const CBlockHeader& header : headers) {
+ if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
+ return false;
+ }
+ hashLastBlock = header.GetHash();
+ }
+ return true;
+}
+
+bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ const auto current_time = NodeClock::now();
+
+ // Only allow a new getheaders message to go out if we don't have a recent
+ // one already in-flight
+ if (current_time - peer.m_last_getheaders_timestamp.load() > HEADERS_RESPONSE_TIME) {
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
+ peer.m_last_getheaders_timestamp = current_time;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Given a new headers tip ending in pindexLast, potentially request blocks towards that tip.
+ * We require that the given tip have at least as much work as our tip, and for
+ * our current tip to be "close to synced" (see CanDirectFetch()).
+ */
+void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+
+ if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
+
+ std::vector<const CBlockIndex*> vToFetch;
+ const CBlockIndex *pindexWalk = pindexLast;
+ // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+ while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
+ !IsBlockRequested(pindexWalk->GetBlockHash()) &&
+ (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
+ // We don't have this block, and it's not yet in flight.
+ vToFetch.push_back(pindexWalk);
+ }
+ pindexWalk = pindexWalk->pprev;
+ }
+ // If pindexWalk still isn't on our main chain, we're looking at a
+ // very large reorg at a time we think we're close to caught up to
+ // the main chain -- this shouldn't really happen. Bail out on the
+ // direct fetch and rely on parallel download instead.
+ if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
+ LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
+ pindexLast->GetBlockHash().ToString(),
+ pindexLast->nHeight);
+ } else {
+ std::vector<CInv> vGetData;
+ // Download as much as possible, from earliest to latest.
+ for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
+ if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ // Can't download any more from this peer
+ break;
+ }
+ uint32_t nFetchFlags = GetFetchFlags(pfrom);
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ BlockRequested(pfrom.GetId(), *pindex);
+ LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+ pindex->GetBlockHash().ToString(), pfrom.GetId());
+ }
+ if (vGetData.size() > 1) {
+ LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
+ pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+ }
+ if (vGetData.size() > 0) {
+ if (!m_ignore_incoming_txs &&
+ nodestate->m_provides_cmpctblocks &&
+ vGetData.size() == 1 &&
+ mapBlocksInFlight.size() == 1 &&
+ pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+ // In any case, we want to download using a compact block, not a regular one
+ vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+ }
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ }
+ }
+ }
+}
+
+/**
+ * Given receipt of headers from a peer ending in pindexLast, along with
+ * whether that header was new and whether the headers message was full,
+ * update the state we keep for the peer.
+ */
+void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
+ const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers)
+{
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+ if (nodestate->nUnconnectingHeaders > 0) {
+ LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
+ }
+ nodestate->nUnconnectingHeaders = 0;
+
+ assert(pindexLast);
+ UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
+
+ // From here, pindexBestKnownBlock should be guaranteed to be non-null,
+ // because it is set in UpdateBlockAvailability. Some nullptr checks
+ // are still present, however, as belt-and-suspenders.
+
+ if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
+ nodestate->m_last_block_announcement = GetTime();
+ }
+
+ // If we're in IBD, we want outbound peers that will serve us a useful
+ // chain. Disconnect peers that are on chains with insufficient work.
+ if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && !may_have_more_headers) {
+ // If the peer has no more headers to give us, then we know we have
+ // their tip.
+ if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+ // This peer has too little work on their headers chain to help
+ // us sync -- disconnect if it is an outbound disconnection
+ // candidate.
+ // Note: We compare their tip to nMinimumChainWork (rather than
+ // m_chainman.ActiveChain().Tip()) because we won't start block download
+ // until we have a headers chain that has at least
+ // nMinimumChainWork, even if a peer has a chain past our tip,
+ // as an anti-DoS measure.
+ if (pfrom.IsOutboundOrBlockRelayConn()) {
+ LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
+ pfrom.fDisconnect = true;
+ }
+ }
+ }
+
+ // If this is an outbound full-relay peer, check to see if we should protect
+ // it from the bad/lagging chain logic.
+ // Note that outbound block-relay peers are excluded from this protection, and
+ // thus always subject to eviction under the bad/lagging chain logic.
+ // See ChainSyncTimeoutState.
+ if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
+ if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+ LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
+ nodestate->m_chain_sync.m_protect = true;
+ ++m_outbound_peers_with_protect_from_disconnect;
+ }
+ }
+}
+
+void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
const std::vector<CBlockHeader>& headers,
bool via_compact_block)
{
@@ -2202,55 +2424,33 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
return;
}
- bool received_new_header = false;
const CBlockIndex *pindexLast = nullptr;
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom.GetId());
- // If this looks like it could be a block announcement (nCount <
- // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
- // don't connect:
- // - Send a getheaders message in response to try to connect the chain.
- // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
- // don't connect before giving DoS points
- // - Once a headers message is received that is valid and does connect,
- // nUnconnectingHeaders gets reset back to 0.
- if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
- nodestate->nUnconnectingHeaders++;
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
- LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
- headers[0].GetHash().ToString(),
- headers[0].hashPrevBlock.ToString(),
- m_chainman.m_best_header->nHeight,
- pfrom.GetId(), nodestate->nUnconnectingHeaders);
- // Set hashLastUnknownBlock for this peer, so that if we
- // eventually get the headers - even from a different peer -
- // we can use this peer to download.
- UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
-
- if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
- Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
- }
- return;
- }
+ // Do these headers connect to something in our block index?
+ bool headers_connect_blockindex{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) != nullptr)};
- uint256 hashLastBlock;
- for (const CBlockHeader& header : headers) {
- if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
- Misbehaving(pfrom.GetId(), 20, "non-continuous headers sequence");
- return;
- }
- hashLastBlock = header.GetHash();
+ if (!headers_connect_blockindex) {
+ if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
+ // If this looks like it could be a BIP 130 block announcement, use
+ // special logic for handling headers that don't connect, as this
+ // could be benign.
+ HandleFewUnconnectingHeaders(pfrom, peer, headers);
+ } else {
+ Misbehaving(peer, 10, "invalid header received");
}
+ return;
+ }
- // If we don't have the last header, then they'll have given us
- // something new (if these headers are valid).
- if (!m_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) {
- received_new_header = true;
- }
+ // At this point, the headers connect to something in our block index.
+ if (!CheckHeadersAreContinuous(headers)) {
+ Misbehaving(peer, 20, "non-continuous headers sequence");
+ return;
}
+ // If we don't have the last header, then this peer will have given us
+ // something new (if these headers are valid).
+ bool received_new_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()) == nullptr)};
+
BlockValidationState state;
if (!m_chainman.ProcessNewBlockHeaders(headers, state, &pindexLast)) {
if (state.IsInvalid()) {
@@ -2259,123 +2459,20 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
}
}
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom.GetId());
- if (nodestate->nUnconnectingHeaders > 0) {
- LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
- }
- nodestate->nUnconnectingHeaders = 0;
-
- assert(pindexLast);
- UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
-
- // From here, pindexBestKnownBlock should be guaranteed to be non-null,
- // because it is set in UpdateBlockAvailability. Some nullptr checks
- // are still present, however, as belt-and-suspenders.
-
- if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
- nodestate->m_last_block_announcement = GetTime();
- }
-
- if (nCount == MAX_HEADERS_RESULTS) {
- // Headers message had its maximum size; the peer may have more headers.
- // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or m_chainman.m_best_header, continue
- // from there instead.
+ // Consider fetching more headers.
+ if (nCount == MAX_HEADERS_RESULTS) {
+ // Headers message had its maximum size; the peer may have more headers.
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(pindexLast), peer)) {
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
- pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
- }
-
- // If this set of headers is valid and ends in a block with at least as
- // much work as our tip, download as much as possible.
- if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
- std::vector<const CBlockIndex*> vToFetch;
- const CBlockIndex *pindexWalk = pindexLast;
- // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
- while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
- !IsBlockRequested(pindexWalk->GetBlockHash()) &&
- (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
- // We don't have this block, and it's not yet in flight.
- vToFetch.push_back(pindexWalk);
- }
- pindexWalk = pindexWalk->pprev;
- }
- // If pindexWalk still isn't on our main chain, we're looking at a
- // very large reorg at a time we think we're close to caught up to
- // the main chain -- this shouldn't really happen. Bail out on the
- // direct fetch and rely on parallel download instead.
- if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
- LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
- pindexLast->GetBlockHash().ToString(),
- pindexLast->nHeight);
- } else {
- std::vector<CInv> vGetData;
- // Download as much as possible, from earliest to latest.
- for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
- if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- // Can't download any more from this peer
- break;
- }
- uint32_t nFetchFlags = GetFetchFlags(pfrom);
- vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- BlockRequested(pfrom.GetId(), *pindex);
- LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
- pindex->GetBlockHash().ToString(), pfrom.GetId());
- }
- if (vGetData.size() > 1) {
- LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
- pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
- }
- if (vGetData.size() > 0) {
- if (!m_ignore_incoming_txs &&
- nodestate->m_provides_cmpctblocks &&
- vGetData.size() == 1 &&
- mapBlocksInFlight.size() == 1 &&
- pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
- // In any case, we want to download using a compact block, not a regular one
- vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
- }
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
- }
- }
- }
- // If we're in IBD, we want outbound peers that will serve us a useful
- // chain. Disconnect peers that are on chains with insufficient work.
- if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
- // When nCount < MAX_HEADERS_RESULTS, we know we have no more
- // headers to fetch from this peer.
- if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
- // This peer has too little work on their headers chain to help
- // us sync -- disconnect if it is an outbound disconnection
- // candidate.
- // Note: We compare their tip to nMinimumChainWork (rather than
- // m_chainman.ActiveChain().Tip()) because we won't start block download
- // until we have a headers chain that has at least
- // nMinimumChainWork, even if a peer has a chain past our tip,
- // as an anti-DoS measure.
- if (pfrom.IsOutboundOrBlockRelayConn()) {
- LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
- pfrom.fDisconnect = true;
- }
- }
- }
-
- // If this is an outbound full-relay peer, check to see if we should protect
- // it from the bad/lagging chain logic.
- // Note that outbound block-relay peers are excluded from this protection, and
- // thus always subject to eviction under the bad/lagging chain logic.
- // See ChainSyncTimeoutState.
- if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
- if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
- LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
- nodestate->m_chain_sync.m_protect = true;
- ++m_outbound_peers_with_protect_from_disconnect;
- }
+ pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
}
}
+ UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+
+ // Consider immediately downloading blocks.
+ HeadersDirectFetchBlocks(pfrom, pindexLast);
+
return;
}
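For readers following the refactor: the inline continuity loop deleted at the top of this hunk now lives behind CheckHeadersAreContinuous. A minimal sketch consistent with the deleted code (the committed helper is presumably a PeerManagerImpl member and may differ in detail):
    // Sketch only: each header must reference the hash of its predecessor.
    bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers)
    {
        uint256 hash_last_block; // null before the first header is seen
        for (const CBlockHeader& header : headers) {
            if (!hash_last_block.IsNull() && header.hashPrevBlock != hash_last_block) {
                return false; // caller reacts with Misbehaving(peer, 20, ...)
            }
            hash_last_block = header.GetHash();
        }
        return true;
    }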
@@ -3001,7 +3098,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (vAddr.size() > MAX_ADDR_TO_SEND)
{
- Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
+ Misbehaving(*peer, 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
return;
}
@@ -3015,7 +3112,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
// Don't increment bucket if it's already full
const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
- const double increment = CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND;
+ const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
}
peer->m_addr_token_timestamp = current_time;
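The only change in this hunk is the helper used to turn the elapsed time into fractional seconds (Ticks<SecondsDouble> replacing CountSecondsDouble); the token-bucket arithmetic is unchanged. An illustrative refill, where the helper name and the 0.1 addr/s rate are examples rather than quotations from the source:
    // Sketch: refill the per-peer addr token bucket after time_diff has elapsed.
    // E.g. at a rate of 0.1 addr/s, a 50 s gap adds 5 tokens, capped at the bucket maximum.
    double RefillAddrTokenBucket(double bucket, std::chrono::microseconds time_diff)
    {
        const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
        return std::min<double>(bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
    }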
@@ -3082,7 +3179,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
- Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size()));
+ Misbehaving(*peer, 20, strprintf("inv message size = %u", vInv.size()));
return;
}
@@ -3138,8 +3235,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
if (best_block != nullptr) {
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *best_block));
- LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", m_chainman.m_best_header->nHeight, best_block->ToString(), pfrom.GetId());
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer)) {
+ LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
+ m_chainman.m_best_header->nHeight, best_block->ToString(),
+ pfrom.GetId());
+ }
}
return;
@@ -3150,7 +3250,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
- Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size()));
+ Misbehaving(*peer, 20, strprintf("getdata message size = %u", vInv.size()));
return;
}
@@ -3248,7 +3348,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
}
if (recent_block) {
- SendBlockTransactions(pfrom, *recent_block, req);
+ SendBlockTransactions(pfrom, *peer, *recent_block, req);
return;
}
@@ -3266,7 +3366,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus());
assert(ret);
- SendBlockTransactions(pfrom, block, req);
+ SendBlockTransactions(pfrom, *peer, block, req);
return;
}
}
@@ -3312,7 +3412,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// others.
if (m_chainman.ActiveTip() == nullptr ||
(m_chainman.ActiveTip()->nChainWork < nMinimumChainWork && !pfrom.HasPermission(NetPermissionFlags::Download))) {
- LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work\n", pfrom.GetId());
+ LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
+ // Just respond with an empty headers message, to tell the peer to
+ // go away but not treat us as unresponsive.
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, std::vector<CBlock>()));
return;
}
@@ -3593,8 +3696,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
- if (!m_chainman.ActiveChainstate().IsInitialBlockDownload())
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
+ if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
+ MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer);
+ }
return;
}
@@ -3685,7 +3789,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status == READ_STATUS_INVALID) {
RemoveBlockRequest(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect
- Misbehaving(pfrom.GetId(), 100, "invalid compact block");
+ Misbehaving(*peer, 100, "invalid compact block");
return;
} else if (status == READ_STATUS_FAILED) {
// Duplicate txindexes, the block is now in-flight, so just request it
@@ -3812,7 +3916,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
if (status == READ_STATUS_INVALID) {
RemoveBlockRequest(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect
- Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions");
+ Misbehaving(*peer, 100, "invalid compact block/non-matching block transactions");
return;
} else if (status == READ_STATUS_FAILED) {
// Might have collided, fall back to getdata now :(
@@ -3868,12 +3972,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
+ // Assume that this is in response to any outstanding getheaders
+ // request we may have sent, and clear out the time of our last request
+ peer->m_last_getheaders_timestamp.store(NodeSeconds{});
+
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > MAX_HEADERS_RESULTS) {
- Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount));
+ Misbehaving(*peer, 20, strprintf("headers message size = %u", nCount));
return;
}
headers.resize(nCount);
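Several call sites in this file now go through MaybeSendGetHeaders, which returns whether a GETHEADERS was actually pushed; together with the timestamp reset a few lines above, it keeps at most one headers request outstanding per peer. A simplified sketch of the gating idea (standalone types for readability; the 2-minute bound matches the HEADERS_RESPONSE_TIME constant removed from ConsiderEviction further down, which this patch presumably hoists to file scope):
    #include <atomic>
    #include <chrono>

    // Sketch: only allow a new getheaders once the previous one has had time to be answered.
    bool ShouldSendGetHeaders(std::chrono::seconds now, std::atomic<std::chrono::seconds>& last_request)
    {
        constexpr std::chrono::minutes kHeadersResponseTime{2}; // assumed value
        if (now - last_request.load() > kHeadersResponseTime) {
            last_request.store(now); // the caller then pushes NetMsgType::GETHEADERS with the locator;
            return true;             // the HEADERS handler above resets the timestamp to zero on receipt
        }
        return false;
    }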
@@ -4067,7 +4175,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (!filter.IsWithinSizeConstraints())
{
// There is no excuse for sending a too-large filter
- Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
+ Misbehaving(*peer, 100, "too-large bloom filter");
} else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
{
LOCK(tx_relay->m_bloom_filter_mutex);
@@ -4075,6 +4183,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
tx_relay->m_relay_txs = true;
}
pfrom.m_bloom_filter_loaded = true;
+ pfrom.m_relays_txs = true;
}
return;
}
@@ -4102,7 +4211,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
}
if (bad) {
- Misbehaving(pfrom.GetId(), 100, "bad filteradd message");
+ Misbehaving(*peer, 100, "bad filteradd message");
}
return;
}
@@ -4295,7 +4404,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
return fMoreWork;
}
-void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds)
+void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
{
AssertLockHeld(cs_main);
@@ -4333,10 +4442,15 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_
pto.fDisconnect = true;
} else {
assert(state.m_chain_sync.m_work_header);
+ // Here, we assume that the getheaders message goes out,
+ // because it'll either go out or be skipped because of a
+ // getheaders in-flight already, in which case the peer should
+ // still respond to us with a sufficiently high work chain tip.
+ MaybeSendGetHeaders(pto,
+ m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev),
+ peer);
LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
- m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
state.m_chain_sync.m_sent_getheaders = true;
- constexpr auto HEADERS_RESPONSE_TIME{2min};
// Bump the timeout to allow a response, which could clear the timeout
// (if the response shows the peer has synced), reset the timeout (if
// the peer syncs to the required work but not to our tip), or result
@@ -4599,7 +4713,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi
// transactions to us, regardless of feefilter state.
if (pto.IsBlockOnlyConn()) return;
- CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
+ CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();
static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
@@ -4744,15 +4858,6 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
- state.fSyncStarted = true;
- state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
- (
- // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
- // to maintain precision
- std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
- (GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing
- );
- nSyncStarted++;
const CBlockIndex* pindexStart = m_chainman.m_best_header;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
@@ -4763,8 +4868,19 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
- LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
- m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexStart), uint256()));
+ if (MaybeSendGetHeaders(*pto, m_chainman.ActiveChain().GetLocator(pindexStart), *peer)) {
+ LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
+
+ state.fSyncStarted = true;
+ state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
+ (
+ // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
+ // to maintain precision
+ std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
+ (GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing
+ );
+ nSyncStarted++;
+ }
}
}
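The sync-start bookkeeping (fSyncStarted, the headers download timeout, nSyncStarted) is now performed only when MaybeSendGetHeaders reports that the initial GETHEADERS actually went out. For orientation, a worked example of the timeout formula, with the two timeout constants treated as illustrative values rather than quoted from this diff:
    // timeout = HEADERS_DOWNLOAD_TIMEOUT_BASE
    //         + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (now - best_header_time) / nPowTargetSpacing
    // Assuming BASE = 15 min, PER_HEADER = 1 ms, and a best header ~1 year behind
    // (31'536'000 s of lag at a 600 s target spacing, i.e. ~52'560 expected headers):
    //   timeout ≈ 15 min + 1 ms * 31'536'000 / 600 ≈ 15 min + 52.6 s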
@@ -4772,7 +4888,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Try sending block announcements via headers
//
{
- // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
+ // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
// list of block hashes we're relaying, and our peer wants
// headers announcements, then find the first header
// not yet known to our peer but would connect, and send.
@@ -5110,7 +5226,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Check that outbound peers have reasonable chains
// GetTime() is used by this anti-DoS logic so we can test this using mocktime
- ConsiderEviction(*pto, GetTime<std::chrono::seconds>());
+ ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
//
// Message: getdata (blocks)
diff --git a/src/net_processing.h b/src/net_processing.h
index d5c73e6c79..5fbae98c27 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -71,12 +71,8 @@ public:
/** Set the best height */
virtual void SetBestHeight(int height) = 0;
- /**
- * Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node
- * to be discouraged, meaning the peer might be disconnected and added to the discouragement filter.
- * Public for unit testing.
- */
- virtual void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) = 0;
+ /* Public for unit testing. */
+ virtual void UnitTestMisbehaving(NodeId peer_id, int howmuch) = 0;
/**
* Evict extra outbound peers. If we think our tip may be stale, connect to an extra outbound.
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 1905a4df29..3c085ae6fb 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -653,8 +653,12 @@ public:
}
void getPackageLimits(unsigned int& limit_ancestor_count, unsigned int& limit_descendant_count) override
{
- limit_ancestor_count = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- limit_descendant_count = gArgs.GetIntArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
+ const CTxMemPool::Limits default_limits{};
+
+ const CTxMemPool::Limits& limits{m_node.mempool ? m_node.mempool->m_limits : default_limits};
+
+ limit_ancestor_count = limits.ancestor_count;
+ limit_descendant_count = limits.descendant_count;
}
bool checkChainLimits(const CTransactionRef& tx) override
{
@@ -662,15 +666,12 @@ public:
LockPoints lp;
CTxMemPoolEntry entry(tx, 0, 0, 0, false, 0, lp);
CTxMemPool::setEntries ancestors;
- auto limit_ancestor_count = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- auto limit_ancestor_size = gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
- auto limit_descendant_count = gArgs.GetIntArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
- auto limit_descendant_size = gArgs.GetIntArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000;
+ const CTxMemPool::Limits& limits{m_node.mempool->m_limits};
std::string unused_error_string;
LOCK(m_node.mempool->cs);
return m_node.mempool->CalculateMemPoolAncestors(
- entry, ancestors, limit_ancestor_count, limit_ancestor_size,
- limit_descendant_count, limit_descendant_size, unused_error_string);
+ entry, ancestors, limits.ancestor_count, limits.ancestor_size_vbytes,
+ limits.descendant_count, limits.descendant_size_vbytes, unused_error_string);
}
CFeeRate estimateSmartFee(int num_blocks, bool conservative, FeeCalculation* calc) override
{
@@ -685,7 +686,7 @@ public:
CFeeRate mempoolMinFee() override
{
if (!m_node.mempool) return {};
- return m_node.mempool->GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
+ return m_node.mempool->GetMinFee();
}
CFeeRate relayMinFee() override { return ::minRelayTxFee; }
CFeeRate relayIncrementalFee() override { return ::incrementalRelayFee; }
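getPackageLimits and checkChainLimits now read a CTxMemPool::Limits object carried by the mempool instead of re-parsing the -limit* options through gArgs on every call. A hypothetical minimal shape of that struct, with field names taken from the call sites above and defaults from the policy constants renamed later in this diff (the real definition lives in kernel/mempool_limits.h, which is not shown in this excerpt):
    struct MempoolLimitsSketch {
        int64_t ancestor_count{DEFAULT_ANCESTOR_LIMIT};
        int64_t ancestor_size_vbytes{DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1'000};
        int64_t descendant_count{DEFAULT_DESCENDANT_LIMIT};
        int64_t descendant_size_vbytes{DEFAULT_DESCENDANT_SIZE_LIMIT_KVB * 1'000};
    };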
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index b39632364f..27a6ab221f 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -31,8 +31,6 @@
#include <stdexcept>
#include <utility>
-static const char* FEE_ESTIMATES_FILENAME = "fee_estimates.dat";
-
static constexpr double INF_FEERATE = 1e99;
std::string StringForFeeEstimateHorizon(FeeEstimateHorizon horizon)
@@ -529,8 +527,8 @@ bool CBlockPolicyEstimator::_removeTx(const uint256& hash, bool inBlock)
}
}
-CBlockPolicyEstimator::CBlockPolicyEstimator()
- : nBestSeenHeight(0), firstRecordedHeight(0), historicalFirst(0), historicalBest(0), trackedTxs(0), untrackedTxs(0)
+CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath)
+ : m_estimation_filepath{estimation_filepath}, nBestSeenHeight{0}, firstRecordedHeight{0}, historicalFirst{0}, historicalBest{0}, trackedTxs{0}, untrackedTxs{0}
{
static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
size_t bucketIndex = 0;
@@ -548,10 +546,9 @@ CBlockPolicyEstimator::CBlockPolicyEstimator()
longStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));
// If the fee estimation file is present, read recorded estimations
- fs::path est_filepath = gArgs.GetDataDirNet() / FEE_ESTIMATES_FILENAME;
- CAutoFile est_file(fsbridge::fopen(est_filepath, "rb"), SER_DISK, CLIENT_VERSION);
+ CAutoFile est_file(fsbridge::fopen(m_estimation_filepath, "rb"), SER_DISK, CLIENT_VERSION);
if (est_file.IsNull() || !Read(est_file)) {
- LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(est_filepath));
+ LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
}
}
@@ -907,10 +904,9 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, FeeCalculation
void CBlockPolicyEstimator::Flush() {
FlushUnconfirmed();
- fs::path est_filepath = gArgs.GetDataDirNet() / FEE_ESTIMATES_FILENAME;
- CAutoFile est_file(fsbridge::fopen(est_filepath, "wb"), SER_DISK, CLIENT_VERSION);
+ CAutoFile est_file(fsbridge::fopen(m_estimation_filepath, "wb"), SER_DISK, CLIENT_VERSION);
if (est_file.IsNull() || !Write(est_file)) {
- LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(est_filepath));
+ LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
}
}
diff --git a/src/policy/fees.h b/src/policy/fees.h
index dea1e1d31b..9ee5c2938a 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -6,6 +6,7 @@
#define BITCOIN_POLICY_FEES_H
#include <consensus/amount.h>
+#include <fs.h>
#include <policy/feerate.h>
#include <random.h>
#include <sync.h>
@@ -179,9 +180,10 @@ private:
*/
static constexpr double FEE_SPACING = 1.05;
+ const fs::path m_estimation_filepath;
public:
/** Create new BlockPolicyEstimator and initialize stats tracking classes with default values */
- CBlockPolicyEstimator();
+ CBlockPolicyEstimator(const fs::path& estimation_filepath);
~CBlockPolicyEstimator();
/** Process all the transactions that have been included in a block */
diff --git a/src/policy/fees_args.cpp b/src/policy/fees_args.cpp
new file mode 100644
index 0000000000..a3531153b5
--- /dev/null
+++ b/src/policy/fees_args.cpp
@@ -0,0 +1,12 @@
+#include <policy/fees_args.h>
+
+#include <util/system.h>
+
+namespace {
+const char* FEE_ESTIMATES_FILENAME = "fee_estimates.dat";
+} // namespace
+
+fs::path FeeestPath(const ArgsManager& argsman)
+{
+ return argsman.GetDataDirNet() / FEE_ESTIMATES_FILENAME;
+}
diff --git a/src/policy/fees_args.h b/src/policy/fees_args.h
new file mode 100644
index 0000000000..6b65ce0aa9
--- /dev/null
+++ b/src/policy/fees_args.h
@@ -0,0 +1,15 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_POLICY_FEES_ARGS_H
+#define BITCOIN_POLICY_FEES_ARGS_H
+
+#include <fs.h>
+
+class ArgsManager;
+
+/** @return The fee estimates data file path. */
+fs::path FeeestPath(const ArgsManager& argsman);
+
+#endif // BITCOIN_POLICY_FEES_ARGS_H
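With this change the estimator no longer reaches into gArgs for its data file; callers resolve the path once via FeeestPath and inject it through the new constructor. Illustrative wiring only (the real call sites are in init.cpp and the test setup, outside this excerpt):
    #include <policy/fees.h>
    #include <policy/fees_args.h>
    #include <memory>

    std::unique_ptr<CBlockPolicyEstimator> MakeEstimator(const ArgsManager& args)
    {
        // FeeestPath(args) resolves to <datadir>/fee_estimates.dat per fees_args.cpp above.
        return std::make_unique<CBlockPolicyEstimator>(FeeestPath(args));
    }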
diff --git a/src/policy/packages.h b/src/policy/packages.h
index 564ff50d29..36c70e9e66 100644
--- a/src/policy/packages.h
+++ b/src/policy/packages.h
@@ -25,8 +25,8 @@ static_assert(MAX_PACKAGE_SIZE * WITNESS_SCALE_FACTOR * 1000 >= MAX_STANDARD_TX_
// defaults reflect this constraint.
static_assert(DEFAULT_DESCENDANT_LIMIT >= MAX_PACKAGE_COUNT);
static_assert(DEFAULT_ANCESTOR_LIMIT >= MAX_PACKAGE_COUNT);
-static_assert(DEFAULT_ANCESTOR_SIZE_LIMIT >= MAX_PACKAGE_SIZE);
-static_assert(DEFAULT_DESCENDANT_SIZE_LIMIT >= MAX_PACKAGE_SIZE);
+static_assert(DEFAULT_ANCESTOR_SIZE_LIMIT_KVB >= MAX_PACKAGE_SIZE);
+static_assert(DEFAULT_DESCENDANT_SIZE_LIMIT_KVB >= MAX_PACKAGE_SIZE);
/** A "reason" why a package was invalid. It may be that one or more of the included
* transactions is invalid or the package itself violates our rules.
diff --git a/src/policy/policy.h b/src/policy/policy.h
index cd46652efc..cd98a601a3 100644
--- a/src/policy/policy.h
+++ b/src/policy/policy.h
@@ -31,8 +31,6 @@ static constexpr unsigned int MIN_STANDARD_TX_NONWITNESS_SIZE{82};
static constexpr unsigned int MAX_P2SH_SIGOPS{15};
/** The maximum number of sigops we're willing to relay/mine in a single tx */
static constexpr unsigned int MAX_STANDARD_TX_SIGOPS_COST{MAX_BLOCK_SIGOPS_COST/5};
-/** Default for -maxmempool, maximum megabytes of mempool memory usage */
-static constexpr unsigned int DEFAULT_MAX_MEMPOOL_SIZE{300};
/** Default for -incrementalrelayfee, which sets the minimum feerate increase for mempool limiting or BIP 125 replacement **/
static constexpr unsigned int DEFAULT_INCREMENTAL_RELAY_FEE{1000};
/** Default for -bytespersigop */
@@ -60,11 +58,11 @@ static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE{1000};
/** Default for -limitancestorcount, max number of in-mempool ancestors */
static constexpr unsigned int DEFAULT_ANCESTOR_LIMIT{25};
/** Default for -limitancestorsize, maximum kilobytes of tx + all in-mempool ancestors */
-static constexpr unsigned int DEFAULT_ANCESTOR_SIZE_LIMIT{101};
+static constexpr unsigned int DEFAULT_ANCESTOR_SIZE_LIMIT_KVB{101};
/** Default for -limitdescendantcount, max number of in-mempool descendants */
static constexpr unsigned int DEFAULT_DESCENDANT_LIMIT{25};
/** Default for -limitdescendantsize, maximum kilobytes of in-mempool descendants */
-static constexpr unsigned int DEFAULT_DESCENDANT_SIZE_LIMIT{101};
+static constexpr unsigned int DEFAULT_DESCENDANT_SIZE_LIMIT_KVB{101};
/**
* An extra transaction can be added to a package, as long as it only has one
* ancestor and is no larger than this. Not really any reason to make this
@@ -101,8 +99,7 @@ static constexpr unsigned int STANDARD_SCRIPT_VERIFY_FLAGS{MANDATORY_SCRIPT_VERI
static constexpr unsigned int STANDARD_NOT_MANDATORY_VERIFY_FLAGS{STANDARD_SCRIPT_VERIFY_FLAGS & ~MANDATORY_SCRIPT_VERIFY_FLAGS};
/** Used as the flags parameter to sequence and nLocktime checks in non-consensus code. */
-static constexpr unsigned int STANDARD_LOCKTIME_VERIFY_FLAGS{LOCKTIME_VERIFY_SEQUENCE |
- LOCKTIME_MEDIAN_TIME_PAST};
+static constexpr unsigned int STANDARD_LOCKTIME_VERIFY_FLAGS{LOCKTIME_VERIFY_SEQUENCE};
CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFee);
diff --git a/src/psbt.cpp b/src/psbt.cpp
index c1c8a385cc..36fec74bc9 100644
--- a/src/psbt.cpp
+++ b/src/psbt.cpp
@@ -113,6 +113,24 @@ void PSBTInput::FillSignatureData(SignatureData& sigdata) const
for (const auto& key_pair : hd_keypaths) {
sigdata.misc_pubkeys.emplace(key_pair.first.GetID(), key_pair);
}
+ if (!m_tap_key_sig.empty()) {
+ sigdata.taproot_key_path_sig = m_tap_key_sig;
+ }
+ for (const auto& [pubkey_leaf, sig] : m_tap_script_sigs) {
+ sigdata.taproot_script_sigs.emplace(pubkey_leaf, sig);
+ }
+ if (!m_tap_internal_key.IsNull()) {
+ sigdata.tr_spenddata.internal_key = m_tap_internal_key;
+ }
+ if (!m_tap_merkle_root.IsNull()) {
+ sigdata.tr_spenddata.merkle_root = m_tap_merkle_root;
+ }
+ for (const auto& [leaf_script, control_block] : m_tap_scripts) {
+ sigdata.tr_spenddata.scripts.emplace(leaf_script, control_block);
+ }
+ for (const auto& [pubkey, leaf_origin] : m_tap_bip32_paths) {
+ sigdata.taproot_misc_pubkeys.emplace(pubkey, leaf_origin);
+ }
}
void PSBTInput::FromSignatureData(const SignatureData& sigdata)
@@ -142,13 +160,30 @@ void PSBTInput::FromSignatureData(const SignatureData& sigdata)
for (const auto& entry : sigdata.misc_pubkeys) {
hd_keypaths.emplace(entry.second);
}
+ if (!sigdata.taproot_key_path_sig.empty()) {
+ m_tap_key_sig = sigdata.taproot_key_path_sig;
+ }
+ for (const auto& [pubkey_leaf, sig] : sigdata.taproot_script_sigs) {
+ m_tap_script_sigs.emplace(pubkey_leaf, sig);
+ }
+ if (!sigdata.tr_spenddata.internal_key.IsNull()) {
+ m_tap_internal_key = sigdata.tr_spenddata.internal_key;
+ }
+ if (!sigdata.tr_spenddata.merkle_root.IsNull()) {
+ m_tap_merkle_root = sigdata.tr_spenddata.merkle_root;
+ }
+ for (const auto& [leaf_script, control_block] : sigdata.tr_spenddata.scripts) {
+ m_tap_scripts.emplace(leaf_script, control_block);
+ }
+ for (const auto& [pubkey, leaf_origin] : sigdata.taproot_misc_pubkeys) {
+ m_tap_bip32_paths.emplace(pubkey, leaf_origin);
+ }
}
void PSBTInput::Merge(const PSBTInput& input)
{
if (!non_witness_utxo && input.non_witness_utxo) non_witness_utxo = input.non_witness_utxo;
if (witness_utxo.IsNull() && !input.witness_utxo.IsNull()) {
- // TODO: For segwit v1, we will want to clear out the non-witness utxo when setting a witness one. For v0 and non-segwit, this is not safe
witness_utxo = input.witness_utxo;
}
@@ -159,11 +194,17 @@ void PSBTInput::Merge(const PSBTInput& input)
hash256_preimages.insert(input.hash256_preimages.begin(), input.hash256_preimages.end());
hd_keypaths.insert(input.hd_keypaths.begin(), input.hd_keypaths.end());
unknown.insert(input.unknown.begin(), input.unknown.end());
+ m_tap_script_sigs.insert(input.m_tap_script_sigs.begin(), input.m_tap_script_sigs.end());
+ m_tap_scripts.insert(input.m_tap_scripts.begin(), input.m_tap_scripts.end());
+ m_tap_bip32_paths.insert(input.m_tap_bip32_paths.begin(), input.m_tap_bip32_paths.end());
if (redeem_script.empty() && !input.redeem_script.empty()) redeem_script = input.redeem_script;
if (witness_script.empty() && !input.witness_script.empty()) witness_script = input.witness_script;
if (final_script_sig.empty() && !input.final_script_sig.empty()) final_script_sig = input.final_script_sig;
if (final_script_witness.IsNull() && !input.final_script_witness.IsNull()) final_script_witness = input.final_script_witness;
+ if (m_tap_key_sig.empty() && !input.m_tap_key_sig.empty()) m_tap_key_sig = input.m_tap_key_sig;
+ if (m_tap_internal_key.IsNull() && !input.m_tap_internal_key.IsNull()) m_tap_internal_key = input.m_tap_internal_key;
+ if (m_tap_merkle_root.IsNull() && !input.m_tap_merkle_root.IsNull()) m_tap_merkle_root = input.m_tap_merkle_root;
}
void PSBTOutput::FillSignatureData(SignatureData& sigdata) const
@@ -177,6 +218,15 @@ void PSBTOutput::FillSignatureData(SignatureData& sigdata) const
for (const auto& key_pair : hd_keypaths) {
sigdata.misc_pubkeys.emplace(key_pair.first.GetID(), key_pair);
}
+ if (m_tap_tree.has_value() && m_tap_internal_key.IsFullyValid()) {
+ TaprootSpendData spenddata = m_tap_tree->GetSpendData();
+
+ sigdata.tr_spenddata.internal_key = m_tap_internal_key;
+ sigdata.tr_spenddata.Merge(spenddata);
+ }
+ for (const auto& [pubkey, leaf_origin] : m_tap_bip32_paths) {
+ sigdata.taproot_misc_pubkeys.emplace(pubkey, leaf_origin);
+ }
}
void PSBTOutput::FromSignatureData(const SignatureData& sigdata)
@@ -190,6 +240,15 @@ void PSBTOutput::FromSignatureData(const SignatureData& sigdata)
for (const auto& entry : sigdata.misc_pubkeys) {
hd_keypaths.emplace(entry.second);
}
+ if (!sigdata.tr_spenddata.internal_key.IsNull()) {
+ m_tap_internal_key = sigdata.tr_spenddata.internal_key;
+ }
+ if (sigdata.tr_builder.has_value()) {
+ m_tap_tree = sigdata.tr_builder;
+ }
+ for (const auto& [pubkey, leaf_origin] : sigdata.taproot_misc_pubkeys) {
+ m_tap_bip32_paths.emplace(pubkey, leaf_origin);
+ }
}
bool PSBTOutput::IsNull() const
@@ -201,9 +260,12 @@ void PSBTOutput::Merge(const PSBTOutput& output)
{
hd_keypaths.insert(output.hd_keypaths.begin(), output.hd_keypaths.end());
unknown.insert(output.unknown.begin(), output.unknown.end());
+ m_tap_bip32_paths.insert(output.m_tap_bip32_paths.begin(), output.m_tap_bip32_paths.end());
if (redeem_script.empty() && !output.redeem_script.empty()) redeem_script = output.redeem_script;
if (witness_script.empty() && !output.witness_script.empty()) witness_script = output.witness_script;
+ if (m_tap_internal_key.IsNull() && !output.m_tap_internal_key.IsNull()) m_tap_internal_key = output.m_tap_internal_key;
+ if (m_tap_tree.has_value() && !output.m_tap_tree.has_value()) m_tap_tree = output.m_tap_tree;
}
bool PSBTInputSigned(const PSBTInput& input)
{
@@ -313,10 +375,11 @@ bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction&
input.FromSignatureData(sigdata);
// If we have a witness signature, put a witness UTXO.
- // TODO: For segwit v1, we should remove the non_witness_utxo
if (sigdata.witness) {
input.witness_utxo = utxo;
- // input.non_witness_utxo = nullptr;
+ // We can remove the non_witness_utxo if and only if there are no non-segwit or segwit v0
+ // inputs in this transaction. Since this requires inspecting the entire transaction, this
+ // is something for the caller to deal with (i.e. FillPSBT).
}
// Fill in the missing info
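The reworded comment defers dropping non_witness_utxo to callers such as FillPSBT, since it is only safe when no input in the transaction is non-segwit or segwit v0. A hypothetical caller-side check (not part of this diff) might look like:
    // Hypothetical helper: true if every input spends a segwit v1+ output, so no
    // signer needs the full previous transactions.
    bool AllInputsAreTaprootOrLater(const PartiallySignedTransaction& psbt)
    {
        for (const PSBTInput& input : psbt.inputs) {
            if (input.witness_utxo.IsNull()) return false;
            int version{0};
            std::vector<unsigned char> program;
            if (!input.witness_utxo.scriptPubKey.IsWitnessProgram(version, program) || version < 1) {
                return false;
            }
        }
        return true;
    }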
diff --git a/src/psbt.h b/src/psbt.h
index 4a6d41076f..a143a99988 100644
--- a/src/psbt.h
+++ b/src/psbt.h
@@ -40,12 +40,21 @@ static constexpr uint8_t PSBT_IN_RIPEMD160 = 0x0A;
static constexpr uint8_t PSBT_IN_SHA256 = 0x0B;
static constexpr uint8_t PSBT_IN_HASH160 = 0x0C;
static constexpr uint8_t PSBT_IN_HASH256 = 0x0D;
+static constexpr uint8_t PSBT_IN_TAP_KEY_SIG = 0x13;
+static constexpr uint8_t PSBT_IN_TAP_SCRIPT_SIG = 0x14;
+static constexpr uint8_t PSBT_IN_TAP_LEAF_SCRIPT = 0x15;
+static constexpr uint8_t PSBT_IN_TAP_BIP32_DERIVATION = 0x16;
+static constexpr uint8_t PSBT_IN_TAP_INTERNAL_KEY = 0x17;
+static constexpr uint8_t PSBT_IN_TAP_MERKLE_ROOT = 0x18;
static constexpr uint8_t PSBT_IN_PROPRIETARY = 0xFC;
// Output types
static constexpr uint8_t PSBT_OUT_REDEEMSCRIPT = 0x00;
static constexpr uint8_t PSBT_OUT_WITNESSSCRIPT = 0x01;
static constexpr uint8_t PSBT_OUT_BIP32_DERIVATION = 0x02;
+static constexpr uint8_t PSBT_OUT_TAP_INTERNAL_KEY = 0x05;
+static constexpr uint8_t PSBT_OUT_TAP_TREE = 0x06;
+static constexpr uint8_t PSBT_OUT_TAP_BIP32_DERIVATION = 0x07;
static constexpr uint8_t PSBT_OUT_PROPRIETARY = 0xFC;
// The separator is 0x00. Reading this in means that the unserializer can interpret it
@@ -97,22 +106,30 @@ void UnserializeFromVector(Stream& s, X&... args)
}
}
-// Deserialize an individual HD keypath to a stream
+// Deserialize bytes of given length from the stream as a KeyOriginInfo
template<typename Stream>
-void DeserializeHDKeypath(Stream& s, KeyOriginInfo& hd_keypath)
+KeyOriginInfo DeserializeKeyOrigin(Stream& s, uint64_t length)
{
// Read in key path
- uint64_t value_len = ReadCompactSize(s);
- if (value_len % 4 || value_len == 0) {
+ if (length % 4 || length == 0) {
throw std::ios_base::failure("Invalid length for HD key path");
}
+ KeyOriginInfo hd_keypath;
s >> hd_keypath.fingerprint;
- for (unsigned int i = 4; i < value_len; i += sizeof(uint32_t)) {
+ for (unsigned int i = 4; i < length; i += sizeof(uint32_t)) {
uint32_t index;
s >> index;
hd_keypath.path.push_back(index);
}
+ return hd_keypath;
+}
+
+// Deserialize a length prefixed KeyOriginInfo from a stream
+template<typename Stream>
+void DeserializeHDKeypath(Stream& s, KeyOriginInfo& hd_keypath)
+{
+ hd_keypath = DeserializeKeyOrigin(s, ReadCompactSize(s));
}
// Deserialize HD keypaths into a map
@@ -139,17 +156,24 @@ void DeserializeHDKeypaths(Stream& s, const std::vector<unsigned char>& key, std
hd_keypaths.emplace(pubkey, std::move(keypath));
}
-// Serialize an individual HD keypath to a stream
+// Serialize a KeyOriginInfo to a stream
template<typename Stream>
-void SerializeHDKeypath(Stream& s, KeyOriginInfo hd_keypath)
+void SerializeKeyOrigin(Stream& s, KeyOriginInfo hd_keypath)
{
- WriteCompactSize(s, (hd_keypath.path.size() + 1) * sizeof(uint32_t));
s << hd_keypath.fingerprint;
for (const auto& path : hd_keypath.path) {
s << path;
}
}
+// Serialize a length prefixed KeyOriginInfo to a stream
+template<typename Stream>
+void SerializeHDKeypath(Stream& s, KeyOriginInfo hd_keypath)
+{
+ WriteCompactSize(s, (hd_keypath.path.size() + 1) * sizeof(uint32_t));
+ SerializeKeyOrigin(s, hd_keypath);
+}
+
// Serialize HD keypaths to a stream from a map
template<typename Stream>
void SerializeHDKeypaths(Stream& s, const std::map<CPubKey, KeyOriginInfo>& hd_keypaths, CompactSizeWriter type)
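The split keeps SerializeHDKeypath's length prefix but makes the raw fingerprint-plus-path encoding reusable (SerializeKeyOrigin) for the taproot BIP32 fields below, where it is embedded after a set of leaf hashes inside one value. For reference, the layout SerializeHDKeypath produces for a three-element path:
    // WriteCompactSize((3 + 1) * sizeof(uint32_t)) -> 0x10 (16 bytes follow)
    // 4-byte fingerprint
    // 3 * 4-byte path elements, each a little-endian uint32 (hardened entries have the high bit set)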
@@ -178,6 +202,15 @@ struct PSBTInput
std::map<uint256, std::vector<unsigned char>> sha256_preimages;
std::map<uint160, std::vector<unsigned char>> hash160_preimages;
std::map<uint256, std::vector<unsigned char>> hash256_preimages;
+
+ // Taproot fields
+ std::vector<unsigned char> m_tap_key_sig;
+ std::map<std::pair<XOnlyPubKey, uint256>, std::vector<unsigned char>> m_tap_script_sigs;
+ std::map<std::pair<CScript, int>, std::set<std::vector<unsigned char>, ShortestVectorFirstComparator>> m_tap_scripts;
+ std::map<XOnlyPubKey, std::pair<std::set<uint256>, KeyOriginInfo>> m_tap_bip32_paths;
+ XOnlyPubKey m_tap_internal_key;
+ uint256 m_tap_merkle_root;
+
std::map<std::vector<unsigned char>, std::vector<unsigned char>> unknown;
std::set<PSBTProprietary> m_proprietary;
std::optional<int> sighash_type;
@@ -252,6 +285,53 @@ struct PSBTInput
SerializeToVector(s, CompactSizeWriter(PSBT_IN_HASH256), Span{hash});
s << preimage;
}
+
+ // Write taproot key sig
+ if (!m_tap_key_sig.empty()) {
+ SerializeToVector(s, PSBT_IN_TAP_KEY_SIG);
+ s << m_tap_key_sig;
+ }
+
+ // Write taproot script sigs
+ for (const auto& [pubkey_leaf, sig] : m_tap_script_sigs) {
+ const auto& [xonly, leaf_hash] = pubkey_leaf;
+ SerializeToVector(s, PSBT_IN_TAP_SCRIPT_SIG, xonly, leaf_hash);
+ s << sig;
+ }
+
+ // Write taproot leaf scripts
+ for (const auto& [leaf, control_blocks] : m_tap_scripts) {
+ const auto& [script, leaf_ver] = leaf;
+ for (const auto& control_block : control_blocks) {
+ SerializeToVector(s, PSBT_IN_TAP_LEAF_SCRIPT, Span{control_block});
+ std::vector<unsigned char> value_v(script.begin(), script.end());
+ value_v.push_back((uint8_t)leaf_ver);
+ s << value_v;
+ }
+ }
+
+ // Write taproot bip32 keypaths
+ for (const auto& [xonly, leaf_origin] : m_tap_bip32_paths) {
+ const auto& [leaf_hashes, origin] = leaf_origin;
+ SerializeToVector(s, PSBT_IN_TAP_BIP32_DERIVATION, xonly);
+ std::vector<unsigned char> value;
+ CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0);
+ s_value << leaf_hashes;
+ SerializeKeyOrigin(s_value, origin);
+ s << value;
+ }
+
+ // Write taproot internal key
+ if (!m_tap_internal_key.IsNull()) {
+ SerializeToVector(s, PSBT_IN_TAP_INTERNAL_KEY);
+ s << ToByteVector(m_tap_internal_key);
+ }
+
+ // Write taproot merkle root
+ if (!m_tap_merkle_root.IsNull()) {
+ SerializeToVector(s, PSBT_IN_TAP_MERKLE_ROOT);
+ SerializeToVector(s, m_tap_merkle_root);
+ }
}
// Write script sig
@@ -488,6 +568,103 @@ struct PSBTInput
hash256_preimages.emplace(hash, std::move(preimage));
break;
}
+ case PSBT_IN_TAP_KEY_SIG:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, input Taproot key signature already provided");
+ } else if (key.size() != 1) {
+ throw std::ios_base::failure("Input Taproot key signature key is more than one byte type");
+ }
+ s >> m_tap_key_sig;
+ if (m_tap_key_sig.size() < 64) {
+ throw std::ios_base::failure("Input Taproot key path signature is shorter than 64 bytes");
+ } else if (m_tap_key_sig.size() > 65) {
+ throw std::ios_base::failure("Input Taproot key path signature is longer than 65 bytes");
+ }
+ break;
+ }
+ case PSBT_IN_TAP_SCRIPT_SIG:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, input Taproot script signature already provided");
+ } else if (key.size() != 65) {
+ throw std::ios_base::failure("Input Taproot script signature key is not 65 bytes");
+ }
+ SpanReader s_key(s.GetType(), s.GetVersion(), Span{key}.subspan(1));
+ XOnlyPubKey xonly;
+ uint256 hash;
+ s_key >> xonly;
+ s_key >> hash;
+ std::vector<unsigned char> sig;
+ s >> sig;
+ if (sig.size() < 64) {
+ throw std::ios_base::failure("Input Taproot script path signature is shorter than 64 bytes");
+ } else if (sig.size() > 65) {
+ throw std::ios_base::failure("Input Taproot script path signature is longer than 65 bytes");
+ }
+ m_tap_script_sigs.emplace(std::make_pair(xonly, hash), sig);
+ break;
+ }
+ case PSBT_IN_TAP_LEAF_SCRIPT:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, input Taproot leaf script already provided");
+ } else if (key.size() < 34) {
+ throw std::ios_base::failure("Taproot leaf script key is not at least 34 bytes");
+ } else if ((key.size() - 2) % 32 != 0) {
+ throw std::ios_base::failure("Input Taproot leaf script key's control block size is not valid");
+ }
+ std::vector<unsigned char> script_v;
+ s >> script_v;
+ if (script_v.empty()) {
+ throw std::ios_base::failure("Input Taproot leaf script must be at least 1 byte");
+ }
+ uint8_t leaf_ver = script_v.back();
+ script_v.pop_back();
+ const auto leaf_script = std::make_pair(CScript(script_v.begin(), script_v.end()), (int)leaf_ver);
+ m_tap_scripts[leaf_script].insert(std::vector<unsigned char>(key.begin() + 1, key.end()));
+ break;
+ }
+ case PSBT_IN_TAP_BIP32_DERIVATION:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, input Taproot BIP32 keypath already provided");
+ } else if (key.size() != 33) {
+ throw std::ios_base::failure("Input Taproot BIP32 keypath key is not at 33 bytes");
+ }
+ SpanReader s_key(s.GetType(), s.GetVersion(), Span{key}.subspan(1));
+ XOnlyPubKey xonly;
+ s_key >> xonly;
+ std::set<uint256> leaf_hashes;
+ uint64_t value_len = ReadCompactSize(s);
+ size_t before_hashes = s.size();
+ s >> leaf_hashes;
+ size_t after_hashes = s.size();
+ size_t hashes_len = before_hashes - after_hashes;
+ size_t origin_len = value_len - hashes_len;
+ m_tap_bip32_paths.emplace(xonly, std::make_pair(leaf_hashes, DeserializeKeyOrigin(s, origin_len)));
+ break;
+ }
+ case PSBT_IN_TAP_INTERNAL_KEY:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, input Taproot internal key already provided");
+ } else if (key.size() != 1) {
+ throw std::ios_base::failure("Input Taproot internal key key is more than one byte type");
+ }
+ UnserializeFromVector(s, m_tap_internal_key);
+ break;
+ }
+ case PSBT_IN_TAP_MERKLE_ROOT:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, input Taproot merkle root already provided");
+ } else if (key.size() != 1) {
+ throw std::ios_base::failure("Input Taproot merkle root key is more than one byte type");
+ }
+ UnserializeFromVector(s, m_tap_merkle_root);
+ break;
+ }
case PSBT_IN_PROPRIETARY:
{
PSBTProprietary this_prop;
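In the PSBT_IN_TAP_BIP32_DERIVATION case above, the value is a single compact-size-prefixed blob holding the leaf-hash set followed by the key origin, so the origin's length is recovered by measuring how much the stream shrank while reading the hashes. A worked example with assumed sizes:
    // value_len    = 73   (compact size read up front)
    // leaf hashes  = 1 + 2 * 32 = 65 bytes (compact-size count, then two uint256 hashes)
    // hashes_len   = before_hashes - after_hashes = 65
    // origin_len   = value_len - hashes_len = 8
    // DeserializeKeyOrigin(s, 8) then reads a 4-byte fingerprint and one uint32 path element.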
@@ -532,6 +709,9 @@ struct PSBTOutput
CScript redeem_script;
CScript witness_script;
std::map<CPubKey, KeyOriginInfo> hd_keypaths;
+ XOnlyPubKey m_tap_internal_key;
+ std::optional<TaprootBuilder> m_tap_tree;
+ std::map<XOnlyPubKey, std::pair<std::set<uint256>, KeyOriginInfo>> m_tap_bip32_paths;
std::map<std::vector<unsigned char>, std::vector<unsigned char>> unknown;
std::set<PSBTProprietary> m_proprietary;
@@ -564,6 +744,40 @@ struct PSBTOutput
s << entry.value;
}
+ // Write taproot internal key
+ if (!m_tap_internal_key.IsNull()) {
+ SerializeToVector(s, PSBT_OUT_TAP_INTERNAL_KEY);
+ s << ToByteVector(m_tap_internal_key);
+ }
+
+ // Write taproot tree
+ if (m_tap_tree.has_value()) {
+ SerializeToVector(s, PSBT_OUT_TAP_TREE);
+ std::vector<unsigned char> value;
+ CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0);
+ const auto& tuples = m_tap_tree->GetTreeTuples();
+ for (const auto& tuple : tuples) {
+ uint8_t depth = std::get<0>(tuple);
+ uint8_t leaf_ver = std::get<1>(tuple);
+ CScript script = std::get<2>(tuple);
+ s_value << depth;
+ s_value << leaf_ver;
+ s_value << script;
+ }
+ s << value;
+ }
+
+ // Write taproot bip32 keypaths
+ for (const auto& [xonly, leaf] : m_tap_bip32_paths) {
+ const auto& [leaf_hashes, origin] = leaf;
+ SerializeToVector(s, PSBT_OUT_TAP_BIP32_DERIVATION, xonly);
+ std::vector<unsigned char> value;
+ CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0);
+ s_value << leaf_hashes;
+ SerializeKeyOrigin(s_value, origin);
+ s << value;
+ }
+
// Write unknown things
for (auto& entry : unknown) {
s << entry.first;
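The PSBT_OUT_TAP_TREE value written above is a flat, depth-first list of (depth, leaf version, script) tuples. Illustratively, for a balanced two-leaf tree with scripts A and B, both at depth 1 with leaf version 0xc0:
    // 01 c0 <compact-size len(A)> <A bytes>
    // 01 c0 <compact-size len(B)> <B bytes>
    // The matching deserializer further down replays each tuple via
    // m_tap_tree->Add(depth, script, leaf_ver, /*track=*/true) and then requires IsComplete().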
@@ -624,6 +838,59 @@ struct PSBTOutput
DeserializeHDKeypaths(s, key, hd_keypaths);
break;
}
+ case PSBT_OUT_TAP_INTERNAL_KEY:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, output Taproot internal key already provided");
+ } else if (key.size() != 1) {
+ throw std::ios_base::failure("Output Taproot internal key key is more than one byte type");
+ }
+ UnserializeFromVector(s, m_tap_internal_key);
+ break;
+ }
+ case PSBT_OUT_TAP_TREE:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, output Taproot tree already provided");
+ } else if (key.size() != 1) {
+ throw std::ios_base::failure("Output Taproot tree key is more than one byte type");
+ }
+ m_tap_tree.emplace();
+ std::vector<unsigned char> tree_v;
+ s >> tree_v;
+ SpanReader s_tree(s.GetType(), s.GetVersion(), tree_v);
+ while (!s_tree.empty()) {
+ uint8_t depth;
+ uint8_t leaf_ver;
+ CScript script;
+ s_tree >> depth;
+ s_tree >> leaf_ver;
+ s_tree >> script;
+ m_tap_tree->Add((int)depth, script, (int)leaf_ver, true /* track */);
+ }
+ if (!m_tap_tree->IsComplete()) {
+ throw std::ios_base::failure("Output Taproot tree is malformed");
+ }
+ break;
+ }
+ case PSBT_OUT_TAP_BIP32_DERIVATION:
+ {
+ if (!key_lookup.emplace(key).second) {
+ throw std::ios_base::failure("Duplicate Key, output Taproot BIP32 keypath already provided");
+ } else if (key.size() != 33) {
+ throw std::ios_base::failure("Output Taproot BIP32 keypath key is not at 33 bytes");
+ }
+ XOnlyPubKey xonly(uint256({key.begin() + 1, key.begin() + 33}));
+ std::set<uint256> leaf_hashes;
+ uint64_t value_len = ReadCompactSize(s);
+ size_t before_hashes = s.size();
+ s >> leaf_hashes;
+ size_t after_hashes = s.size();
+ size_t hashes_len = before_hashes - after_hashes;
+ size_t origin_len = value_len - hashes_len;
+ m_tap_bip32_paths.emplace(xonly, std::make_pair(leaf_hashes, DeserializeKeyOrigin(s, origin_len)));
+ break;
+ }
case PSBT_OUT_PROPRIETARY:
{
PSBTProprietary this_prop;
@@ -652,6 +919,11 @@ struct PSBTOutput
}
}
+ // Finalize m_tap_tree so that all of the computed things are computed
+ if (m_tap_tree.has_value() && m_tap_tree->IsComplete() && m_tap_internal_key.IsFullyValid()) {
+ m_tap_tree->Finalize(m_tap_internal_key);
+ }
+
if (!found_sep) {
throw std::ios_base::failure("Separator is missing at the end of an output map");
}
diff --git a/src/pubkey.h b/src/pubkey.h
index dfe06f834c..463efe1b00 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -286,6 +286,9 @@ public:
bool operator==(const XOnlyPubKey& other) const { return m_keydata == other.m_keydata; }
bool operator!=(const XOnlyPubKey& other) const { return m_keydata != other.m_keydata; }
bool operator<(const XOnlyPubKey& other) const { return m_keydata < other.m_keydata; }
+
+ //! Implement serialization without length prefixes since it is a fixed length
+ SERIALIZE_METHODS(XOnlyPubKey, obj) { READWRITE(obj.m_keydata); }
};
struct CExtPubKey {
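The new SERIALIZE_METHODS makes an XOnlyPubKey stream as its raw 32 bytes with no length prefix, which the PSBT taproot (de)serialization above relies on. A minimal round-trip sketch, assuming the usual CDataStream setup:
    const XOnlyPubKey key{uint256::ONE};   // any 32-byte value; not a real public key
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
    ss << key;                             // writes exactly 32 bytes, no compact-size prefix
    XOnlyPubKey restored;
    ss >> restored;                        // restored == key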
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index f11ddad30f..27d3a1b9e2 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -96,7 +96,11 @@ static void RegisterMetaTypes()
qRegisterMetaType<QMessageBox::Icon>("QMessageBox::Icon");
qRegisterMetaType<interfaces::BlockAndHeaderTipInfo>("interfaces::BlockAndHeaderTipInfo");
+#if (QT_VERSION < QT_VERSION_CHECK(6, 0, 0))
qRegisterMetaTypeStreamOperators<BitcoinUnit>("BitcoinUnit");
+#else
+ qRegisterMetaType<BitcoinUnit>("BitcoinUnit");
+#endif
}
static QString GetLangTerritory()
@@ -135,21 +139,30 @@ static void initTranslations(QTranslator &qtTranslatorBase, QTranslator &qtTrans
// - First load the translator for the base language, without territory
// - Then load the more specific locale translator
+#if (QT_VERSION < QT_VERSION_CHECK(6, 0, 0))
+ const QString translation_path{QLibraryInfo::location(QLibraryInfo::TranslationsPath)};
+#else
+ const QString translation_path{QLibraryInfo::path(QLibraryInfo::TranslationsPath)};
+#endif
// Load e.g. qt_de.qm
- if (qtTranslatorBase.load("qt_" + lang, QLibraryInfo::location(QLibraryInfo::TranslationsPath)))
+ if (qtTranslatorBase.load("qt_" + lang, translation_path)) {
QApplication::installTranslator(&qtTranslatorBase);
+ }
// Load e.g. qt_de_DE.qm
- if (qtTranslator.load("qt_" + lang_territory, QLibraryInfo::location(QLibraryInfo::TranslationsPath)))
+ if (qtTranslator.load("qt_" + lang_territory, translation_path)) {
QApplication::installTranslator(&qtTranslator);
+ }
// Load e.g. bitcoin_de.qm (shortcut "de" needs to be defined in bitcoin.qrc)
- if (translatorBase.load(lang, ":/translations/"))
+ if (translatorBase.load(lang, ":/translations/")) {
QApplication::installTranslator(&translatorBase);
+ }
// Load e.g. bitcoin_de_DE.qm (shortcut "de_DE" needs to be defined in bitcoin.qrc)
- if (translator.load(lang_territory, ":/translations/"))
+ if (translator.load(lang_territory, ":/translations/")) {
QApplication::installTranslator(&translator);
+ }
}
static bool InitSettings()
@@ -517,9 +530,11 @@ int GuiMain(int argc, char* argv[])
Q_INIT_RESOURCE(bitcoin);
Q_INIT_RESOURCE(bitcoin_locale);
+#if (QT_VERSION < QT_VERSION_CHECK(6, 0, 0))
// Generate high-dpi pixmaps
QApplication::setAttribute(Qt::AA_UseHighDpiPixmaps);
QCoreApplication::setAttribute(Qt::AA_EnableHighDpiScaling);
+#endif
#if defined(QT_QPA_PLATFORM_ANDROID)
QApplication::setAttribute(Qt::AA_DontUseNativeMenuBar);
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 6fea8e1cba..d65fc58865 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -999,6 +999,7 @@ void BitcoinGUI::openOptionsDialogWithTab(OptionsDialog::Tab tab)
auto dlg = new OptionsDialog(this, enableWallet);
connect(dlg, &OptionsDialog::quitOnReset, this, &BitcoinGUI::quitRequested);
dlg->setCurrentTab(tab);
+ dlg->setClientModel(clientModel);
dlg->setModel(clientModel->getOptionsModel());
GUIUtil::ShowModalDialogAsynchronously(dlg);
}
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index f3c3af10e0..462b923d61 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -10,6 +10,7 @@
#include <qt/forms/ui_optionsdialog.h>
#include <qt/bitcoinunits.h>
+#include <qt/clientmodel.h>
#include <qt/guiconstants.h>
#include <qt/guiutil.h>
#include <qt/optionsmodel.h>
@@ -168,6 +169,11 @@ OptionsDialog::~OptionsDialog()
delete ui;
}
+void OptionsDialog::setClientModel(ClientModel* client_model)
+{
+ m_client_model = client_model;
+}
+
void OptionsDialog::setModel(OptionsModel *_model)
{
this->model = _model;
@@ -278,14 +284,15 @@ void OptionsDialog::setOkButtonState(bool fState)
void OptionsDialog::on_resetButton_clicked()
{
- if(model)
- {
+ if (model) {
// confirmation dialog
QMessageBox::StandardButton btnRetVal = QMessageBox::question(this, tr("Confirm options reset"),
- tr("Client restart required to activate changes.") + "<br><br>" + tr("Client will be shut down. Do you want to proceed?"),
+ tr("Client restart required to activate changes.") + "<br><br>" +
+ tr("Current settings will be backed up at \"%1\".").arg(m_client_model->dataDir()) + "<br><br>" +
+ tr("Client will be shut down. Do you want to proceed?"),
QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Cancel);
- if(btnRetVal == QMessageBox::Cancel)
+ if (btnRetVal == QMessageBox::Cancel)
return;
/* reset all options and close GUI */
diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h
index 0b7802536c..e5a19d5025 100644
--- a/src/qt/optionsdialog.h
+++ b/src/qt/optionsdialog.h
@@ -8,6 +8,7 @@
#include <QDialog>
#include <QValidator>
+class ClientModel;
class OptionsModel;
class QValidatedLineEdit;
@@ -45,6 +46,7 @@ public:
TAB_NETWORK,
};
+ void setClientModel(ClientModel* client_model);
void setModel(OptionsModel *model);
void setMapper();
void setCurrentTab(OptionsDialog::Tab tab);
@@ -72,6 +74,7 @@ Q_SIGNALS:
private:
Ui::OptionsDialog *ui;
+ ClientModel* m_client_model{nullptr};
OptionsModel *model;
QDataWidgetMapper *mapper;
};
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 9766a237c7..6846e992d4 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -845,7 +845,7 @@ static RPCHelpMan gettxoutsetinfo()
"Note this call may take some time if you are not using coinstatsindex.\n",
{
{"hash_type", RPCArg::Type::STR, RPCArg::Default{"hash_serialized_2"}, "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'muhash', 'none'."},
- {"hash_or_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "The block hash or height of the target height (only available with coinstatsindex).", "", {"", "string or numeric"}},
+ {"hash_or_height", RPCArg::Type::NUM, RPCArg::DefaultHint{"the current best block"}, "The block hash or height of the target height (only available with coinstatsindex).", "", {"", "string or numeric"}},
{"use_index", RPCArg::Type::BOOL, RPCArg::Default{true}, "Use coinstatsindex, if available."},
},
RPCResult{
@@ -881,6 +881,7 @@ static RPCHelpMan gettxoutsetinfo()
HelpExampleCli("gettxoutsetinfo", R"("none")") +
HelpExampleCli("gettxoutsetinfo", R"("none" 1000)") +
HelpExampleCli("gettxoutsetinfo", R"("none" '"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09"')") +
+ HelpExampleCli("-named gettxoutsetinfo", R"(hash_type='muhash' use_index='false')") +
HelpExampleRpc("gettxoutsetinfo", "") +
HelpExampleRpc("gettxoutsetinfo", R"("none")") +
HelpExampleRpc("gettxoutsetinfo", R"("none", 1000)") +
@@ -917,6 +918,9 @@ static RPCHelpMan gettxoutsetinfo()
throw JSONRPCError(RPC_INVALID_PARAMETER, "hash_serialized_2 hash type cannot be queried for a specific block");
}
+ if (!index_requested) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot set use_index to false when querying for a specific block");
+ }
pindex = ParseHashOrHeight(request.params[1], chainman);
}
@@ -2177,7 +2181,7 @@ static RPCHelpMan getblockfilter()
"\nRetrieve a BIP 157 content filter for a particular block.\n",
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hash of the block"},
- {"filtertype", RPCArg::Type::STR, RPCArg::Default{"basic"}, "The type name of the filter"},
+ {"filtertype", RPCArg::Type::STR, RPCArg::Default{BlockFilterTypeName(BlockFilterType::BASIC)}, "The type name of the filter"},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -2192,7 +2196,7 @@ static RPCHelpMan getblockfilter()
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
uint256 block_hash = ParseHashV(request.params[0], "blockhash");
- std::string filtertype_name = "basic";
+ std::string filtertype_name = BlockFilterTypeName(BlockFilterType::BASIC);
if (!request.params[1].isNull()) {
filtertype_name = request.params[1].get_str();
}
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index ae0d0112ba..9be3ab7df0 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -110,6 +110,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "sendrawtransaction", 1, "maxfeerate" },
{ "testmempoolaccept", 0, "rawtxs" },
{ "testmempoolaccept", 1, "maxfeerate" },
+ { "submitpackage", 0, "package" },
{ "combinerawtransaction", 0, "txs" },
{ "fundrawtransaction", 1, "options" },
{ "fundrawtransaction", 2, "iswitness" },
diff --git a/src/rpc/fees.cpp b/src/rpc/fees.cpp
index 1873bc1587..dd1a6441a0 100644
--- a/src/rpc/fees.cpp
+++ b/src/rpc/fees.cpp
@@ -89,7 +89,7 @@ static RPCHelpMan estimatesmartfee()
FeeCalculation feeCalc;
CFeeRate feeRate{fee_estimator.estimateSmartFee(conf_target, &feeCalc, conservative)};
if (feeRate != CFeeRate(0)) {
- CFeeRate min_mempool_feerate{mempool.GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000)};
+ CFeeRate min_mempool_feerate{mempool.GetMinFee()};
CFeeRate min_relay_feerate{::minRelayTxFee};
feeRate = std::max({feeRate, min_mempool_feerate, min_relay_feerate});
result.pushKV("feerate", ValueFromAmount(feeRate.GetFeePerK()));
diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp
index 97ec95a166..fbb40ab861 100644
--- a/src/rpc/mempool.cpp
+++ b/src/rpc/mempool.cpp
@@ -5,6 +5,7 @@
#include <rpc/blockchain.h>
+#include <chainparams.h>
#include <core_io.h>
#include <fs.h>
#include <policy/rbf.h>
@@ -657,10 +658,10 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool)
ret.pushKV("bytes", (int64_t)pool.GetTotalTxSize());
ret.pushKV("usage", (int64_t)pool.DynamicMemoryUsage());
ret.pushKV("total_fee", ValueFromAmount(pool.GetTotalFee()));
- int64_t maxmempool{gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000};
- ret.pushKV("maxmempool", maxmempool);
- ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(maxmempool), ::minRelayTxFee).GetFeePerK()));
+ ret.pushKV("maxmempool", pool.m_max_size_bytes);
+ ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(), ::minRelayTxFee).GetFeePerK()));
ret.pushKV("minrelaytxfee", ValueFromAmount(::minRelayTxFee.GetFeePerK()));
+ ret.pushKV("incrementalrelayfee", ValueFromAmount(::incrementalRelayFee.GetFeePerK()));
ret.pushKV("unbroadcastcount", uint64_t{pool.GetUnbroadcastTxs().size()});
return ret;
}
@@ -668,7 +669,7 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool)
static RPCHelpMan getmempoolinfo()
{
return RPCHelpMan{"getmempoolinfo",
- "\nReturns details on the active state of the TX memory pool.\n",
+ "Returns details on the active state of the TX memory pool.",
{},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -681,7 +682,8 @@ static RPCHelpMan getmempoolinfo()
{RPCResult::Type::NUM, "maxmempool", "Maximum memory usage for the mempool"},
{RPCResult::Type::STR_AMOUNT, "mempoolminfee", "Minimum fee rate in " + CURRENCY_UNIT + "/kvB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee"},
{RPCResult::Type::STR_AMOUNT, "minrelaytxfee", "Current minimum relay fee for transactions"},
- {RPCResult::Type::NUM, "unbroadcastcount", "Current number of transactions that haven't passed initial broadcast yet"}
+ {RPCResult::Type::NUM, "incrementalrelayfee", "minimum fee rate increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kvB"},
+ {RPCResult::Type::NUM, "unbroadcastcount", "Current number of transactions that haven't passed initial broadcast yet"},
}},
RPCExamples{
HelpExampleCli("getmempoolinfo", "")
@@ -729,6 +731,150 @@ static RPCHelpMan savemempool()
};
}
+static RPCHelpMan submitpackage()
+{
+ return RPCHelpMan{"submitpackage",
+ "Submit a package of raw transactions (serialized, hex-encoded) to local node (-regtest only).\n"
+ "The package will be validated according to consensus and mempool policy rules. If all transactions pass, they will be accepted to mempool.\n"
+ "This RPC is experimental and the interface may be unstable. Refer to doc/policy/packages.md for documentation on package policies.\n"
+ "Warning: until package relay is in use, successful submission does not mean the transaction will propagate to other nodes on the network.\n"
+ "Currently, each transaction is broadcasted individually after submission, which means they must meet other nodes' feerate requirements alone.\n"
+ ,
+ {
+ {"package", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of raw transactions.",
+ {
+ {"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""},
+ },
+ },
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ_DYN, "tx-results", "transaction results keyed by wtxid",
+ {
+ {RPCResult::Type::OBJ, "wtxid", "transaction wtxid", {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"},
+ {RPCResult::Type::STR_HEX, "other-wtxid", /*optional=*/true, "The wtxid of a different transaction with the same txid but different witness found in the mempool. This means the submitted transaction was ignored."},
+ {RPCResult::Type::NUM, "vsize", "Virtual transaction size as defined in BIP 141."},
+ {RPCResult::Type::OBJ, "fees", "Transaction fees", {
+ {RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT},
+ }},
+ }}
+ }},
+ {RPCResult::Type::STR_AMOUNT, "package-feerate", /*optional=*/true, "package feerate used for feerate checks in " + CURRENCY_UNIT + " per KvB. Excludes transactions which were deduplicated or accepted individually."},
+ {RPCResult::Type::ARR, "replaced-transactions", /*optional=*/true, "List of txids of replaced transactions",
+ {
+ {RPCResult::Type::STR_HEX, "", "The transaction id"},
+ }},
+ },
+ },
+ RPCExamples{
+ HelpExampleCli("testmempoolaccept", "[rawtx1, rawtx2]") +
+ HelpExampleCli("submitpackage", "[rawtx1, rawtx2]")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+ {
+ if (!Params().IsMockableChain()) {
+ throw std::runtime_error("submitpackage is for regression testing (-regtest mode) only");
+ }
+ RPCTypeCheck(request.params, {
+ UniValue::VARR,
+ });
+ const UniValue raw_transactions = request.params[0].get_array();
+ if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER,
+ "Array must contain between 1 and " + ToString(MAX_PACKAGE_COUNT) + " transactions.");
+ }
+
+ std::vector<CTransactionRef> txns;
+ txns.reserve(raw_transactions.size());
+ for (const auto& rawtx : raw_transactions.getValues()) {
+ CMutableTransaction mtx;
+ if (!DecodeHexTx(mtx, rawtx.get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
+ "TX decode failed: " + rawtx.get_str() + " Make sure the tx has at least one input.");
+ }
+ txns.emplace_back(MakeTransactionRef(std::move(mtx)));
+ }
+
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ CTxMemPool& mempool = EnsureMemPool(node);
+ CChainState& chainstate = EnsureChainman(node).ActiveChainstate();
+ const auto package_result = WITH_LOCK(::cs_main, return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/ false));
+
+ // First catch any errors.
+ switch(package_result.m_state.GetResult()) {
+ case PackageValidationResult::PCKG_RESULT_UNSET: break;
+ case PackageValidationResult::PCKG_POLICY:
+ {
+ throw JSONRPCTransactionError(TransactionError::INVALID_PACKAGE,
+ package_result.m_state.GetRejectReason());
+ }
+ case PackageValidationResult::PCKG_MEMPOOL_ERROR:
+ {
+ throw JSONRPCTransactionError(TransactionError::MEMPOOL_ERROR,
+ package_result.m_state.GetRejectReason());
+ }
+ case PackageValidationResult::PCKG_TX:
+ {
+ for (const auto& tx : txns) {
+ auto it = package_result.m_tx_results.find(tx->GetWitnessHash());
+ if (it != package_result.m_tx_results.end() && it->second.m_state.IsInvalid()) {
+ throw JSONRPCTransactionError(TransactionError::MEMPOOL_REJECTED,
+ strprintf("%s failed: %s", tx->GetHash().ToString(), it->second.m_state.GetRejectReason()));
+ }
+ }
+ // If a PCKG_TX error was returned, there must have been an invalid transaction.
+ NONFATAL_UNREACHABLE();
+ }
+ }
+ size_t num_submitted{0};
+ for (const auto& tx : txns) {
+ std::string err_string;
+ const auto err = BroadcastTransaction(node, tx, err_string, 0, true, true);
+ if (err != TransactionError::OK) {
+ throw JSONRPCTransactionError(err,
+ strprintf("transaction broadcast failed: %s (all transactions were submitted, %d transactions were broadcast successfully)",
+ err_string, num_submitted));
+ }
+ ++num_submitted;
+ }
+ UniValue rpc_result{UniValue::VOBJ};
+ UniValue tx_result_map{UniValue::VOBJ};
+ std::set<uint256> replaced_txids;
+ for (const auto& tx : txns) {
+ auto it = package_result.m_tx_results.find(tx->GetWitnessHash());
+ CHECK_NONFATAL(it != package_result.m_tx_results.end());
+ UniValue result_inner{UniValue::VOBJ};
+ result_inner.pushKV("txid", tx->GetHash().GetHex());
+ if (it->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS) {
+ result_inner.pushKV("other-wtxid", it->second.m_other_wtxid.value().GetHex());
+ }
+ if (it->second.m_result_type == MempoolAcceptResult::ResultType::VALID ||
+ it->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY) {
+ result_inner.pushKV("vsize", int64_t{it->second.m_vsize.value()});
+ UniValue fees(UniValue::VOBJ);
+ fees.pushKV("base", ValueFromAmount(it->second.m_base_fees.value()));
+ result_inner.pushKV("fees", fees);
+ if (it->second.m_replaced_transactions.has_value()) {
+ for (const auto& ptx : it->second.m_replaced_transactions.value()) {
+ replaced_txids.insert(ptx->GetHash());
+ }
+ }
+ }
+ tx_result_map.pushKV(tx->GetWitnessHash().GetHex(), result_inner);
+ }
+ rpc_result.pushKV("tx-results", tx_result_map);
+ if (package_result.m_package_feerate.has_value()) {
+ rpc_result.pushKV("package-feerate", ValueFromAmount(package_result.m_package_feerate.value().GetFeePerK()));
+ }
+ UniValue replaced_list(UniValue::VARR);
+ for (const uint256& hash : replaced_txids) replaced_list.push_back(hash.ToString());
+ rpc_result.pushKV("replaced-transactions", replaced_list);
+ return rpc_result;
+ },
+ };
+}
+
void RegisterMempoolRPCCommands(CRPCTable& t)
{
static const CRPCCommand commands[]{
@@ -741,6 +887,7 @@ void RegisterMempoolRPCCommands(CRPCTable& t)
{"blockchain", &getmempoolinfo},
{"blockchain", &getrawmempool},
{"blockchain", &savemempool},
+ {"hidden", &submitpackage},
};
for (const auto& c : commands) {
t.appendCommand(c.name, &c);
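Editor's note: the submitpackage result above is keyed by wtxid, which is easy to miss when consuming it. Below is a minimal, hedged sketch of reading such a result with the UniValue library used in this tree; the JSON literal is a hypothetical example matching the documented shape, not real RPC output.

// Sketch: iterating a submitpackage-style "tx-results" object keyed by wtxid.
#include <univalue.h>

#include <cassert>
#include <iostream>
#include <string>

int main()
{
    // Hypothetical result shaped like the RPCResult documented above.
    const std::string json = R"({
        "tx-results": {
            "aa...wtxid1": {"txid": "11...txid1", "vsize": 110, "fees": {"base": 0.00001000}},
            "bb...wtxid2": {"txid": "22...txid2", "vsize": 165, "fees": {"base": 0.00002500}}
        },
        "package-feerate": 0.00015151,
        "replaced-transactions": []
    })";

    UniValue result;
    assert(result.read(json));

    const UniValue& tx_results = result["tx-results"];
    for (const std::string& wtxid : tx_results.getKeys()) {
        const UniValue& entry = tx_results[wtxid];
        // "other-wtxid" is only present when a same-txid/different-witness
        // transaction was already in the mempool and the submitted one was ignored.
        if (entry.exists("other-wtxid")) {
            std::cout << wtxid << " ignored in favour of " << entry["other-wtxid"].get_str() << "\n";
            continue;
        }
        std::cout << wtxid << " -> txid " << entry["txid"].get_str()
                  << ", vsize " << entry["vsize"].write() << "\n";
    }
}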
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 0a061f2451..fad92629c5 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -206,13 +206,13 @@ static RPCHelpMan getpeerinfo()
obj.pushKV("conntime", count_seconds(stats.m_connected));
obj.pushKV("timeoffset", stats.nTimeOffset);
if (stats.m_last_ping_time > 0us) {
- obj.pushKV("pingtime", CountSecondsDouble(stats.m_last_ping_time));
+ obj.pushKV("pingtime", Ticks<SecondsDouble>(stats.m_last_ping_time));
}
if (stats.m_min_ping_time < std::chrono::microseconds::max()) {
- obj.pushKV("minping", CountSecondsDouble(stats.m_min_ping_time));
+ obj.pushKV("minping", Ticks<SecondsDouble>(stats.m_min_ping_time));
}
if (fStateStats && statestats.m_ping_wait > 0s) {
- obj.pushKV("pingwait", CountSecondsDouble(statestats.m_ping_wait));
+ obj.pushKV("pingwait", Ticks<SecondsDouble>(statestats.m_ping_wait));
}
obj.pushKV("version", stats.nVersion);
// Use the sanitized form of subver here, to avoid tricksy remote peers from
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index b9b8c36bb3..792a1e13b0 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -790,6 +790,43 @@ static RPCHelpMan decodepsbt()
{
{RPCResult::Type::STR, "hash", "The hash and preimage that corresponds to it."},
}},
+ {RPCResult::Type::STR_HEX, "taproot_key_path_sig", /*optional=*/ true, "hex-encoded signature for the Taproot key path spend"},
+ {RPCResult::Type::ARR, "taproot_script_path_sigs", /*optional=*/ true, "",
+ {
+ {RPCResult::Type::OBJ, "signature", /*optional=*/ true, "The signature for the pubkey and leaf hash combination",
+ {
+ {RPCResult::Type::STR, "pubkey", "The x-only pubkey for this signature"},
+ {RPCResult::Type::STR, "leaf_hash", "The leaf hash for this signature"},
+ {RPCResult::Type::STR, "sig", "The signature itself"},
+ }},
+ }},
+ {RPCResult::Type::ARR, "taproot_scripts", /*optional=*/ true, "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "script", "A leaf script"},
+ {RPCResult::Type::NUM, "leaf_ver", "The version number for the leaf script"},
+ {RPCResult::Type::ARR, "control_blocks", "The control blocks for this script",
+ {
+ {RPCResult::Type::STR_HEX, "control_block", "A hex-encoded control block for this script"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::ARR, "taproot_bip32_derivs", /*optional=*/ true, "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "pubkey", "The x-only public key this path corresponds to"},
+ {RPCResult::Type::STR, "master_fingerprint", "The fingerprint of the master key"},
+ {RPCResult::Type::STR, "path", "The path"},
+ {RPCResult::Type::ARR, "leaf_hashes", "The hashes of the leaves this pubkey appears in",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The hash of a leaf this pubkey appears in"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::STR_HEX, "taproot_internal_key", /*optional=*/ true, "The hex-encoded Taproot x-only internal key"},
+ {RPCResult::Type::STR_HEX, "taproot_merkle_root", /*optional=*/ true, "The hex-encoded Taproot merkle root"},
{RPCResult::Type::OBJ_DYN, "unknown", /*optional=*/ true, "The unknown input fields",
{
{RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
@@ -831,7 +868,30 @@ static RPCHelpMan decodepsbt()
{RPCResult::Type::STR, "path", "The path"},
}},
}},
- {RPCResult::Type::OBJ_DYN, "unknown", /*optional=*/true, "The unknown global fields",
+ {RPCResult::Type::STR_HEX, "taproot_internal_key", /*optional=*/ true, "The hex-encoded Taproot x-only internal key"},
+ {RPCResult::Type::ARR, "taproot_tree", /*optional=*/ true, "The tuples that make up the Taproot tree, in depth first search order",
+ {
+ {RPCResult::Type::OBJ, "tuple", /*optional=*/ true, "A single leaf script in the taproot tree",
+ {
+ {RPCResult::Type::NUM, "depth", "The depth of this element in the tree"},
+ {RPCResult::Type::NUM, "leaf_ver", "The version of this leaf"},
+ {RPCResult::Type::STR, "script", "The hex-encoded script itself"},
+ }},
+ }},
+ {RPCResult::Type::ARR, "taproot_bip32_derivs", /*optional=*/ true, "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "pubkey", "The x-only public key this path corresponds to"},
+ {RPCResult::Type::STR, "master_fingerprint", "The fingerprint of the master key"},
+ {RPCResult::Type::STR, "path", "The path"},
+ {RPCResult::Type::ARR, "leaf_hashes", "The hashes of the leaves this pubkey appears in",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The hash of a leaf this pubkey appears in"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::OBJ_DYN, "unknown", /*optional=*/true, "The unknown output fields",
{
{RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
}},
@@ -1045,6 +1105,72 @@ static RPCHelpMan decodepsbt()
in.pushKV("hash256_preimages", hash256_preimages);
}
+ // Taproot key path signature
+ if (!input.m_tap_key_sig.empty()) {
+ in.pushKV("taproot_key_path_sig", HexStr(input.m_tap_key_sig));
+ }
+
+ // Taproot script path signatures
+ if (!input.m_tap_script_sigs.empty()) {
+ UniValue script_sigs(UniValue::VARR);
+ for (const auto& [pubkey_leaf, sig] : input.m_tap_script_sigs) {
+ const auto& [xonly, leaf_hash] = pubkey_leaf;
+ UniValue sigobj(UniValue::VOBJ);
+ sigobj.pushKV("pubkey", HexStr(xonly));
+ sigobj.pushKV("leaf_hash", HexStr(leaf_hash));
+ sigobj.pushKV("sig", HexStr(sig));
+ script_sigs.push_back(sigobj);
+ }
+ in.pushKV("taproot_script_path_sigs", script_sigs);
+ }
+
+ // Taproot leaf scripts
+ if (!input.m_tap_scripts.empty()) {
+ UniValue tap_scripts(UniValue::VARR);
+ for (const auto& [leaf, control_blocks] : input.m_tap_scripts) {
+ const auto& [script, leaf_ver] = leaf;
+ UniValue script_info(UniValue::VOBJ);
+ script_info.pushKV("script", HexStr(script));
+ script_info.pushKV("leaf_ver", leaf_ver);
+ UniValue control_blocks_univ(UniValue::VARR);
+ for (const auto& control_block : control_blocks) {
+ control_blocks_univ.push_back(HexStr(control_block));
+ }
+ script_info.pushKV("control_blocks", control_blocks_univ);
+ tap_scripts.push_back(script_info);
+ }
+ in.pushKV("taproot_scripts", tap_scripts);
+ }
+
+ // Taproot bip32 keypaths
+ if (!input.m_tap_bip32_paths.empty()) {
+ UniValue keypaths(UniValue::VARR);
+ for (const auto& [xonly, leaf_origin] : input.m_tap_bip32_paths) {
+ const auto& [leaf_hashes, origin] = leaf_origin;
+ UniValue path_obj(UniValue::VOBJ);
+ path_obj.pushKV("pubkey", HexStr(xonly));
+ path_obj.pushKV("master_fingerprint", strprintf("%08x", ReadBE32(origin.fingerprint)));
+ path_obj.pushKV("path", WriteHDKeypath(origin.path));
+ UniValue leaf_hashes_arr(UniValue::VARR);
+ for (const auto& leaf_hash : leaf_hashes) {
+ leaf_hashes_arr.push_back(HexStr(leaf_hash));
+ }
+ path_obj.pushKV("leaf_hashes", leaf_hashes_arr);
+ keypaths.push_back(path_obj);
+ }
+ in.pushKV("taproot_bip32_derivs", keypaths);
+ }
+
+ // Taproot internal key
+ if (!input.m_tap_internal_key.IsNull()) {
+ in.pushKV("taproot_internal_key", HexStr(input.m_tap_internal_key));
+ }
+
+ // Write taproot merkle root
+ if (!input.m_tap_merkle_root.IsNull()) {
+ in.pushKV("taproot_merkle_root", HexStr(input.m_tap_merkle_root));
+ }
+
// Proprietary
if (!input.m_proprietary.empty()) {
UniValue proprietary(UniValue::VARR);
@@ -1103,6 +1229,47 @@ static RPCHelpMan decodepsbt()
out.pushKV("bip32_derivs", keypaths);
}
+ // Taproot internal key
+ if (!output.m_tap_internal_key.IsNull()) {
+ out.pushKV("taproot_internal_key", HexStr(output.m_tap_internal_key));
+ }
+
+ // Taproot tree
+ if (output.m_tap_tree.has_value()) {
+ UniValue tree(UniValue::VARR);
+ const auto& tuples = output.m_tap_tree->GetTreeTuples();
+ for (const auto& tuple : tuples) {
+ uint8_t depth = std::get<0>(tuple);
+ uint8_t leaf_ver = std::get<1>(tuple);
+ CScript script = std::get<2>(tuple);
+ UniValue elem(UniValue::VOBJ);
+ elem.pushKV("depth", (int)depth);
+ elem.pushKV("leaf_ver", (int)leaf_ver);
+ elem.pushKV("script", HexStr(script));
+ tree.push_back(elem);
+ }
+ out.pushKV("taproot_tree", tree);
+ }
+
+ // Taproot bip32 keypaths
+ if (!output.m_tap_bip32_paths.empty()) {
+ UniValue keypaths(UniValue::VARR);
+ for (const auto& [xonly, leaf_origin] : output.m_tap_bip32_paths) {
+ const auto& [leaf_hashes, origin] = leaf_origin;
+ UniValue path_obj(UniValue::VOBJ);
+ path_obj.pushKV("pubkey", HexStr(xonly));
+ path_obj.pushKV("master_fingerprint", strprintf("%08x", ReadBE32(origin.fingerprint)));
+ path_obj.pushKV("path", WriteHDKeypath(origin.path));
+ UniValue leaf_hashes_arr(UniValue::VARR);
+ for (const auto& leaf_hash : leaf_hashes) {
+ leaf_hashes_arr.push_back(HexStr(leaf_hash));
+ }
+ path_obj.pushKV("leaf_hashes", leaf_hashes_arr);
+ keypaths.push_back(path_obj);
+ }
+ out.pushKV("taproot_bip32_derivs", keypaths);
+ }
+
// Proprietary
if (!output.m_proprietary.empty()) {
UniValue proprietary(UniValue::VARR);
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 66ed18045e..e9987d73be 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -11,14 +11,18 @@
#include <util/strencodings.h>
#include <util/string.h>
#include <util/system.h>
+#include <util/time.h>
#include <boost/signals2/signal.hpp>
#include <cassert>
-#include <memory> // for unique_ptr
+#include <chrono>
+#include <memory>
#include <mutex>
#include <unordered_map>
+using SteadyClock = std::chrono::steady_clock;
+
static GlobalMutex g_rpc_warmup_mutex;
static std::atomic<bool> g_rpc_running{false};
static bool fRPCInWarmup GUARDED_BY(g_rpc_warmup_mutex) = true;
@@ -33,7 +37,7 @@ static bool ExecuteCommand(const CRPCCommand& command, const JSONRPCRequest& req
struct RPCCommandExecutionInfo
{
std::string method;
- int64_t start;
+ SteadyClock::time_point start;
};
struct RPCServerInfo
@@ -50,7 +54,7 @@ struct RPCCommandExecution
explicit RPCCommandExecution(const std::string& method)
{
LOCK(g_rpc_server_info.mutex);
- it = g_rpc_server_info.active_commands.insert(g_rpc_server_info.active_commands.end(), {method, GetTimeMicros()});
+ it = g_rpc_server_info.active_commands.insert(g_rpc_server_info.active_commands.end(), {method, SteadyClock::now()});
}
~RPCCommandExecution()
{
@@ -231,7 +235,7 @@ static RPCHelpMan getrpcinfo()
for (const RPCCommandExecutionInfo& info : g_rpc_server_info.active_commands) {
UniValue entry(UniValue::VOBJ);
entry.pushKV("method", info.method);
- entry.pushKV("duration", GetTimeMicros() - info.start);
+ entry.pushKV("duration", int64_t{Ticks<std::chrono::microseconds>(SteadyClock::now() - info.start)});
active_commands.push_back(entry);
}
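Editor's note: the server.cpp change above swaps wall-clock GetTimeMicros() for a monotonic steady_clock time_point, so reported RPC durations cannot go negative when the system clock is adjusted. A minimal standalone sketch of the same pattern, using plain std::chrono (Ticks<std::chrono::microseconds>(d) in util/time.h is assumed here to behave like duration_cast<std::chrono::microseconds>(d).count()):

#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>

int main()
{
    using SteadyClock = std::chrono::steady_clock;

    const SteadyClock::time_point start{SteadyClock::now()};
    std::this_thread::sleep_for(std::chrono::milliseconds{5}); // stand-in for command execution

    const auto elapsed = SteadyClock::now() - start;
    const int64_t micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
    std::cout << "duration: " << micros << " us\n"; // monotonic, unaffected by clock changes
}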
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index cece0b60ce..ca0170c84b 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -882,7 +882,7 @@ protected:
if (!xpk.IsFullyValid()) return {};
builder.Finalize(xpk);
WitnessV1Taproot output = builder.GetOutput();
- out.tr_spenddata[output].Merge(builder.GetSpendData());
+ out.tr_trees[output] = builder;
out.pubkeys.emplace(keys[0].GetID(), keys[0]);
return Vector(GetScriptForDestination(output));
}
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 2d569d674a..a3681d26cc 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -150,6 +150,7 @@ static bool CreateTaprootScriptSig(const BaseSignatureCreator& creator, Signatur
auto it = sigdata.taproot_script_sigs.find(lookup_key);
if (it != sigdata.taproot_script_sigs.end()) {
sig_out = it->second;
+ return true;
}
if (creator.CreateSchnorrSig(provider, sig_out, pubkey, &leaf_hash, nullptr, sigversion)) {
sigdata.taproot_script_sigs[lookup_key] = sig_out;
@@ -169,6 +170,17 @@ static bool SignTaprootScript(const SigningProvider& provider, const BaseSignatu
// <xonly pubkey> OP_CHECKSIG
if (script.size() == 34 && script[33] == OP_CHECKSIG && script[0] == 0x20) {
XOnlyPubKey pubkey{Span{script}.subspan(1, 32)};
+
+ KeyOriginInfo info;
+ if (provider.GetKeyOriginByXOnly(pubkey, info)) {
+ auto it = sigdata.taproot_misc_pubkeys.find(pubkey);
+ if (it == sigdata.taproot_misc_pubkeys.end()) {
+ sigdata.taproot_misc_pubkeys.emplace(pubkey, std::make_pair(std::set<uint256>({leaf_hash}), info));
+ } else {
+ it->second.first.insert(leaf_hash);
+ }
+ }
+
std::vector<unsigned char> sig;
if (CreateTaprootScriptSig(creator, sigdata, provider, sig, pubkey, leaf_hash, sigversion)) {
result = Vector(std::move(sig));
@@ -205,17 +217,29 @@ static bool SignTaprootScript(const SigningProvider& provider, const BaseSignatu
static bool SignTaproot(const SigningProvider& provider, const BaseSignatureCreator& creator, const WitnessV1Taproot& output, SignatureData& sigdata, std::vector<valtype>& result)
{
TaprootSpendData spenddata;
+ TaprootBuilder builder;
// Gather information about this output.
if (provider.GetTaprootSpendData(output, spenddata)) {
sigdata.tr_spenddata.Merge(spenddata);
}
+ if (provider.GetTaprootBuilder(output, builder)) {
+ sigdata.tr_builder = builder;
+ }
// Try key path spending.
{
+ KeyOriginInfo info;
+ if (provider.GetKeyOriginByXOnly(sigdata.tr_spenddata.internal_key, info)) {
+ auto it = sigdata.taproot_misc_pubkeys.find(sigdata.tr_spenddata.internal_key);
+ if (it == sigdata.taproot_misc_pubkeys.end()) {
+ sigdata.taproot_misc_pubkeys.emplace(sigdata.tr_spenddata.internal_key, std::make_pair(std::set<uint256>(), info));
+ }
+ }
+
std::vector<unsigned char> sig;
if (sigdata.taproot_key_path_sig.size() == 0) {
- if (creator.CreateSchnorrSig(provider, sig, spenddata.internal_key, nullptr, &spenddata.merkle_root, SigVersion::TAPROOT)) {
+ if (creator.CreateSchnorrSig(provider, sig, sigdata.tr_spenddata.internal_key, nullptr, &sigdata.tr_spenddata.merkle_root, SigVersion::TAPROOT)) {
sigdata.taproot_key_path_sig = sig;
}
}
diff --git a/src/script/sign.h b/src/script/sign.h
index 71203d08ec..5e58272154 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -70,10 +70,12 @@ struct SignatureData {
CScript witness_script; ///< The witnessScript (if any) for the input. witnessScripts are used in P2WSH outputs.
CScriptWitness scriptWitness; ///< The scriptWitness of an input. Contains complete signatures or the traditional partial signatures format. scriptWitness is part of a transaction input per BIP 144.
TaprootSpendData tr_spenddata; ///< Taproot spending data.
+ std::optional<TaprootBuilder> tr_builder; ///< Taproot tree used to build tr_spenddata.
std::map<CKeyID, SigPair> signatures; ///< BIP 174 style partial signatures for the input. May contain all signatures necessary for producing a final scriptSig or scriptWitness.
std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>> misc_pubkeys;
std::vector<unsigned char> taproot_key_path_sig; /// Schnorr signature for key path spending
std::map<std::pair<XOnlyPubKey, uint256>, std::vector<unsigned char>> taproot_script_sigs; ///< (Partial) schnorr signatures, indexed by XOnlyPubKey and leaf_hash.
+ std::map<XOnlyPubKey, std::pair<std::set<uint256>, KeyOriginInfo>> taproot_misc_pubkeys; ///< Miscellaneous Taproot pubkeys involved in this input along with their leaf script hashes and key origin data. Also includes the Taproot internal key (may have no leaf script hashes).
std::vector<CKeyID> missing_pubkeys; ///< KeyIDs of pubkeys which could not be found
std::vector<CKeyID> missing_sigs; ///< KeyIDs of pubkeys for signatures which could not be found
uint160 missing_redeem_script; ///< ScriptID of the missing redeemScript (if any)
diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp
index 552934e0eb..c624a17628 100644
--- a/src/script/signingprovider.cpp
+++ b/src/script/signingprovider.cpp
@@ -48,6 +48,10 @@ bool HidingSigningProvider::GetTaprootSpendData(const XOnlyPubKey& output_key, T
{
return m_provider->GetTaprootSpendData(output_key, spenddata);
}
+bool HidingSigningProvider::GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const
+{
+ return m_provider->GetTaprootBuilder(output_key, builder);
+}
bool FlatSigningProvider::GetCScript(const CScriptID& scriptid, CScript& script) const { return LookupHelper(scripts, scriptid, script); }
bool FlatSigningProvider::GetPubKey(const CKeyID& keyid, CPubKey& pubkey) const { return LookupHelper(pubkeys, keyid, pubkey); }
@@ -61,7 +65,16 @@ bool FlatSigningProvider::GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info)
bool FlatSigningProvider::GetKey(const CKeyID& keyid, CKey& key) const { return LookupHelper(keys, keyid, key); }
bool FlatSigningProvider::GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const
{
- return LookupHelper(tr_spenddata, output_key, spenddata);
+ TaprootBuilder builder;
+ if (LookupHelper(tr_trees, output_key, builder)) {
+ spenddata = builder.GetSpendData();
+ return true;
+ }
+ return false;
+}
+bool FlatSigningProvider::GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const
+{
+ return LookupHelper(tr_trees, output_key, builder);
}
FlatSigningProvider Merge(const FlatSigningProvider& a, const FlatSigningProvider& b)
@@ -75,10 +88,8 @@ FlatSigningProvider Merge(const FlatSigningProvider& a, const FlatSigningProvide
ret.keys.insert(b.keys.begin(), b.keys.end());
ret.origins = a.origins;
ret.origins.insert(b.origins.begin(), b.origins.end());
- ret.tr_spenddata = a.tr_spenddata;
- for (const auto& [output_key, spenddata] : b.tr_spenddata) {
- ret.tr_spenddata[output_key].Merge(spenddata);
- }
+ ret.tr_trees = a.tr_trees;
+ ret.tr_trees.insert(b.tr_trees.begin(), b.tr_trees.end());
return ret;
}
diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h
index f1bded1a8c..792cc903f2 100644
--- a/src/script/signingprovider.h
+++ b/src/script/signingprovider.h
@@ -25,6 +25,7 @@ public:
virtual bool HaveKey(const CKeyID &address) const { return false; }
virtual bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const { return false; }
virtual bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const { return false; }
+ virtual bool GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const { return false; }
bool GetKeyByXOnly(const XOnlyPubKey& pubkey, CKey& key) const
{
@@ -67,6 +68,7 @@ public:
bool GetKey(const CKeyID& keyid, CKey& key) const override;
bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override;
bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const override;
+ bool GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const override;
};
struct FlatSigningProvider final : public SigningProvider
@@ -75,13 +77,14 @@ struct FlatSigningProvider final : public SigningProvider
std::map<CKeyID, CPubKey> pubkeys;
std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>> origins;
std::map<CKeyID, CKey> keys;
- std::map<XOnlyPubKey, TaprootSpendData> tr_spenddata; /** Map from output key to spend data. */
+ std::map<XOnlyPubKey, TaprootBuilder> tr_trees; /** Map from output key to Taproot tree (which can then produce the TaprootSpendData) */
bool GetCScript(const CScriptID& scriptid, CScript& script) const override;
bool GetPubKey(const CKeyID& keyid, CPubKey& pubkey) const override;
bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override;
bool GetKey(const CKeyID& keyid, CKey& key) const override;
bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const override;
+ bool GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const override;
};
FlatSigningProvider Merge(const FlatSigningProvider& a, const FlatSigningProvider& b);
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index e25155d3dd..5d80891485 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -485,6 +485,7 @@ WitnessV1Taproot TaprootBuilder::GetOutput() { return WitnessV1Taproot{m_output_
TaprootSpendData TaprootBuilder::GetSpendData() const
{
assert(IsComplete());
+ assert(m_output_key.IsFullyValid());
TaprootSpendData spd;
spd.merkle_root = m_branch.size() == 0 ? uint256() : m_branch[0]->hash;
spd.internal_key = m_internal_key;
@@ -642,3 +643,19 @@ std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const
return ret;
}
+
+std::vector<std::tuple<uint8_t, uint8_t, CScript>> TaprootBuilder::GetTreeTuples() const
+{
+ assert(IsComplete());
+ std::vector<std::tuple<uint8_t, uint8_t, CScript>> tuples;
+ if (m_branch.size()) {
+ const auto& leaves = m_branch[0]->leaves;
+ for (const auto& leaf : leaves) {
+ assert(leaf.merkle_branch.size() <= TAPROOT_CONTROL_MAX_NODE_COUNT);
+ uint8_t depth = (uint8_t)leaf.merkle_branch.size();
+ uint8_t leaf_ver = (uint8_t)leaf.leaf_version;
+ tuples.push_back(std::make_tuple(depth, leaf_ver, leaf.script));
+ }
+ }
+ return tuples;
+}
diff --git a/src/script/standard.h b/src/script/standard.h
index 6a15ba4e3d..448fdff010 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -322,6 +322,8 @@ public:
static bool ValidDepths(const std::vector<int>& depths);
/** Compute spending data (after Finalize()). */
TaprootSpendData GetSpendData() const;
+ /** Returns a vector of tuples representing the depth, leaf version, and script */
+ std::vector<std::tuple<uint8_t, uint8_t, CScript>> GetTreeTuples() const;
};
/** Given a TaprootSpendData and the output key, reconstruct its script tree.
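Editor's note: GetTreeTuples() returns (depth, leaf_version, script) tuples in depth-first order, which is exactly what decodepsbt serializes above. A self-contained sketch of consuming such tuples; std::string stands in for CScript so the example compiles on its own, and the values are hypothetical.

#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

int main()
{
    // Hypothetical two-leaf tree: both leaves at depth 1, leaf version 0xc0.
    const std::vector<std::tuple<uint8_t, uint8_t, std::string>> tuples{
        {1, 0xc0, "20<xonly_a>ac"}, // depth, leaf_ver, script (hex shown as text)
        {1, 0xc0, "20<xonly_b>ac"},
    };

    // Tuples arrive in depth-first search order, mirroring the PSBT taproot tree field.
    for (const auto& [depth, leaf_ver, script] : tuples) {
        std::cout << "depth=" << int{depth}
                  << " leaf_ver=" << int{leaf_ver}
                  << " script=" << script << "\n";
    }
}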
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 3b4a6f2637..c87ed82c88 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -305,7 +305,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
peerLogic->InitializeNode(nodes[0]);
nodes[0]->fSuccessfullyConnected = true;
connman->AddTestNode(*nodes[0]);
- peerLogic->Misbehaving(nodes[0]->GetId(), DISCOURAGEMENT_THRESHOLD, /*message=*/""); // Should be discouraged
+ peerLogic->UnitTestMisbehaving(nodes[0]->GetId(), DISCOURAGEMENT_THRESHOLD); // Should be discouraged
{
LOCK(nodes[0]->cs_sendProcessing);
BOOST_CHECK(peerLogic->SendMessages(nodes[0]));
@@ -328,7 +328,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
peerLogic->InitializeNode(nodes[1]);
nodes[1]->fSuccessfullyConnected = true;
connman->AddTestNode(*nodes[1]);
- peerLogic->Misbehaving(nodes[1]->GetId(), DISCOURAGEMENT_THRESHOLD - 1, /*message=*/"");
+ peerLogic->UnitTestMisbehaving(nodes[1]->GetId(), DISCOURAGEMENT_THRESHOLD - 1);
{
LOCK(nodes[1]->cs_sendProcessing);
BOOST_CHECK(peerLogic->SendMessages(nodes[1]));
@@ -339,7 +339,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
// [1] is not discouraged/disconnected yet.
BOOST_CHECK(!banman->IsDiscouraged(addr[1]));
BOOST_CHECK(!nodes[1]->fDisconnect);
- peerLogic->Misbehaving(nodes[1]->GetId(), 1, /*message=*/""); // [1] reaches discouragement threshold
+ peerLogic->UnitTestMisbehaving(nodes[1]->GetId(), 1); // [1] reaches discouragement threshold
{
LOCK(nodes[1]->cs_sendProcessing);
BOOST_CHECK(peerLogic->SendMessages(nodes[1]));
@@ -366,7 +366,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
peerLogic->InitializeNode(nodes[2]);
nodes[2]->fSuccessfullyConnected = true;
connman->AddTestNode(*nodes[2]);
- peerLogic->Misbehaving(nodes[2]->GetId(), DISCOURAGEMENT_THRESHOLD, /*message=*/"");
+ peerLogic->UnitTestMisbehaving(nodes[2]->GetId(), DISCOURAGEMENT_THRESHOLD);
{
LOCK(nodes[2]->cs_sendProcessing);
BOOST_CHECK(peerLogic->SendMessages(nodes[2]));
@@ -411,7 +411,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
peerLogic->InitializeNode(&dummyNode);
dummyNode.fSuccessfullyConnected = true;
- peerLogic->Misbehaving(dummyNode.GetId(), DISCOURAGEMENT_THRESHOLD, /*message=*/"");
+ peerLogic->UnitTestMisbehaving(dummyNode.GetId(), DISCOURAGEMENT_THRESHOLD);
{
LOCK(dummyNode.cs_sendProcessing);
BOOST_CHECK(peerLogic->SendMessages(&dummyNode));
diff --git a/src/test/fuzz/policy_estimator.cpp b/src/test/fuzz/policy_estimator.cpp
index e4d95f72a0..58c19a91cb 100644
--- a/src/test/fuzz/policy_estimator.cpp
+++ b/src/test/fuzz/policy_estimator.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <policy/fees.h>
+#include <policy/fees_args.h>
#include <primitives/transaction.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
@@ -15,15 +16,20 @@
#include <string>
#include <vector>
+namespace {
+const BasicTestingSetup* g_setup;
+} // namespace
+
void initialize_policy_estimator()
{
static const auto testing_setup = MakeNoLogFileContext<>();
+ g_setup = testing_setup.get();
}
FUZZ_TARGET_INIT(policy_estimator, initialize_policy_estimator)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
- CBlockPolicyEstimator block_policy_estimator;
+ CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args)};
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) {
CallOneOf(
fuzzed_data_provider,
diff --git a/src/test/fuzz/policy_estimator_io.cpp b/src/test/fuzz/policy_estimator_io.cpp
index 9021d95954..77402c260a 100644
--- a/src/test/fuzz/policy_estimator_io.cpp
+++ b/src/test/fuzz/policy_estimator_io.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <policy/fees.h>
+#include <policy/fees_args.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -11,9 +12,14 @@
#include <cstdint>
#include <vector>
+namespace {
+const BasicTestingSetup* g_setup;
+} // namespace
+
void initialize_policy_estimator_io()
{
static const auto testing_setup = MakeNoLogFileContext<>();
+ g_setup = testing_setup.get();
}
FUZZ_TARGET_INIT(policy_estimator_io, initialize_policy_estimator_io)
@@ -22,7 +28,7 @@ FUZZ_TARGET_INIT(policy_estimator_io, initialize_policy_estimator_io)
FuzzedAutoFileProvider fuzzed_auto_file_provider = ConsumeAutoFile(fuzzed_data_provider);
CAutoFile fuzzed_auto_file = fuzzed_auto_file_provider.open();
// Re-using block_policy_estimator across runs to avoid costly creation of CBlockPolicyEstimator object.
- static CBlockPolicyEstimator block_policy_estimator;
+ static CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args)};
if (block_policy_estimator.Read(fuzzed_auto_file)) {
block_policy_estimator.Write(fuzzed_auto_file);
}
diff --git a/src/test/fuzz/rbf.cpp b/src/test/fuzz/rbf.cpp
index 8dcaa609b5..4801635791 100644
--- a/src/test/fuzz/rbf.cpp
+++ b/src/test/fuzz/rbf.cpp
@@ -2,12 +2,14 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <mempool_args.h>
#include <policy/rbf.h>
#include <primitives/transaction.h>
#include <sync.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <test/util/setup_common.h>
#include <txmempool.h>
#include <cstdint>
@@ -15,7 +17,17 @@
#include <string>
#include <vector>
-FUZZ_TARGET(rbf)
+namespace {
+const BasicTestingSetup* g_setup;
+} // namespace
+
+void initialize_rbf()
+{
+ static const auto testing_setup = MakeNoLogFileContext<>();
+ g_setup = testing_setup.get();
+}
+
+FUZZ_TARGET_INIT(rbf, initialize_rbf)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
SetMockTime(ConsumeTime(fuzzed_data_provider));
@@ -23,8 +35,11 @@ FUZZ_TARGET(rbf)
if (!mtx) {
return;
}
- CTxMemPool pool;
- LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) {
+
+ CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node)};
+
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000)
+ {
const std::optional<CMutableTransaction> another_mtx = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider);
if (!another_mtx) {
break;
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index e4e83c3f32..26913a41d2 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -159,6 +159,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"signrawtransactionwithkey",
"submitblock",
"submitheader",
+ "submitpackage",
"syncwithvalidationinterfacequeue",
"testmempoolaccept",
"uptime",
diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp
index 4f40608c4f..2d88ee295b 100644
--- a/src/test/fuzz/tx_pool.cpp
+++ b/src/test/fuzz/tx_pool.cpp
@@ -3,6 +3,8 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <consensus/validation.h>
+#include <mempool_args.h>
+#include <node/context.h>
#include <node/miner.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
@@ -15,6 +17,7 @@
#include <validationinterface.h>
using node::BlockAssembler;
+using node::NodeContext;
namespace {
@@ -121,6 +124,19 @@ void MockTime(FuzzedDataProvider& fuzzed_data_provider, const CChainState& chain
SetMockTime(time);
}
+CTxMemPool MakeMempool(const NodeContext& node)
+{
+ // Take the default options for tests...
+ CTxMemPool::Options mempool_opts{MemPoolOptionsForTest(node)};
+
+ // ...override specific options for this specific fuzz suite
+ mempool_opts.estimator = nullptr;
+ mempool_opts.check_ratio = 1;
+
+ // ...and construct a CTxMemPool from it
+ return CTxMemPool{mempool_opts};
+}
+
FUZZ_TARGET_INIT(tx_pool_standard, initialize_tx_pool)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
@@ -142,7 +158,7 @@ FUZZ_TARGET_INIT(tx_pool_standard, initialize_tx_pool)
// The sum of the values of all spendable outpoints
constexpr CAmount SUPPLY_TOTAL{COINBASE_MATURITY * 50 * COIN};
- CTxMemPool tx_pool_{/*estimator=*/nullptr, /*check_ratio=*/1};
+ CTxMemPool tx_pool_{MakeMempool(node)};
MockedTxPool& tx_pool = *static_cast<MockedTxPool*>(&tx_pool_);
chainstate.SetMempool(&tx_pool);
@@ -320,7 +336,7 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
txids.push_back(ConsumeUInt256(fuzzed_data_provider));
}
- CTxMemPool tx_pool_{/*estimator=*/nullptr, /*check_ratio=*/1};
+ CTxMemPool tx_pool_{MakeMempool(node)};
MockedTxPool& tx_pool = *static_cast<MockedTxPool*>(&tx_pool_);
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp
index 8f5e771e37..4b893c648e 100644
--- a/src/test/fuzz/util.cpp
+++ b/src/test/fuzz/util.cpp
@@ -155,6 +155,45 @@ int FuzzedSock::Connect(const sockaddr*, socklen_t) const
return 0;
}
+int FuzzedSock::Bind(const sockaddr*, socklen_t) const
+{
+ // Have a permanent error at bind_errnos[0] because when the fuzzed data is exhausted
+ // SetFuzzedErrNo() will always set the global errno to bind_errnos[0]. We want to
+ // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
+ // repeatedly because proper code should retry on temporary errors, leading to an
+ // infinite loop.
+ constexpr std::array bind_errnos{
+ EACCES,
+ EADDRINUSE,
+ EADDRNOTAVAIL,
+ EAGAIN,
+ };
+ if (m_fuzzed_data_provider.ConsumeBool()) {
+ SetFuzzedErrNo(m_fuzzed_data_provider, bind_errnos);
+ return -1;
+ }
+ return 0;
+}
+
+int FuzzedSock::Listen(int) const
+{
+ // Have a permanent error at listen_errnos[0] because when the fuzzed data is exhausted
+ // SetFuzzedErrNo() will always set the global errno to listen_errnos[0]. We want to
+ // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
+ // repeatedly because proper code should retry on temporary errors, leading to an
+ // infinite loop.
+ constexpr std::array listen_errnos{
+ EADDRINUSE,
+ EINVAL,
+ EOPNOTSUPP,
+ };
+ if (m_fuzzed_data_provider.ConsumeBool()) {
+ SetFuzzedErrNo(m_fuzzed_data_provider, listen_errnos);
+ return -1;
+ }
+ return 0;
+}
+
std::unique_ptr<Sock> FuzzedSock::Accept(sockaddr* addr, socklen_t* addr_len) const
{
constexpr std::array accept_errnos{
@@ -201,6 +240,20 @@ int FuzzedSock::SetSockOpt(int, int, const void*, socklen_t) const
return 0;
}
+int FuzzedSock::GetSockName(sockaddr* name, socklen_t* name_len) const
+{
+ constexpr std::array getsockname_errnos{
+ ECONNRESET,
+ ENOBUFS,
+ };
+ if (m_fuzzed_data_provider.ConsumeBool()) {
+ SetFuzzedErrNo(m_fuzzed_data_provider, getsockname_errnos);
+ return -1;
+ }
+ *name_len = m_fuzzed_data_provider.ConsumeData(name, *name_len);
+ return 0;
+}
+
bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
{
constexpr std::array wait_errnos{
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index 0819d326fd..4b89ad9bdc 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -61,12 +61,18 @@ public:
int Connect(const sockaddr*, socklen_t) const override;
+ int Bind(const sockaddr*, socklen_t) const override;
+
+ int Listen(int backlog) const override;
+
std::unique_ptr<Sock> Accept(sockaddr* addr, socklen_t* addr_len) const override;
int GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const override;
int SetSockOpt(int level, int opt_name, const void* opt_val, socklen_t opt_len) const override;
+ int GetSockName(sockaddr* name, socklen_t* name_len) const override;
+
bool Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred = nullptr) const override;
bool WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const override;
diff --git a/src/test/fuzz/validation_load_mempool.cpp b/src/test/fuzz/validation_load_mempool.cpp
index c2aaf486c5..9532610f8d 100644
--- a/src/test/fuzz/validation_load_mempool.cpp
+++ b/src/test/fuzz/validation_load_mempool.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <chainparamsbase.h>
+#include <mempool_args.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -30,7 +31,8 @@ FUZZ_TARGET_INIT(validation_load_mempool, initialize_validation_load_mempool)
SetMockTime(ConsumeTime(fuzzed_data_provider));
FuzzedFileProvider fuzzed_file_provider = ConsumeFile(fuzzed_data_provider);
- CTxMemPool pool{};
+ CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node)};
+
auto fuzzed_fopen = [&](const fs::path&, const char*) {
return fuzzed_file_provider.open();
};
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index bc63122025..8c745b07b9 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -16,6 +16,12 @@ BOOST_FIXTURE_TEST_SUITE(mempool_tests, TestingSetup)
static constexpr auto REMOVAL_REASON_DUMMY = MemPoolRemovalReason::REPLACED;
+class MemPoolTest final : public CTxMemPool
+{
+public:
+ using CTxMemPool::GetMinFee;
+};
+
BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
{
// Test CTxMemPool::remove functionality
@@ -423,7 +429,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest)
BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
{
- CTxMemPool& pool = *Assert(m_node.mempool);
+ auto& pool = static_cast<MemPoolTest&>(*Assert(m_node.mempool));
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index eca4fbf15c..20d670c1e1 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -369,8 +369,8 @@ void MinerTestingSetup::TestBasicMining(const CChainParams& chainparams, const C
}
// non-final txs in mempool
- SetMockTime(m_node.chainman->ActiveChain().Tip()->GetMedianTimePast()+1);
- const int flags{LOCKTIME_VERIFY_SEQUENCE | LOCKTIME_MEDIAN_TIME_PAST};
+ SetMockTime(m_node.chainman->ActiveChain().Tip()->GetMedianTimePast() + 1);
+ const int flags{LOCKTIME_VERIFY_SEQUENCE};
// height map
std::vector<int> prevheights;
diff --git a/src/test/util/net.h b/src/test/util/net.h
index edb45d7c8e..c5dbaeca3e 100644
--- a/src/test/util/net.h
+++ b/src/test/util/net.h
@@ -122,6 +122,10 @@ public:
int Connect(const sockaddr*, socklen_t) const override { return 0; }
+ int Bind(const sockaddr*, socklen_t) const override { return 0; }
+
+ int Listen(int) const override { return 0; }
+
std::unique_ptr<Sock> Accept(sockaddr* addr, socklen_t* addr_len) const override
{
if (addr != nullptr) {
@@ -147,6 +151,12 @@ public:
int SetSockOpt(int, int, const void*, socklen_t) const override { return 0; }
+ int GetSockName(sockaddr* name, socklen_t* name_len) const override
+ {
+ std::memset(name, 0x0, *name_len);
+ return 0;
+ }
+
bool Wait(std::chrono::milliseconds timeout,
Event requested,
Event* occurred = nullptr) const override
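Editor's note: the Bind/Listen/GetSockName stubs above follow the test-double idea of a socket wrapper whose test implementation succeeds without real syscalls, keeping unit tests deterministic. A small illustrative sketch of that idea; SockInterface and ZeroSock are made-up names, not the classes in the tree.

#include <sys/socket.h>

#include <cstring>
#include <iostream>

class SockInterface
{
public:
    virtual ~SockInterface() = default;
    virtual int Bind(const sockaddr*, socklen_t) const = 0;
    virtual int Listen(int backlog) const = 0;
    virtual int GetSockName(sockaddr* name, socklen_t* name_len) const = 0;
};

class ZeroSock final : public SockInterface
{
public:
    int Bind(const sockaddr*, socklen_t) const override { return 0; }  // always "succeeds"
    int Listen(int) const override { return 0; }                       // always "succeeds"
    int GetSockName(sockaddr* name, socklen_t* name_len) const override
    {
        std::memset(name, 0x0, *name_len); // deterministic, all-zero address
        return 0;
    }
};

int main()
{
    ZeroSock sock;
    sockaddr_storage addr{};
    socklen_t len{sizeof(addr)};
    std::cout << sock.Bind(reinterpret_cast<sockaddr*>(&addr), len) << " "
              << sock.Listen(/*backlog=*/1) << " "
              << sock.GetSockName(reinterpret_cast<sockaddr*>(&addr), &len) << "\n";
}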
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index d9fff85bf5..0c9e880d67 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -14,13 +14,16 @@
#include <init.h>
#include <init/common.h>
#include <interfaces/chain.h>
+#include <mempool_args.h>
#include <net.h>
#include <net_processing.h>
#include <node/blockstorage.h>
#include <node/chainstate.h>
+#include <node/context.h>
#include <node/miner.h>
#include <noui.h>
#include <policy/fees.h>
+#include <policy/fees_args.h>
#include <pow.h>
#include <rpc/blockchain.h>
#include <rpc/register.h>
@@ -32,6 +35,8 @@
#include <test/util/net.h>
#include <timedata.h>
#include <txdb.h>
+#include <txmempool.h>
+#include <util/designator.h>
#include <util/strencodings.h>
#include <util/string.h>
#include <util/thread.h>
@@ -50,11 +55,12 @@
using node::BlockAssembler;
using node::CalculateCacheSizes;
+using node::fPruneMode;
+using node::fReindex;
using node::LoadChainstate;
+using node::NodeContext;
using node::RegenerateCommitments;
using node::VerifyLoadedChainstate;
-using node::fPruneMode;
-using node::fReindex;
const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr;
UrlDecodeFn* const URL_DECODE = nullptr;
@@ -149,6 +155,18 @@ BasicTestingSetup::~BasicTestingSetup()
gArgs.ClearArgs();
}
+CTxMemPool::Options MemPoolOptionsForTest(const NodeContext& node)
+{
+ CTxMemPool::Options mempool_opts{
+ Desig(estimator) node.fee_estimator.get(),
+ // Default to always checking mempool regardless of
+ // chainparams.DefaultConsistencyChecks for tests
+ Desig(check_ratio) 1,
+ };
+ ApplyArgsManOptions(*node.args, mempool_opts);
+ return mempool_opts;
+}
+
ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::vector<const char*>& extra_args)
: BasicTestingSetup(chainName, extra_args)
{
@@ -160,8 +178,8 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve
m_node.scheduler->m_service_thread = std::thread(util::TraceThread, "scheduler", [&] { m_node.scheduler->serviceQueue(); });
GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler);
- m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>();
- m_node.mempool = std::make_unique<CTxMemPool>(m_node.fee_estimator.get(), m_node.args->GetIntArg("-checkmempool", 1));
+ m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(*m_node.args));
+ m_node.mempool = std::make_unique<CTxMemPool>(MemPoolOptionsForTest(m_node));
m_cache_sizes = CalculateCacheSizes(m_args);
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index 37407bcb92..ed2c5db7e6 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -90,6 +90,9 @@ struct BasicTestingSetup {
ArgsManager m_args;
};
+
+CTxMemPool::Options MemPoolOptionsForTest(const node::NodeContext& node);
+
/** Testing setup that performs all steps up until right before
* ChainstateManager gets initialized. Meant for testing ChainstateManager
* initialization behaviour.
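Editor's note: MemPoolOptionsForTest and the CTxMemPool::Options constructor changes in this diff both rely on an options-struct pattern: defaults live in one aggregate, callers override only what they need, and the object is built from the finished struct, so constructor signatures stay stable as new knobs appear. A standalone sketch of that pattern; PoolOptions/Pool are stand-ins and only the field names visible in this diff (check_ratio, max_size_bytes, expiry) are borrowed.

#include <chrono>
#include <cstdint>
#include <iostream>

struct PoolOptions {
    int check_ratio{0};                                    // 1-in-N consistency checks; 0 disables them
    int64_t max_size_bytes{300'000'000};                   // -maxmempool equivalent (illustrative default)
    std::chrono::seconds expiry{std::chrono::hours{336}};  // -mempoolexpiry equivalent (illustrative default)
};

class Pool {
public:
    explicit Pool(const PoolOptions& opts) : m_opts{opts} {}
    int64_t MaxSizeBytes() const { return m_opts.max_size_bytes; }
private:
    const PoolOptions m_opts;
};

int main()
{
    PoolOptions opts;      // start from defaults...
    opts.check_ratio = 1;  // ...override only what this test cares about
    Pool pool{opts};
    std::cout << pool.MaxSizeBytes() << "\n";
}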
diff --git a/src/txdb.cpp b/src/txdb.cpp
index a0939873ad..c048c2d92a 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -211,7 +211,6 @@ public:
bool GetKey(COutPoint &key) const override;
bool GetValue(Coin &coin) const override;
- unsigned int GetValueSize() const override;
bool Valid() const override;
void Next() override;
@@ -257,11 +256,6 @@ bool CCoinsViewDBCursor::GetValue(Coin &coin) const
return pcursor->GetValue(coin);
}
-unsigned int CCoinsViewDBCursor::GetValueSize() const
-{
- return pcursor->GetValueSize();
-}
-
bool CCoinsViewDBCursor::Valid() const
{
return keyTmp.first == DB_COIN;
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 65c8b4ea60..69ae9fed99 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -14,7 +14,9 @@
#include <policy/policy.h>
#include <policy/settings.h>
#include <reverse_iterator.h>
+#include <util/check.h>
#include <util/moneystr.h>
+#include <util/overflow.h>
#include <util/system.h>
#include <util/time.h>
#include <validationinterface.h>
@@ -82,6 +84,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& tx, CAmount fee,
entryHeight{entry_height},
spendsCoinbase{spends_coinbase},
sigOpCost{sigops_cost},
+ m_modified_fee{nFee},
lockPoints{lp},
nSizeWithDescendants{GetTxSize()},
nModFeesWithDescendants{nFee},
@@ -89,11 +92,11 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& tx, CAmount fee,
nModFeesWithAncestors{nFee},
nSigOpCostWithAncestors{sigOpCost} {}
-void CTxMemPoolEntry::UpdateFeeDelta(CAmount newFeeDelta)
+void CTxMemPoolEntry::UpdateModifiedFee(CAmount fee_diff)
{
- nModFeesWithDescendants += newFeeDelta - feeDelta;
- nModFeesWithAncestors += newFeeDelta - feeDelta;
- feeDelta = newFeeDelta;
+ nModFeesWithDescendants = SaturatingAdd(nModFeesWithDescendants, fee_diff);
+ nModFeesWithAncestors = SaturatingAdd(nModFeesWithAncestors, fee_diff);
+ m_modified_fee = SaturatingAdd(m_modified_fee, fee_diff);
}
void CTxMemPoolEntry::UpdateLockPoints(const LockPoints& lp)
@@ -107,8 +110,7 @@ size_t CTxMemPoolEntry::GetTxSize() const
}
void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
- const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove,
- uint64_t ancestor_size_limit, uint64_t ancestor_count_limit)
+ const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove)
{
CTxMemPoolEntry::Children stageEntries, descendants;
stageEntries = updateIt->GetMemPoolChildrenConst();
@@ -148,7 +150,7 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendan
// Don't directly remove the transaction here -- doing so would
// invalidate iterators in cachedDescendants. Mark it for removal
// by inserting into descendants_to_remove.
- if (descendant.GetCountWithAncestors() > ancestor_count_limit || descendant.GetSizeWithAncestors() > ancestor_size_limit) {
+ if (descendant.GetCountWithAncestors() > uint64_t(m_limits.ancestor_count) || descendant.GetSizeWithAncestors() > uint64_t(m_limits.ancestor_size_vbytes)) {
descendants_to_remove.insert(descendant.GetTx().GetHash());
}
}
@@ -156,7 +158,7 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendan
mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount));
}
-void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate, uint64_t ancestor_size_limit, uint64_t ancestor_count_limit)
+void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate)
{
AssertLockHeld(cs);
// For each entry in vHashesToUpdate, store the set of in-mempool, but not
@@ -199,7 +201,7 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
}
}
} // release epoch guard for UpdateForDescendants
- UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded, descendants_to_remove, ancestor_size_limit, ancestor_count_limit);
+ UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded, descendants_to_remove);
}
for (const auto& txid : descendants_to_remove) {
@@ -435,7 +437,7 @@ void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, CAmount modifyFe
{
nSizeWithDescendants += modifySize;
assert(int64_t(nSizeWithDescendants) > 0);
- nModFeesWithDescendants += modifyFee;
+ nModFeesWithDescendants = SaturatingAdd(nModFeesWithDescendants, modifyFee);
nCountWithDescendants += modifyCount;
assert(int64_t(nCountWithDescendants) > 0);
}
@@ -444,15 +446,19 @@ void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee,
{
nSizeWithAncestors += modifySize;
assert(int64_t(nSizeWithAncestors) > 0);
- nModFeesWithAncestors += modifyFee;
+ nModFeesWithAncestors = SaturatingAdd(nModFeesWithAncestors, modifyFee);
nCountWithAncestors += modifyCount;
assert(int64_t(nCountWithAncestors) > 0);
nSigOpCostWithAncestors += modifySigOps;
assert(int(nSigOpCostWithAncestors) >= 0);
}
-CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator, int check_ratio)
- : m_check_ratio(check_ratio), minerPolicyEstimator(estimator)
+CTxMemPool::CTxMemPool(const Options& opts)
+ : m_check_ratio{opts.check_ratio},
+ minerPolicyEstimator{opts.estimator},
+ m_max_size_bytes{opts.max_size_bytes},
+ m_expiry{opts.expiry},
+ m_limits{opts.limits}
{
_clear(); //lock free clear
}
@@ -483,8 +489,10 @@ void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry, setEntries &setAnces
// Update transaction for any feeDelta created by PrioritiseTransaction
CAmount delta{0};
ApplyDelta(entry.GetTx().GetHash(), delta);
+ // The following call to UpdateModifiedFee assumes no previous fee modifications
+ Assume(entry.GetFee() == entry.GetModifiedFee());
if (delta) {
- mapTx.modify(newit, [&delta](CTxMemPoolEntry& e) { e.UpdateFeeDelta(delta); });
+ mapTx.modify(newit, [&delta](CTxMemPoolEntry& e) { e.UpdateModifiedFee(delta); });
}
// Update cachedInnerUsage to include contained transaction's usage.
@@ -917,10 +925,10 @@ void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeD
{
LOCK(cs);
CAmount &delta = mapDeltas[hash];
- delta += nFeeDelta;
+ delta = SaturatingAdd(delta, nFeeDelta);
txiter it = mapTx.find(hash);
if (it != mapTx.end()) {
- mapTx.modify(it, [&delta](CTxMemPoolEntry& e) { e.UpdateFeeDelta(delta); });
+ mapTx.modify(it, [&nFeeDelta](CTxMemPoolEntry& e) { e.UpdateModifiedFee(nFeeDelta); });
// Now update all ancestors' modified fees with descendants
setEntries setAncestors;
uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
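Editor's note: the modified-fee bookkeeping above now clamps instead of wrapping on overflow. Below is a standalone sketch of saturating addition to show the intended behaviour; it is an assumption-labelled stand-in for what util/overflow.h's SaturatingAdd is used for here, not that implementation.

#include <cassert>
#include <cstdint>
#include <limits>

int64_t SaturatingAdd64(int64_t a, int64_t b)
{
    if (b > 0 && a > std::numeric_limits<int64_t>::max() - b) {
        return std::numeric_limits<int64_t>::max(); // would overflow upward: clamp
    }
    if (b < 0 && a < std::numeric_limits<int64_t>::min() - b) {
        return std::numeric_limits<int64_t>::min(); // would overflow downward: clamp
    }
    return a + b;
}

int main()
{
    assert(SaturatingAdd64(1, 2) == 3);
    assert(SaturatingAdd64(std::numeric_limits<int64_t>::max(), 1) == std::numeric_limits<int64_t>::max());
    assert(SaturatingAdd64(std::numeric_limits<int64_t>::min(), -1) == std::numeric_limits<int64_t>::min());
}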
diff --git a/src/txmempool.h b/src/txmempool.h
index f5d5abc62e..f44e78fde5 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -14,6 +14,9 @@
#include <utility>
#include <vector>
+#include <kernel/mempool_limits.h>
+#include <kernel/mempool_options.h>
+
#include <coins.h>
#include <consensus/amount.h>
#include <indirectmap.h>
@@ -101,7 +104,7 @@ private:
const unsigned int entryHeight; //!< Chain height when entering the mempool
const bool spendsCoinbase; //!< keep track of transactions that spend a coinbase
const int64_t sigOpCost; //!< Total sigop cost
- CAmount feeDelta{0}; //!< Used for determining the priority of the transaction for mining in a block
+ CAmount m_modified_fee; //!< Used for determining the priority of the transaction for mining in a block
LockPoints lockPoints; //!< Track the height and time at which tx was final
// Information about descendants of this transaction that are in the
@@ -131,7 +134,7 @@ public:
std::chrono::seconds GetTime() const { return std::chrono::seconds{nTime}; }
unsigned int GetHeight() const { return entryHeight; }
int64_t GetSigOpCost() const { return sigOpCost; }
- CAmount GetModifiedFee() const { return nFee + feeDelta; }
+ CAmount GetModifiedFee() const { return m_modified_fee; }
size_t DynamicMemoryUsage() const { return nUsageSize; }
const LockPoints& GetLockPoints() const { return lockPoints; }
@@ -139,9 +142,8 @@ public:
void UpdateDescendantState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount);
// Adjusts the ancestor state
void UpdateAncestorState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount, int64_t modifySigOps);
- // Updates the fee delta used for mining priority score, and the
- // modified fees with descendants/ancestors.
- void UpdateFeeDelta(CAmount newFeeDelta);
+ // Updates the modified fees with descendants/ancestors.
+ void UpdateModifiedFee(CAmount fee_diff);
// Update the LockPoints after a reorg
void UpdateLockPoints(const LockPoints& lp);
@@ -451,6 +453,8 @@ protected:
bool m_is_loaded GUARDED_BY(cs){false};
+ CFeeRate GetMinFee(size_t sizelimit) const;
+
public:
static const int ROLLING_FEE_HALFLIFE = 60 * 60 * 12; // public only for testing
@@ -560,15 +564,21 @@ public:
indirectmap<COutPoint, const CTransaction*> mapNextTx GUARDED_BY(cs);
std::map<uint256, CAmount> mapDeltas GUARDED_BY(cs);
+ using Options = kernel::MemPoolOptions;
+
+ const int64_t m_max_size_bytes;
+ const std::chrono::seconds m_expiry;
+
+ using Limits = kernel::MemPoolLimits;
+
+ const Limits m_limits;
+
/** Create a new CTxMemPool.
* Sanity checks will be off by default for performance, because otherwise
* accepting transactions becomes O(N^2) where N is the number of transactions
* in the pool.
- *
- * @param[in] estimator is used to estimate appropriate transaction fees.
- * @param[in] check_ratio is the ratio used to determine how often sanity checks will run.
*/
- explicit CTxMemPool(CBlockPolicyEstimator* estimator = nullptr, int check_ratio = 0);
+ explicit CTxMemPool(const Options& opts);
/**
* If sanity-checking is turned on, check makes sure the pool is
@@ -648,13 +658,8 @@ public:
*
* @param[in] vHashesToUpdate The set of txids from the
* disconnected block that have been accepted back into the mempool.
- * @param[in] ancestor_size_limit The maximum allowed size in virtual
- * bytes of an entry and its ancestors
- * @param[in] ancestor_count_limit The maximum allowed number of
- * transactions including the entry and its ancestors.
*/
- void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate,
- uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
+ void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
/** Try to calculate all in-mempool ancestors of entry.
* (these are all calculated including the tx itself)
@@ -701,7 +706,9 @@ public:
* takes the fee rate to go back down all the way to 0. When the feerate
* would otherwise be half of this, it is set to 0 instead.
*/
- CFeeRate GetMinFee(size_t sizelimit) const;
+ CFeeRate GetMinFee() const {
+ return GetMinFee(m_max_size_bytes);
+ }
/** Remove transactions from the mempool until its dynamic size is <= sizelimit.
* pvNoSpendsRemaining, if set, will be populated with the list of outpoints
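The public GetMinFee() is now a parameterless wrapper that forwards the pool's own m_max_size_bytes to the protected overload. The surrounding comment describes a rolling minimum fee that decays with a half-life of ROLLING_FEE_HALFLIFE (12 hours) and snaps to zero once it would fall below half of its floor; a back-of-the-envelope sketch of that decay curve, using standard C++ only (the formula illustrates the comment, not the exact implementation):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double halflife_seconds{60.0 * 60.0 * 12.0}; // mirrors ROLLING_FEE_HALFLIFE
        const double start_feerate{1000.0};                // sat/kvB right after an eviction (example value)

        for (int hours = 0; hours <= 48; hours += 12) {
            const double elapsed{hours * 3600.0};
            const double feerate{start_feerate * std::pow(0.5, elapsed / halflife_seconds)};
            std::printf("after %2d h the rolling minimum is ~%.0f sat/kvB\n", hours, feerate);
        }
        return 0;
    }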
@@ -827,14 +834,9 @@ private:
* @param[out] descendants_to_remove Populated with the txids of entries that
* exceed ancestor limits. It's the responsibility of the caller to
* removeRecursive them.
- * @param[in] ancestor_size_limit the max number of ancestral bytes allowed
- * for any descendant
- * @param[in] ancestor_count_limit the max number of ancestor transactions
- * allowed for any descendant
*/
void UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
- const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove,
- uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Update ancestors of hash to add/remove it as a descendant transaction. */
void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Set ancestor state for an entry */
diff --git a/src/util/error.cpp b/src/util/error.cpp
index af8cbd0353..22a5964279 100644
--- a/src/util/error.cpp
+++ b/src/util/error.cpp
@@ -35,6 +35,8 @@ bilingual_str TransactionErrorString(const TransactionError err)
return Untranslated("External signer not found");
case TransactionError::EXTERNAL_SIGNER_FAILED:
return Untranslated("External signer failed to sign");
+ case TransactionError::INVALID_PACKAGE:
+ return Untranslated("Transaction rejected due to invalid package");
// no default case, so the compiler can warn about missing cases
}
assert(false);
diff --git a/src/util/error.h b/src/util/error.h
index 4cc35eb1fd..0429de651a 100644
--- a/src/util/error.h
+++ b/src/util/error.h
@@ -32,6 +32,7 @@ enum class TransactionError {
MAX_FEE_EXCEEDED,
EXTERNAL_SIGNER_NOT_FOUND,
EXTERNAL_SIGNER_FAILED,
+ INVALID_PACKAGE,
};
bilingual_str TransactionErrorString(const TransactionError error);
diff --git a/src/util/sock.cpp b/src/util/sock.cpp
index 1d44fbfdae..2588575d81 100644
--- a/src/util/sock.cpp
+++ b/src/util/sock.cpp
@@ -66,6 +66,16 @@ int Sock::Connect(const sockaddr* addr, socklen_t addr_len) const
return connect(m_socket, addr, addr_len);
}
+int Sock::Bind(const sockaddr* addr, socklen_t addr_len) const
+{
+ return bind(m_socket, addr, addr_len);
+}
+
+int Sock::Listen(int backlog) const
+{
+ return listen(m_socket, backlog);
+}
+
std::unique_ptr<Sock> Sock::Accept(sockaddr* addr, socklen_t* addr_len) const
{
#ifdef WIN32
@@ -102,6 +112,11 @@ int Sock::SetSockOpt(int level, int opt_name, const void* opt_val, socklen_t opt
return setsockopt(m_socket, level, opt_name, static_cast<const char*>(opt_val), opt_len);
}
+int Sock::GetSockName(sockaddr* name, socklen_t* name_len) const
+{
+ return getsockname(m_socket, name, name_len);
+}
+
bool Sock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
{
// We need a `shared_ptr` owning `this` for `WaitMany()`, but don't want
diff --git a/src/util/sock.h b/src/util/sock.h
index 5ca5f1b91b..b854609c22 100644
--- a/src/util/sock.h
+++ b/src/util/sock.h
@@ -87,6 +87,18 @@ public:
[[nodiscard]] virtual int Connect(const sockaddr* addr, socklen_t addr_len) const;
/**
+ * bind(2) wrapper. Equivalent to `bind(this->Get(), addr, addr_len)`. Code that uses this
+ * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
+ */
+ [[nodiscard]] virtual int Bind(const sockaddr* addr, socklen_t addr_len) const;
+
+ /**
+ * listen(2) wrapper. Equivalent to `listen(this->Get(), backlog)`. Code that uses this
+ * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
+ */
+ [[nodiscard]] virtual int Listen(int backlog) const;
+
+ /**
* accept(2) wrapper. Equivalent to `std::make_unique<Sock>(accept(this->Get(), addr, addr_len))`.
* Code that uses this wrapper can be unit tested if this method is overridden by a mock Sock
* implementation.
@@ -114,6 +126,13 @@ public:
const void* opt_val,
socklen_t opt_len) const;
+ /**
+ * getsockname(2) wrapper. Equivalent to
+ * `getsockname(this->Get(), name, name_len)`. Code that uses this
+ * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
+ */
+ [[nodiscard]] virtual int GetSockName(sockaddr* name, socklen_t* name_len) const;
+
using Event = uint8_t;
/**
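Bind(), Listen() and GetSockName() follow the existing pattern of thin virtual wrappers so that code driving a listening socket can be unit tested against a mock Sock. A hypothetical test double overriding the new methods is sketched below; the real mocks used by the test suite live in src/test/util/net.h, and default-constructibility of Sock is assumed here:

    // Hypothetical test double; the real mocks used by the test suite live in
    // src/test/util/net.h. Assumes Sock can be default-constructed.
    #include <util/sock.h>

    class RecordingSock : public Sock
    {
    public:
        mutable int m_listen_backlog{-1};

        int Bind(const sockaddr*, socklen_t) const override { return 0; } // pretend bind(2) succeeded
        int Listen(int backlog) const override
        {
            m_listen_backlog = backlog; // remember what the caller asked for
            return 0;                   // pretend listen(2) succeeded
        }
        int GetSockName(sockaddr*, socklen_t* name_len) const override
        {
            if (name_len) *name_len = 0; // report "no address" for simplicity
            return 0;
        }
    };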
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 1ae82fbc96..f6f2828fc8 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -55,13 +55,6 @@
#else
-#ifdef _MSC_VER
-#pragma warning(disable:4786)
-#pragma warning(disable:4804)
-#pragma warning(disable:4805)
-#pragma warning(disable:4717)
-#endif
-
#include <codecvt>
#include <io.h> /* for _commit */
@@ -610,35 +603,75 @@ bool ArgsManager::IsArgNegated(const std::string& strArg) const
std::string ArgsManager::GetArg(const std::string& strArg, const std::string& strDefault) const
{
+ return GetArg(strArg).value_or(strDefault);
+}
+
+std::optional<std::string> ArgsManager::GetArg(const std::string& strArg) const
+{
const util::SettingsValue value = GetSetting(strArg);
- return SettingToString(value, strDefault);
+ return SettingToString(value);
+}
+
+std::optional<std::string> SettingToString(const util::SettingsValue& value)
+{
+ if (value.isNull()) return std::nullopt;
+ if (value.isFalse()) return "0";
+ if (value.isTrue()) return "1";
+ if (value.isNum()) return value.getValStr();
+ return value.get_str();
}
std::string SettingToString(const util::SettingsValue& value, const std::string& strDefault)
{
- return value.isNull() ? strDefault : value.isFalse() ? "0" : value.isTrue() ? "1" : value.isNum() ? value.getValStr() : value.get_str();
+ return SettingToString(value).value_or(strDefault);
}
int64_t ArgsManager::GetIntArg(const std::string& strArg, int64_t nDefault) const
{
+ return GetIntArg(strArg).value_or(nDefault);
+}
+
+std::optional<int64_t> ArgsManager::GetIntArg(const std::string& strArg) const
+{
const util::SettingsValue value = GetSetting(strArg);
- return SettingToInt(value, nDefault);
+ return SettingToInt(value);
+}
+
+std::optional<int64_t> SettingToInt(const util::SettingsValue& value)
+{
+ if (value.isNull()) return std::nullopt;
+ if (value.isFalse()) return 0;
+ if (value.isTrue()) return 1;
+ if (value.isNum()) return value.getInt<int64_t>();
+ return LocaleIndependentAtoi<int64_t>(value.get_str());
}
int64_t SettingToInt(const util::SettingsValue& value, int64_t nDefault)
{
- return value.isNull() ? nDefault : value.isFalse() ? 0 : value.isTrue() ? 1 : value.isNum() ? value.getInt<int64_t>() : LocaleIndependentAtoi<int64_t>(value.get_str());
+ return SettingToInt(value).value_or(nDefault);
}
bool ArgsManager::GetBoolArg(const std::string& strArg, bool fDefault) const
{
+ return GetBoolArg(strArg).value_or(fDefault);
+}
+
+std::optional<bool> ArgsManager::GetBoolArg(const std::string& strArg) const
+{
const util::SettingsValue value = GetSetting(strArg);
- return SettingToBool(value, fDefault);
+ return SettingToBool(value);
+}
+
+std::optional<bool> SettingToBool(const util::SettingsValue& value)
+{
+ if (value.isNull()) return std::nullopt;
+ if (value.isBool()) return value.get_bool();
+ return InterpretBool(value.get_str());
}
bool SettingToBool(const util::SettingsValue& value, bool fDefault)
{
- return value.isNull() ? fDefault : value.isBool() ? value.get_bool() : InterpretBool(value.get_str());
+ return SettingToBool(value).value_or(fDefault);
}
bool ArgsManager::SoftSetArg(const std::string& strArg, const std::string& strValue)
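Each defaulted getter is now just value_or() over a new std::optional-returning overload, which lets call sites distinguish "option not configured" from "option explicitly set to the default". A short usage sketch; the ArgsManager instance, the option name and the fallback value are illustrative:

    #include <util/system.h>

    #include <cstdint>
    #include <optional>

    // Sketch: tell "not configured" apart from "configured to the default value".
    int64_t MaxMempoolBytes(const ArgsManager& args)
    {
        if (const std::optional<int64_t> configured{args.GetIntArg("-maxmempool")}) {
            return *configured * 1'000'000; // user gave an explicit value, in MB
        }
        return int64_t{300} * 1'000'000;    // option absent: use a fallback of our own
    }

The old two-argument call sites keep compiling unchanged, since they now route through the same optional-returning overload.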
diff --git a/src/util/system.h b/src/util/system.h
index 07d7a533aa..04c66341d3 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -161,8 +161,13 @@ struct SectionInfo
};
std::string SettingToString(const util::SettingsValue&, const std::string&);
+std::optional<std::string> SettingToString(const util::SettingsValue&);
+
int64_t SettingToInt(const util::SettingsValue&, int64_t);
+std::optional<int64_t> SettingToInt(const util::SettingsValue&);
+
bool SettingToBool(const util::SettingsValue&, bool);
+std::optional<bool> SettingToBool(const util::SettingsValue&);
class ArgsManager
{
@@ -335,6 +340,7 @@ protected:
* @return command-line argument or default value
*/
std::string GetArg(const std::string& strArg, const std::string& strDefault) const;
+ std::optional<std::string> GetArg(const std::string& strArg) const;
/**
* Return path argument or default value
@@ -356,6 +362,7 @@ protected:
* @return command-line argument (0 if invalid number) or default value
*/
int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const;
+ std::optional<int64_t> GetIntArg(const std::string& strArg) const;
/**
* Return boolean argument or default value
@@ -365,6 +372,7 @@ protected:
* @return command-line argument or default value
*/
bool GetBoolArg(const std::string& strArg, bool fDefault) const;
+ std::optional<bool> GetBoolArg(const std::string& strArg) const;
/**
* Set an argument if it doesn't already have a value
diff --git a/src/util/time.h b/src/util/time.h
index ad91a72860..0f87d66c2e 100644
--- a/src/util/time.h
+++ b/src/util/time.h
@@ -40,10 +40,15 @@ void UninterruptibleSleep(const std::chrono::microseconds& n);
* This helper is used to convert durations/time_points before passing them over an
* interface that doesn't support std::chrono (e.g. RPC, debug log, or the GUI)
*/
+template <typename Dur1, typename Dur2>
+constexpr auto Ticks(Dur2 d)
+{
+ return std::chrono::duration_cast<Dur1>(d).count();
+}
template <typename Duration, typename Timepoint>
constexpr auto TicksSinceEpoch(Timepoint t)
{
- return std::chrono::time_point_cast<Duration>(t).time_since_epoch().count();
+ return Ticks<Duration>(t.time_since_epoch());
}
constexpr int64_t count_seconds(std::chrono::seconds t) { return t.count(); }
constexpr int64_t count_milliseconds(std::chrono::milliseconds t) { return t.count(); }
@@ -52,11 +57,6 @@ constexpr int64_t count_microseconds(std::chrono::microseconds t) { return t.cou
using SecondsDouble = std::chrono::duration<double, std::chrono::seconds::period>;
/**
- * Helper to count the seconds in any std::chrono::duration type
- */
-inline double CountSecondsDouble(SecondsDouble t) { return t.count(); }
-
-/**
* DEPRECATED
* Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
* ClockType is
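Ticks<Dur>(d) is the duration counterpart of TicksSinceEpoch: a duration_cast followed by count(), so callers stop hand-rolling that pair. A self-contained illustration using only <chrono> (the template is reproduced from the hunk above so the snippet stands alone):

    #include <cassert>
    #include <chrono>

    // Reproduced from the hunk above so the example compiles on its own.
    template <typename Dur1, typename Dur2>
    constexpr auto Ticks(Dur2 d)
    {
        return std::chrono::duration_cast<Dur1>(d).count();
    }

    int main()
    {
        using namespace std::chrono_literals;
        static_assert(Ticks<std::chrono::seconds>(2min) == 120);
        static_assert(Ticks<std::chrono::milliseconds>(1500us) == 1); // truncates toward zero
        assert(Ticks<std::chrono::hours>(90min) == 1);
        return 0;
    }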
diff --git a/src/validation.cpp b/src/validation.cpp
index b775c85912..6b21d33871 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -255,18 +255,18 @@ bool CheckSequenceLocksAtTip(CBlockIndex* tip,
// Returns the script flags which should be checked for a given block
static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const ChainstateManager& chainman);
-static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age)
+static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs)
{
AssertLockHeld(::cs_main);
AssertLockHeld(pool.cs);
- int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
+ int expired = pool.Expire(GetTime<std::chrono::seconds>() - pool.m_expiry);
if (expired != 0) {
LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
}
std::vector<COutPoint> vNoSpendsRemaining;
- pool.TrimToSize(limit, &vNoSpendsRemaining);
+ pool.TrimToSize(pool.m_max_size_bytes, &vNoSpendsRemaining);
for (const COutPoint& removed : vNoSpendsRemaining)
coins_cache.Uncache(removed);
}
@@ -320,9 +320,7 @@ void CChainState::MaybeUpdateMempoolForReorg(
// previously-confirmed transactions back to the mempool.
// UpdateTransactionsFromBlock finds descendants of any transactions in
// the disconnectpool that were added back and cleans up the mempool state.
- const uint64_t ancestor_count_limit = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- const uint64_t ancestor_size_limit = gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
- m_mempool->UpdateTransactionsFromBlock(vHashUpdate, ancestor_size_limit, ancestor_count_limit);
+ m_mempool->UpdateTransactionsFromBlock(vHashUpdate);
// Predicate to use for filtering transactions in removeForReorg.
// Checks whether the transaction is still final and, if it spends a coinbase output, mature.
@@ -374,11 +372,7 @@ void CChainState::MaybeUpdateMempoolForReorg(
// We also need to remove any now-immature transactions
m_mempool->removeForReorg(m_chain, filter_final_and_mature);
// Re-limit mempool size, in case we added any transactions
- LimitMempoolSize(
- *m_mempool,
- this->CoinsTip(),
- gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
- std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
+ LimitMempoolSize(*m_mempool, this->CoinsTip());
}
/**
@@ -429,10 +423,10 @@ class MemPoolAccept
{
public:
explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
- m_limit_ancestors(gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
- m_limit_ancestor_size(gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
- m_limit_descendants(gArgs.GetIntArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
- m_limit_descendant_size(gArgs.GetIntArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {
+ m_limit_ancestors(m_pool.m_limits.ancestor_count),
+ m_limit_ancestor_size(m_pool.m_limits.ancestor_size_vbytes),
+ m_limit_descendants(m_pool.m_limits.descendant_count),
+ m_limit_descendant_size(m_pool.m_limits.descendant_size_vbytes) {
}
// We put the arguments we're handed into a struct, so we can pass them
@@ -644,7 +638,7 @@ private:
{
AssertLockHeld(::cs_main);
AssertLockHeld(m_pool.cs);
- CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
+ CAmount mempoolRejectFee = m_pool.GetMinFee().GetFee(package_size);
if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
}
@@ -1082,7 +1076,7 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
// in the package. LimitMempoolSize() should be called at the very end to make sure the mempool
// is still within limits and package submission happens atomically.
if (!args.m_package_submission && !bypass_limits) {
- LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
+ LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip());
if (!m_pool.exists(GenTxid::Txid(hash)))
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
}
@@ -1147,9 +1141,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
// It may or may not be the case that all the transactions made it into the mempool. Regardless,
// make sure we haven't exceeded max mempool size.
- LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(),
- gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
- std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
+ LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip());
// Find the wtxids of the transactions that made it into the mempool. Allow partial submission,
// but don't report success unless they all made it into the mempool.
@@ -2292,7 +2284,7 @@ CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
AssertLockHeld(::cs_main);
return this->GetCoinsCacheSizeState(
m_coinstip_cache_size_bytes,
- gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
+ m_mempool ? m_mempool->m_max_size_bytes : 0);
}
CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
@@ -3491,15 +3483,15 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
// Enforce BIP113 (Median Time Past).
- int nLockTimeFlags = 0;
+ bool enforce_locktime_median_time_past{false};
if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_CSV)) {
assert(pindexPrev != nullptr);
- nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
+ enforce_locktime_median_time_past = true;
}
- int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
- ? pindexPrev->GetMedianTimePast()
- : block.GetBlockTime();
+ const int64_t nLockTimeCutoff{enforce_locktime_median_time_past ?
+ pindexPrev->GetMedianTimePast() :
+ block.GetBlockTime()};
// Check that all transactions are finalized
for (const auto& tx : block.vtx) {
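The nLockTimeFlags bit mask is reduced to a single bool: once CSV (and with it BIP113) is active, the locktime cutoff for every transaction in the block is the previous block's median-time-past instead of the block's own timestamp. A tiny sketch of just that selection, with placeholder arguments standing in for DeploymentActiveAfter(...), pindexPrev->GetMedianTimePast() and block.GetBlockTime():

    #include <cstdint>

    // csv_active, mtp and block_time stand in for DeploymentActiveAfter(...),
    // pindexPrev->GetMedianTimePast() and block.GetBlockTime().
    int64_t LockTimeCutoff(bool csv_active, int64_t mtp, int64_t block_time)
    {
        return csv_active ? mtp : block_time; // BIP113: use median-time-past once CSV is active
    }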
@@ -4647,7 +4639,7 @@ static const uint64_t MEMPOOL_DUMP_VERSION = 1;
bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
{
- int64_t nExpiryTimeout = gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
+ int64_t nExpiryTimeout = std::chrono::seconds{pool.m_expiry}.count();
FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat", "rb")};
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
if (file.IsNull()) {
diff --git a/src/validation.h b/src/validation.h
index 3b6cd509c6..0e27e117fa 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -59,8 +59,6 @@ namespace Consensus {
struct Params;
} // namespace Consensus
-/** Default for -mempoolexpiry, expiration time for mempool transactions in hours */
-static const unsigned int DEFAULT_MEMPOOL_EXPIRY = 336;
/** Maximum number of dedicated script-checking threads allowed */
static const int MAX_SCRIPTCHECK_THREADS = 15;
/** -par default (number of script-checking threads, 0 = auto) */
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index f8230f7a1d..dbd768a758 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -315,12 +315,6 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, b
env = database.env.get();
pdb = database.m_db.get();
strFile = fs::PathToString(database.m_filename);
- if (!Exists(std::string("version"))) {
- bool fTmp = fReadOnly;
- fReadOnly = false;
- Write(std::string("version"), CLIENT_VERSION);
- fReadOnly = fTmp;
- }
}
void BerkeleyDatabase::Open()
diff --git a/src/wallet/coinselection.cpp b/src/wallet/coinselection.cpp
index 07df8d9fc8..49e6bac462 100644
--- a/src/wallet/coinselection.cpp
+++ b/src/wallet/coinselection.cpp
@@ -104,9 +104,6 @@ std::optional<SelectionResult> SelectCoinsBnB(std::vector<OutputGroup>& utxo_poo
if (curr_waste <= best_waste) {
best_selection = curr_selection;
best_waste = curr_waste;
- if (best_waste == 0) {
- break;
- }
}
curr_waste -= (curr_value - selection_target); // Remove the excess value as we will be selecting different coins now
backtrack = true;
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index 8633e7c62c..1fec82a485 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -2180,6 +2180,19 @@ TransactionError DescriptorScriptPubKeyMan::FillPSBT(PartiallySignedTransaction&
*keys = Merge(*keys, *pk_keys);
}
}
+ for (const auto& pk_pair : input.m_tap_bip32_paths) {
+ const XOnlyPubKey& pubkey = pk_pair.first;
+ for (unsigned char prefix : {0x02, 0x03}) {
+ unsigned char b[33] = {prefix};
+ std::copy(pubkey.begin(), pubkey.end(), b + 1);
+ CPubKey fullpubkey;
+ fullpubkey.Set(b, b + 33);
+ std::unique_ptr<FlatSigningProvider> pk_keys = GetSigningProvider(fullpubkey);
+ if (pk_keys) {
+ *keys = Merge(*keys, *pk_keys);
+ }
+ }
+ }
}
SignPSBTInput(HidingSigningProvider(keys.get(), !sign, !bip32derivs), psbtx, i, &txdata, sighash_type, nullptr, finalize);
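For taproot inputs the PSBT stores x-only keys, while the wallet's signing-provider lookup is keyed by full compressed public keys, so the new loop probes both possible parity prefixes (0x02 and 0x03) in front of the 32-byte x coordinate. A standalone sketch of that expansion using plain byte containers instead of XOnlyPubKey/CPubKey, so it compiles without the wallet headers:

    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <vector>

    // Expand a 32-byte x-only key into its two candidate 33-byte compressed forms
    // (0x02 = even Y, 0x03 = odd Y). Only one of them is the real key, so callers
    // probe both, as the wallet loop above does.
    std::vector<std::array<uint8_t, 33>> CompressedCandidates(const std::array<uint8_t, 32>& xonly)
    {
        std::vector<std::array<uint8_t, 33>> out;
        for (uint8_t prefix : {uint8_t{0x02}, uint8_t{0x03}}) {
            std::array<uint8_t, 33> full{};
            full[0] = prefix;
            std::copy(xonly.begin(), xonly.end(), full.begin() + 1);
            out.push_back(full);
        }
        return out;
    }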
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index 5799a9ff2a..1d22d0993e 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -397,10 +397,13 @@ std::optional<SelectionResult> AttemptSelection(const CWallet& wallet, const CAm
// The knapsack solver has some legacy behavior where it will spend dust outputs. We retain this behavior, so don't filter for positive only here.
std::vector<OutputGroup> all_groups = GroupOutputs(wallet, coins, coin_selection_params, eligibility_filter, false /* positive_only */);
+ CAmount target_with_change = nTargetValue;
// While nTargetValue includes the transaction fees for non-input things, it does not include the fee for creating a change output.
- // So we need to include that for KnapsackSolver as well, as we are expecting to create a change output.
- if (auto knapsack_result{KnapsackSolver(all_groups, nTargetValue + coin_selection_params.m_change_fee,
- coin_selection_params.m_min_change_target, coin_selection_params.rng_fast)}) {
+ // So we need to include that for KnapsackSolver and SRD as well, as we are expecting to create a change output.
+ if (!coin_selection_params.m_subtract_fee_outputs) {
+ target_with_change += coin_selection_params.m_change_fee;
+ }
+ if (auto knapsack_result{KnapsackSolver(all_groups, target_with_change, coin_selection_params.m_min_change_target, coin_selection_params.rng_fast)}) {
knapsack_result->ComputeAndSetWaste(coin_selection_params.m_cost_of_change);
results.push_back(*knapsack_result);
}
@@ -409,7 +412,7 @@ std::optional<SelectionResult> AttemptSelection(const CWallet& wallet, const CAm
// barely meets the target. Just use the lower bound change target instead of the randomly
// generated one, since SRD will result in a random change amount anyway; avoid making the
// target needlessly large.
- const CAmount srd_target = nTargetValue + coin_selection_params.m_change_fee + CHANGE_LOWER;
+ const CAmount srd_target = target_with_change + CHANGE_LOWER;
if (auto srd_result{SelectCoinsSRD(positive_groups, srd_target, coin_selection_params.rng_fast)}) {
srd_result->ComputeAndSetWaste(coin_selection_params.m_cost_of_change);
results.push_back(*srd_result);
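The change-output fee is now folded into the selection target once, up front, and skipped entirely when fees are being subtracted from the outputs; SRD then pads that same target by CHANGE_LOWER so the change it produces stays above the dust range. A quick arithmetic sketch with made-up amounts (all constants are illustrative, not the wallet's configured values):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const int64_t target{50'000};       // payment amount plus fee for the non-input parts (sats)
        const int64_t change_fee{430};      // fee for adding one change output (illustrative)
        const int64_t change_lower{50'000}; // stand-in for CHANGE_LOWER

        const bool subtract_fee_outputs{false};
        const int64_t target_with_change{target + (subtract_fee_outputs ? 0 : change_fee)};
        const int64_t srd_target{target_with_change + change_lower};

        std::printf("knapsack target: %lld sats\n", static_cast<long long>(target_with_change));
        std::printf("SRD target:      %lld sats\n", static_cast<long long>(srd_target));
        return 0;
    }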
@@ -760,7 +763,8 @@ static std::optional<CreatedTransactionResult> CreateTransactionInternal(
// vouts to the payees
if (!coin_selection_params.m_subtract_fee_outputs) {
- coin_selection_params.tx_noinputs_size = 11; // Static vsize overhead + outputs vsize. 4 nVersion, 4 nLocktime, 1 input count, 1 output count, 1 witness overhead (dummy, flag, stack size)
+ coin_selection_params.tx_noinputs_size = 10; // Static vsize overhead + outputs vsize. 4 nVersion, 4 nLocktime, 1 input count, 1 witness overhead (dummy, flag, stack size)
+ coin_selection_params.tx_noinputs_size += GetSizeOfCompactSize(vecSend.size()); // bytes for output count
}
for (const auto& recipient : vecSend)
{
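The previous hard-coded 11 bytes assumed the output count always serializes to a single byte; since the count is a CompactSize it only costs 1 byte up to 252 outputs and 3 bytes beyond that, which is why the code now adds GetSizeOfCompactSize(vecSend.size()) on top of 10 static bytes. A quick standalone check of that serialization rule (the helper below re-derives the CompactSize length just for the example):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Length of a Bitcoin CompactSize integer: 1 byte up to 252, then 3, 5 or 9 bytes.
    size_t CompactSizeLen(uint64_t n)
    {
        if (n < 253) return 1;
        if (n <= 0xFFFF) return 3;
        if (n <= 0xFFFFFFFF) return 5;
        return 9;
    }

    int main()
    {
        for (uint64_t outputs : {1ULL, 2ULL, 252ULL, 253ULL, 70'000ULL}) {
            std::printf("%llu outputs -> %zu bytes of static overhead\n",
                        static_cast<unsigned long long>(outputs), 10 + CompactSizeLen(outputs));
        }
        return 0;
    }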
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index d6f47e9954..27202cd7f3 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -198,8 +198,8 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
expected_result.Clear();
// Select 5 Cent
- add_coin(4 * CENT, 4, expected_result);
- add_coin(1 * CENT, 1, expected_result);
+ add_coin(3 * CENT, 3, expected_result);
+ add_coin(2 * CENT, 2, expected_result);
const auto result3 = SelectCoinsBnB(GroupCoins(utxo_pool), 5 * CENT, 0.5 * CENT);
BOOST_CHECK(result3);
BOOST_CHECK(EquivalentResult(expected_result, *result3));
@@ -224,8 +224,9 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
// Select 10 Cent
add_coin(5 * CENT, 5, utxo_pool);
- add_coin(5 * CENT, 5, expected_result);
add_coin(4 * CENT, 4, expected_result);
+ add_coin(3 * CENT, 3, expected_result);
+ add_coin(2 * CENT, 2, expected_result);
add_coin(1 * CENT, 1, expected_result);
const auto result5 = SelectCoinsBnB(GroupCoins(utxo_pool), 10 * CENT, 0.5 * CENT);
BOOST_CHECK(result5);
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index d0b093bbb7..041481559b 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2006,6 +2006,35 @@ TransactionError CWallet::FillPSBT(PartiallySignedTransaction& psbtx, bool& comp
}
}
+ // Only drop non_witness_utxos if the ANYONECANPAY flag is not set in sighash_type
+ if ((sighash_type & 0x80) != SIGHASH_ANYONECANPAY) {
+ // Figure out if any non_witness_utxos should be dropped
+ std::vector<unsigned int> to_drop;
+ for (unsigned int i = 0; i < psbtx.inputs.size(); ++i) {
+ const auto& input = psbtx.inputs.at(i);
+ int wit_ver;
+ std::vector<unsigned char> wit_prog;
+ if (input.witness_utxo.IsNull() || !input.witness_utxo.scriptPubKey.IsWitnessProgram(wit_ver, wit_prog)) {
+ // There's a non-segwit input (or no witness_utxo to inspect), so we cannot drop any non_witness_utxos
+ to_drop.clear();
+ break;
+ }
+ if (wit_ver == 0) {
+ // Segwit v0, so we cannot drop any non_witness_utxos
+ to_drop.clear();
+ break;
+ }
+ if (input.non_witness_utxo) {
+ to_drop.push_back(i);
+ }
+ }
+
+ // Drop the non_witness_utxos that we can drop
+ for (unsigned int i : to_drop) {
+ psbtx.inputs.at(i).non_witness_utxo = nullptr;
+ }
+ }
+
// Complete if every input is now signed
complete = true;
for (const auto& input : psbtx.inputs) {
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 79e0a330b7..8afd3f416d 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -883,12 +883,10 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
if (result != DBErrors::LOAD_OK)
return result;
- // Last client version to open this wallet, was previously the file version number
+ // Last client version to open this wallet
int last_client = CLIENT_VERSION;
- m_batch->Read(DBKeys::VERSION, last_client);
-
- int wallet_version = pwallet->GetVersion();
- pwallet->WalletLogPrintf("Wallet File Version = %d\n", wallet_version > 0 ? wallet_version : last_client);
+ bool has_last_client = m_batch->Read(DBKeys::VERSION, last_client);
+ pwallet->WalletLogPrintf("Wallet file version = %d, last client version = %d\n", pwallet->GetVersion(), last_client);
pwallet->WalletLogPrintf("Keys: %u plaintext, %u encrypted, %u w/ metadata, %u total. Unknown wallet records: %u\n",
wss.nKeys, wss.nCKeys, wss.nKeyMeta, wss.nKeys + wss.nCKeys, wss.m_unknown_records);
@@ -909,7 +907,7 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
if (wss.fIsEncrypted && (last_client == 40000 || last_client == 50000))
return DBErrors::NEED_REWRITE;
- if (last_client < CLIENT_VERSION) // Update
+ if (!has_last_client || last_client != CLIENT_VERSION) // Update
m_batch->Write(DBKeys::VERSION, CLIENT_VERSION);
if (wss.fAnyUnordered)
@@ -1186,13 +1184,36 @@ std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase()
}
/** Return object for accessing temporary in-memory database. */
-std::unique_ptr<WalletDatabase> CreateMockWalletDatabase()
+std::unique_ptr<WalletDatabase> CreateMockWalletDatabase(DatabaseOptions& options)
{
- DatabaseOptions options;
+
+ std::optional<DatabaseFormat> format;
+ if (options.require_format) format = options.require_format;
+ if (!format) {
+#ifdef USE_BDB
+ format = DatabaseFormat::BERKELEY;
+#endif
+#ifdef USE_SQLITE
+ format = DatabaseFormat::SQLITE;
+#endif
+ }
+
+ if (format == DatabaseFormat::SQLITE) {
#ifdef USE_SQLITE
- return std::make_unique<SQLiteDatabase>("", "", options, true);
-#elif USE_BDB
+ return std::make_unique<SQLiteDatabase>(":memory:", "", options, true);
+#endif
+ assert(false);
+ }
+
+#ifdef USE_BDB
return std::make_unique<BerkeleyDatabase>(std::make_shared<BerkeleyEnvironment>(), "", options);
#endif
+ assert(false);
+}
+
+std::unique_ptr<WalletDatabase> CreateMockWalletDatabase()
+{
+ DatabaseOptions options;
+ return CreateMockWalletDatabase(options);
}
} // namespace wallet
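The new overload lets tests choose the backing store through DatabaseOptions::require_format and otherwise falls back to whichever backend was compiled in, with SQLite taking precedence when both are available because its assignment comes last. A sketch of requesting an in-memory SQLite database explicitly from a test, assuming a build with USE_SQLITE:

    #include <wallet/db.h>
    #include <wallet/walletdb.h>

    #include <memory>

    // Sketch of a test helper; assumes the build defines USE_SQLITE.
    std::unique_ptr<wallet::WalletDatabase> MakeInMemorySqliteDb()
    {
        wallet::DatabaseOptions options;
        options.require_format = wallet::DatabaseFormat::SQLITE; // force the SQLite branch
        return wallet::CreateMockWalletDatabase(options);
    }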
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 3dfe781d56..a04ea598b6 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -301,6 +301,7 @@ bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, st
std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase();
/** Return object for accessing temporary in-memory database. */
+std::unique_ptr<WalletDatabase> CreateMockWalletDatabase(DatabaseOptions& options);
std::unique_ptr<WalletDatabase> CreateMockWalletDatabase();
} // namespace wallet