Diffstat (limited to 'src')
-rw-r--r--  src/.clang-tidy | 1
-rw-r--r--  src/Makefile.bench.include | 2
-rw-r--r--  src/addrdb.cpp | 22
-rw-r--r--  src/bench/index_blockfilter.cpp | 43
-rw-r--r--  src/bench/parse_hex.cpp | 36
-rw-r--r--  src/bitcoin-chainstate.cpp | 9
-rw-r--r--  src/chain.h | 67
-rw-r--r--  src/common/args.cpp | 12
-rw-r--r--  src/common/args.h | 5
-rw-r--r--  src/compat/compat.h | 7
-rw-r--r--  src/flatfile.cpp | 9
-rw-r--r--  src/i2p.cpp | 14
-rw-r--r--  src/i2p.h | 9
-rw-r--r--  src/index/base.cpp | 62
-rw-r--r--  src/index/base.h | 16
-rw-r--r--  src/index/blockfilterindex.cpp | 91
-rw-r--r--  src/index/blockfilterindex.h | 7
-rw-r--r--  src/index/coinstatsindex.cpp | 24
-rw-r--r--  src/index/txindex.cpp | 12
-rw-r--r--  src/init.cpp | 111
-rw-r--r--  src/kernel/coinstats.cpp | 3
-rw-r--r--  src/kernel/notifications_interface.h | 8
-rw-r--r--  src/logging.h | 9
-rw-r--r--  src/net.cpp | 79
-rw-r--r--  src/net.h | 33
-rw-r--r--  src/net_permissions.cpp | 28
-rw-r--r--  src/net_permissions.h | 8
-rw-r--r--  src/net_processing.cpp | 234
-rw-r--r--  src/netaddress.cpp | 13
-rw-r--r--  src/netaddress.h | 5
-rw-r--r--  src/netbase.cpp | 221
-rw-r--r--  src/netbase.h | 86
-rw-r--r--  src/node/abort.cpp | 9
-rw-r--r--  src/node/abort.h | 7
-rw-r--r--  src/node/blockstorage.cpp | 111
-rw-r--r--  src/node/kernel_notifications.cpp | 8
-rw-r--r--  src/node/kernel_notifications.h | 5
-rw-r--r--  src/node/transaction.h | 6
-rw-r--r--  src/noui.cpp | 7
-rw-r--r--  src/policy/v3_policy.cpp | 44
-rw-r--r--  src/policy/v3_policy.h | 10
-rw-r--r--  src/qt/optionsdialog.cpp | 14
-rw-r--r--  src/rest.cpp | 20
-rw-r--r--  src/rpc/blockchain.cpp | 34
-rw-r--r--  src/rpc/client.cpp | 2
-rw-r--r--  src/rpc/mempool.cpp | 33
-rw-r--r--  src/rpc/net.cpp | 16
-rw-r--r--  src/script/signingprovider.cpp | 8
-rw-r--r--  src/test/compress_tests.cpp | 33
-rw-r--r--  src/test/fuzz/fuzz.cpp | 2
-rw-r--r--  src/test/fuzz/net_permissions.cpp | 4
-rw-r--r--  src/test/fuzz/p2p_transport_serialization.cpp | 1
-rw-r--r--  src/test/fuzz/package_eval.cpp | 2
-rw-r--r--  src/test/fuzz/tx_pool.cpp | 2
-rw-r--r--  src/test/i2p_tests.cpp | 21
-rw-r--r--  src/test/netbase_tests.cpp | 18
-rw-r--r--  src/test/serfloat_tests.cpp | 116
-rw-r--r--  src/test/txpackage_tests.cpp | 36
-rw-r--r--  src/test/txvalidation_tests.cpp | 90
-rw-r--r--  src/test/util/chainstate.h | 17
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp | 21
-rw-r--r--  src/util/fs_helpers.cpp | 13
-rw-r--r--  src/util/strencodings.cpp | 4
-rw-r--r--  src/validation.cpp | 410
-rw-r--r--  src/validation.h | 15
-rw-r--r--  src/zmq/zmqnotificationinterface.cpp | 2
-rw-r--r--  src/zmq/zmqnotificationinterface.h | 3
-rw-r--r--  src/zmq/zmqpublishnotifier.cpp | 7
-rw-r--r--  src/zmq/zmqpublishnotifier.h | 6
69 files changed, 1575 insertions, 868 deletions
diff --git a/src/.clang-tidy b/src/.clang-tidy
index bfaa5ab8e7..e4b789dcaa 100644
--- a/src/.clang-tidy
+++ b/src/.clang-tidy
@@ -12,6 +12,7 @@ modernize-use-noexcept,
modernize-use-nullptr,
performance-*,
-performance-avoid-endl,
+-performance-enum-size,
-performance-inefficient-string-concatenation,
-performance-no-int-to-ptr,
-performance-noexcept-move-constructor,
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index b24405ce19..7ba0111fa6 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -34,6 +34,7 @@ bench_bench_bitcoin_SOURCES = \
bench/examples.cpp \
bench/gcs_filter.cpp \
bench/hashpadding.cpp \
+ bench/index_blockfilter.cpp \
bench/load_external.cpp \
bench/lockedpool.cpp \
bench/logging.cpp \
@@ -42,6 +43,7 @@ bench_bench_bitcoin_SOURCES = \
bench/merkle_root.cpp \
bench/nanobench.cpp \
bench/nanobench.h \
+ bench/parse_hex.cpp \
bench/peer_eviction.cpp \
bench/poly1305.cpp \
bench/pool.cpp \
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index fd2a363b8a..14dc314c36 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -44,7 +44,8 @@ bool SerializeDB(Stream& stream, const Data& data)
hashwriter << Params().MessageStart() << data;
stream << hashwriter.GetHash();
} catch (const std::exception& e) {
- return error("%s: Serialize or I/O error - %s", __func__, e.what());
+ LogError("%s: Serialize or I/O error - %s\n", __func__, e.what());
+ return false;
}
return true;
@@ -64,7 +65,8 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
if (fileout.IsNull()) {
fileout.fclose();
remove(pathTmp);
- return error("%s: Failed to open file %s", __func__, fs::PathToString(pathTmp));
+ LogError("%s: Failed to open file %s\n", __func__, fs::PathToString(pathTmp));
+ return false;
}
// Serialize
@@ -76,14 +78,16 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
if (!FileCommit(fileout.Get())) {
fileout.fclose();
remove(pathTmp);
- return error("%s: Failed to flush file %s", __func__, fs::PathToString(pathTmp));
+ LogError("%s: Failed to flush file %s\n", __func__, fs::PathToString(pathTmp));
+ return false;
}
fileout.fclose();
// replace existing file, if any, with new file
if (!RenameOver(pathTmp, path)) {
remove(pathTmp);
- return error("%s: Rename-into-place failed", __func__);
+ LogError("%s: Rename-into-place failed\n", __func__);
+ return false;
}
return true;
@@ -140,7 +144,7 @@ bool CBanDB::Write(const banmap_t& banSet)
}
for (const auto& err : errors) {
- error("%s", err);
+ LogError("%s\n", err);
}
return false;
}
@@ -189,7 +193,9 @@ void ReadFromStream(AddrMan& addr, DataStream& ssPeers)
util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgroupman, const ArgsManager& args)
{
auto check_addrman = std::clamp<int32_t>(args.GetIntArg("-checkaddrman", DEFAULT_ADDRMAN_CONSISTENCY_CHECKS), 0, 1000000);
- auto addrman{std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman)};
+ bool deterministic = HasTestOption(args, "addrman"); // use a deterministic addrman only for tests
+
+ auto addrman{std::make_unique<AddrMan>(netgroupman, deterministic, /*consistency_check_ratio=*/check_addrman)};
const auto start{SteadyClock::now()};
const auto path_addr{args.GetDataDirNet() / "peers.dat"};
@@ -198,7 +204,7 @@ util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgro
LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman->Size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
} catch (const DbNotFoundError&) {
// Addrman can be in an inconsistent state after failure, reset it
- addrman = std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman);
+ addrman = std::make_unique<AddrMan>(netgroupman, deterministic, /*consistency_check_ratio=*/check_addrman);
LogPrintf("Creating peers.dat because the file was not found (%s)\n", fs::quoted(fs::PathToString(path_addr)));
DumpPeerAddresses(args, *addrman);
} catch (const InvalidAddrManVersionError&) {
@@ -206,7 +212,7 @@ util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgro
return util::Error{strprintf(_("Failed to rename invalid peers.dat file. Please move or delete it and try again."))};
}
// Addrman can be in an inconsistent state after failure, reset it
- addrman = std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman);
+ addrman = std::make_unique<AddrMan>(netgroupman, deterministic, /*consistency_check_ratio=*/check_addrman);
LogPrintf("Creating new peers.dat because the file version was not compatible (%s). Original backed up to peers.dat.bak\n", fs::quoted(fs::PathToString(path_addr)));
DumpPeerAddresses(args, *addrman);
} catch (const std::exception& e) {
diff --git a/src/bench/index_blockfilter.cpp b/src/bench/index_blockfilter.cpp
new file mode 100644
index 0000000000..5e0bfbfea6
--- /dev/null
+++ b/src/bench/index_blockfilter.cpp
@@ -0,0 +1,43 @@
+// Copyright (c) 2023-present The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or https://www.opensource.org/licenses/mit-license.php.
+
+#include <bench/bench.h>
+
+#include <addresstype.h>
+#include <index/blockfilterindex.h>
+#include <node/chainstate.h>
+#include <node/context.h>
+#include <test/util/setup_common.h>
+#include <util/strencodings.h>
+
+// Very simple block filter index sync benchmark, only using coinbase outputs.
+static void BlockFilterIndexSync(benchmark::Bench& bench)
+{
+ const auto test_setup = MakeNoLogFileContext<TestChain100Setup>();
+
+ // Create more blocks
+ int CHAIN_SIZE = 600;
+ CPubKey pubkey{ParseHex("02ed26169896db86ced4cbb7b3ecef9859b5952825adbeab998fb5b307e54949c9")};
+ CScript script = GetScriptForDestination(WitnessV0KeyHash(pubkey));
+ std::vector<CMutableTransaction> noTxns;
+ for (int i = 0; i < CHAIN_SIZE - 100; i++) {
+ test_setup->CreateAndProcessBlock(noTxns, script);
+ SetMockTime(GetTime() + 1);
+ }
+ assert(WITH_LOCK(::cs_main, return test_setup->m_node.chainman->ActiveHeight() == CHAIN_SIZE));
+
+ bench.minEpochIterations(5).run([&] {
+ BlockFilterIndex filter_index(interfaces::MakeChain(test_setup->m_node), BlockFilterType::BASIC,
+ /*n_cache_size=*/0, /*f_memory=*/false, /*f_wipe=*/true);
+ assert(filter_index.Init());
+ assert(!filter_index.BlockUntilSyncedToCurrentChain());
+ filter_index.Sync();
+
+ IndexSummary summary = filter_index.GetSummary();
+ assert(summary.synced);
+ assert(summary.best_block_hash == WITH_LOCK(::cs_main, return test_setup->m_node.chainman->ActiveTip()->GetBlockHash()));
+ });
+}
+
+BENCHMARK(BlockFilterIndexSync, benchmark::PriorityLevel::HIGH);
diff --git a/src/bench/parse_hex.cpp b/src/bench/parse_hex.cpp
new file mode 100644
index 0000000000..db3ead043c
--- /dev/null
+++ b/src/bench/parse_hex.cpp
@@ -0,0 +1,36 @@
+// Copyright (c) 2024- The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bench/bench.h>
+#include <random.h>
+#include <stddef.h>
+#include <util/strencodings.h>
+#include <cassert>
+#include <optional>
+#include <vector>
+
+std::string generateHexString(size_t length) {
+ const auto hex_digits = "0123456789ABCDEF";
+ FastRandomContext rng(/*fDeterministic=*/true);
+
+ std::string data;
+ while (data.size() < length) {
+ auto digit = hex_digits[rng.randbits(4)];
+ data.push_back(digit);
+ }
+ return data;
+}
+
+static void HexParse(benchmark::Bench& bench)
+{
+ auto data = generateHexString(130); // Generates 678B0EDA0A1FD30904D5A65E3568DB82DB2D918B0AD8DEA18A63FECCB877D07CAD1495C7157584D877420EF38B8DA473A6348B4F51811AC13C786B962BEE5668F9 by default
+
+ bench.batch(data.size()).unit("base16").run([&] {
+ auto result = TryParseHex(data);
+ assert(result != std::nullopt); // make sure we're measuring the successful case
+ ankerl::nanobench::doNotOptimizeAway(result);
+ });
+}
+
+BENCHMARK(HexParse, benchmark::PriorityLevel::HIGH);
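For reference, a minimal sketch (not part of the diff) of the call the benchmark times: TryParseHex from util/strencodings.h returns the decoded bytes on success and std::nullopt for non-hex input. The literals below are illustrative.

    const auto ok{TryParseHex<uint8_t>("00ff")};  // two bytes: 0x00, 0xff
    const auto bad{TryParseHex<uint8_t>("0zff")}; // std::nullopt: 'z' is not a hex digit
    assert(ok.has_value() && !bad.has_value());
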
diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp
index 3eb64aa344..642af06e82 100644
--- a/src/bitcoin-chainstate.cpp
+++ b/src/bitcoin-chainstate.cpp
@@ -89,14 +89,13 @@ int main(int argc, char* argv[])
{
std::cout << "Warning: " << warning.original << std::endl;
}
- void flushError(const std::string& debug_message) override
+ void flushError(const bilingual_str& message) override
{
- std::cerr << "Error flushing block data to disk: " << debug_message << std::endl;
+ std::cerr << "Error flushing block data to disk: " << message.original << std::endl;
}
- void fatalError(const std::string& debug_message, const bilingual_str& user_message) override
+ void fatalError(const bilingual_str& message) override
{
- std::cerr << "Error: " << debug_message << std::endl;
- std::cerr << (user_message.empty() ? "A fatal internal error occurred." : user_message.original) << std::endl;
+ std::cerr << "Error: " << message.original << std::endl;
}
};
auto notifications = std::make_unique<KernelNotifications>();
diff --git a/src/chain.h b/src/chain.h
index fa165a4aa7..bb70dbd8bc 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -98,16 +98,20 @@ enum BlockStatus : uint32_t {
/**
* Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids,
- * sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all
- * parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx will be set.
+ * sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS.
+ *
+ * If a block's validity is at least VALID_TRANSACTIONS, CBlockIndex::nTx will be set. If a block and all previous
+ * blocks back to the genesis block or an assumeutxo snapshot block are at least VALID_TRANSACTIONS,
+ * CBlockIndex::nChainTx will be set.
*/
BLOCK_VALID_TRANSACTIONS = 3,
//! Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends, BIP30.
- //! Implies all parents are either at least VALID_CHAIN, or are ASSUMED_VALID
+ //! Implies all previous blocks back to the genesis block or an assumeutxo snapshot block are at least VALID_CHAIN.
BLOCK_VALID_CHAIN = 4,
- //! Scripts & signatures ok. Implies all parents are either at least VALID_SCRIPTS, or are ASSUMED_VALID.
+ //! Scripts & signatures ok. Implies all previous blocks back to the genesis block or an assumeutxo snapshot block
+ //! are at least VALID_SCRIPTS.
BLOCK_VALID_SCRIPTS = 5,
//! All validity bits.
@@ -124,21 +128,8 @@ enum BlockStatus : uint32_t {
BLOCK_OPT_WITNESS = 128, //!< block data in blk*.dat was received with a witness-enforcing client
- /**
- * If ASSUMED_VALID is set, it means that this block has not been validated
- * and has validity status less than VALID_SCRIPTS. Also that it may have
- * descendant blocks with VALID_SCRIPTS set, because they can be validated
- * based on an assumeutxo snapshot.
- *
- * When an assumeutxo snapshot is loaded, the ASSUMED_VALID flag is added to
- * unvalidated blocks at the snapshot height and below. Then, as the background
- * validation progresses, and these blocks are validated, the ASSUMED_VALID
- * flags are removed. See `doc/design/assumeutxo.md` for details.
- *
- * This flag is only used to implement checks in CheckBlockIndex() and
- * should not be used elsewhere.
- */
- BLOCK_ASSUMED_VALID = 256,
+ BLOCK_STATUS_RESERVED = 256, //!< Unused flag that was previously set on assumeutxo snapshot blocks and their
+ //!< ancestors before they were validated, and unset when they were validated.
};
/** The block chain is a tree shaped structure starting with the
@@ -173,21 +164,16 @@ public:
//! (memory only) Total amount of work (expected number of hashes) in the chain up to and including this block
arith_uint256 nChainWork{};
- //! Number of transactions in this block.
+ //! Number of transactions in this block. This will be nonzero if the block
+ //! reached the VALID_TRANSACTIONS level, and zero otherwise.
//! Note: in a potential headers-first mode, this number cannot be relied upon
- //! Note: this value is faked during UTXO snapshot load to ensure that
- //! LoadBlockIndex() will load index entries for blocks that we lack data for.
- //! @sa ActivateSnapshot
unsigned int nTx{0};
//! (memory only) Number of transactions in the chain up to and including this block.
- //! This value will be non-zero only if and only if transactions for this block and all its parents are available.
+ //! This value will be non-zero if this block and all previous blocks back
+ //! to the genesis block or an assumeutxo snapshot block have reached the
+ //! VALID_TRANSACTIONS level.
//! Change to 64-bit type before 2024 (assuming worst case of 60 byte transactions).
- //!
- //! Note: this value is faked during use of a UTXO snapshot because we don't
- //! have the underlying block data available during snapshot load.
- //! @sa AssumeutxoData
- //! @sa ActivateSnapshot
unsigned int nChainTx{0};
//! Verification status of this block. See enum BlockStatus
@@ -262,15 +248,14 @@ public:
}
/**
- * Check whether this block's and all previous blocks' transactions have been
- * downloaded (and stored to disk) at some point.
+ * Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot block have
+ * reached VALID_TRANSACTIONS and had transactions downloaded (and stored to disk) at some point.
*
* Does not imply the transactions are consensus-valid (ConnectTip might fail)
* Does not imply the transactions are still stored on disk. (IsBlockPruned might return true)
*
- * Note that this will be true for the snapshot base block, if one is loaded (and
- * all subsequent assumed-valid blocks) since its nChainTx value will have been set
- * manually based on the related AssumeutxoData entry.
+ * Note that this will be true for the snapshot base block, if one is loaded, since its nChainTx value will have
+ * been set manually based on the related AssumeutxoData entry.
*/
bool HaveNumChainTxs() const { return nChainTx != 0; }
@@ -318,14 +303,6 @@ public:
return ((nStatus & BLOCK_VALID_MASK) >= nUpTo);
}
- //! @returns true if the block is assumed-valid; this means it is queued to be
- //! validated by a background chainstate.
- bool IsAssumedValid() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
- {
- AssertLockHeld(::cs_main);
- return nStatus & BLOCK_ASSUMED_VALID;
- }
-
//! Raise the validity level of this block index entry.
//! Returns true if the validity was changed.
bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
@@ -335,12 +312,6 @@ public:
if (nStatus & BLOCK_FAILED_MASK) return false;
if ((nStatus & BLOCK_VALID_MASK) < nUpTo) {
- // If this block had been marked assumed-valid and we're raising
- // its validity to a certain point, there is no longer an assumption.
- if (nStatus & BLOCK_ASSUMED_VALID && nUpTo >= BLOCK_VALID_SCRIPTS) {
- nStatus &= ~BLOCK_ASSUMED_VALID;
- }
-
nStatus = (nStatus & ~BLOCK_VALID_MASK) | nUpTo;
return true;
}
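A small illustrative check of the invariant the updated chain.h comments describe: when nChainTx is known it equals the parent's nChainTx plus this block's nTx, the one exception being an assumeutxo snapshot base block, whose value is set manually from AssumeutxoData. ChainTxMatchesParent is a made-up name, not code from the tree.

    // Sketch only: assumes cs_main is held so the index entry is stable, and
    // that `index` is not a snapshot base block.
    bool ChainTxMatchesParent(const CBlockIndex& index)
    {
        if (index.nChainTx == 0) return true; // not yet VALID_TRANSACTIONS back to genesis/snapshot
        const unsigned int parent_chain_tx{index.pprev ? index.pprev->nChainTx : 0};
        return index.nChainTx == parent_chain_tx + index.nTx;
    }
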
diff --git a/src/common/args.cpp b/src/common/args.cpp
index a9108e5916..c90eb0c685 100644
--- a/src/common/args.cpp
+++ b/src/common/args.cpp
@@ -682,6 +682,18 @@ std::string HelpMessageOpt(const std::string &option, const std::string &message
std::string("\n\n");
}
+const std::vector<std::string> TEST_OPTIONS_DOC{
+ "addrman (use deterministic addrman)",
+};
+
+bool HasTestOption(const ArgsManager& args, const std::string& test_option)
+{
+ const auto options = args.GetArgs("-test");
+ return std::any_of(options.begin(), options.end(), [test_option](const auto& option) {
+ return option == test_option;
+ });
+}
+
fs::path GetDefaultDataDir()
{
// Windows: C:\Users\Username\AppData\Roaming\Bitcoin
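Usage of the new helper is straightforward; a sketch mirroring the addrdb.cpp hunk earlier in this diff, where regtest-only code branches on a value passed as bitcoind -regtest -test=addrman:

    const bool deterministic_addrman{HasTestOption(args, "addrman")};
    // ...construct AddrMan with /*deterministic=*/deterministic_addrman
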
diff --git a/src/common/args.h b/src/common/args.h
index 6451b194d1..78a61313b9 100644
--- a/src/common/args.h
+++ b/src/common/args.h
@@ -447,6 +447,11 @@ bool HelpRequested(const ArgsManager& args);
/** Add help options to the args manager */
void SetupHelpOptions(ArgsManager& args);
+extern const std::vector<std::string> TEST_OPTIONS_DOC;
+
+/** Checks if a particular test option is present in -test command-line arg options */
+bool HasTestOption(const ArgsManager& args, const std::string& test_option);
+
/**
* Format a string to be used as group of options in help messages
*
diff --git a/src/compat/compat.h b/src/compat/compat.h
index 9ff9a335f8..366c648ae7 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -32,6 +32,13 @@
#include <unistd.h> // IWYU pragma: export
#endif
+// Windows does not have `sa_family_t` - it defines `sockaddr::sa_family` as `u_short`.
+// Thus define `sa_family_t` on Windows too so that the rest of the code can use `sa_family_t`.
+// See https://learn.microsoft.com/en-us/windows/win32/api/winsock/ns-winsock-sockaddr#syntax
+#ifdef WIN32
+typedef u_short sa_family_t;
+#endif
+
// We map Linux / BSD error functions and codes, to the equivalent
// Windows definitions, and use the WSA* names throughout our code.
// Note that glibc defines EWOULDBLOCK as EAGAIN (see errno.h).
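Illustrative only: with the typedef above, address-family dispatch written against sa_family_t compiles unchanged on Windows and POSIX. FamilyName is a hypothetical helper, not part of the tree; AF_INET/AF_INET6 come from the socket headers compat.h already includes.

    std::string FamilyName(sa_family_t family)
    {
        switch (family) {
        case AF_INET:  return "ipv4";
        case AF_INET6: return "ipv6";
        default:       return "other";
        }
    }
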
diff --git a/src/flatfile.cpp b/src/flatfile.cpp
index 59861a08ad..2bff663d8b 100644
--- a/src/flatfile.cpp
+++ b/src/flatfile.cpp
@@ -82,15 +82,18 @@ bool FlatFileSeq::Flush(const FlatFilePos& pos, bool finalize)
{
FILE* file = Open(FlatFilePos(pos.nFile, 0)); // Avoid fseek to nPos
if (!file) {
- return error("%s: failed to open file %d", __func__, pos.nFile);
+ LogError("%s: failed to open file %d\n", __func__, pos.nFile);
+ return false;
}
if (finalize && !TruncateFile(file, pos.nPos)) {
fclose(file);
- return error("%s: failed to truncate file %d", __func__, pos.nFile);
+ LogError("%s: failed to truncate file %d\n", __func__, pos.nFile);
+ return false;
}
if (!FileCommit(file)) {
fclose(file);
- return error("%s: failed to commit file %d", __func__, pos.nFile);
+ LogError("%s: failed to commit file %d\n", __func__, pos.nFile);
+ return false;
}
DirectoryCommit(m_dir);
diff --git a/src/i2p.cpp b/src/i2p.cpp
index 02f2c1cea2..962adb124d 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -115,7 +115,7 @@ static CNetAddr DestB64ToAddr(const std::string& dest)
namespace sam {
Session::Session(const fs::path& private_key_file,
- const CService& control_host,
+ const Proxy& control_host,
CThreadInterrupt* interrupt)
: m_private_key_file{private_key_file},
m_control_host{control_host},
@@ -124,7 +124,7 @@ Session::Session(const fs::path& private_key_file,
{
}
-Session::Session(const CService& control_host, CThreadInterrupt* interrupt)
+Session::Session(const Proxy& control_host, CThreadInterrupt* interrupt)
: m_control_host{control_host},
m_interrupt{interrupt},
m_transient{true}
@@ -327,14 +327,10 @@ Session::Reply Session::SendRequestAndGetReply(const Sock& sock,
std::unique_ptr<Sock> Session::Hello() const
{
- auto sock = CreateSock(m_control_host);
+ auto sock = m_control_host.Connect();
if (!sock) {
- throw std::runtime_error("Cannot create socket");
- }
-
- if (!ConnectSocketDirectly(m_control_host, *sock, nConnectTimeout, true)) {
- throw std::runtime_error(strprintf("Cannot connect to %s", m_control_host.ToStringAddrPort()));
+ throw std::runtime_error(strprintf("Cannot connect to %s", m_control_host.ToString()));
}
SendRequestAndGetReply(*sock, "HELLO VERSION MIN=3.1 MAX=3.1");
@@ -418,7 +414,7 @@ void Session::CreateIfNotCreatedAlready()
const auto session_type = m_transient ? "transient" : "persistent";
const auto session_id = GetRandHash().GetHex().substr(0, 10); // full is overkill, too verbose in the logs
- Log("Creating %s SAM session %s with %s", session_type, session_id, m_control_host.ToStringAddrPort());
+ Log("Creating %s SAM session %s with %s", session_type, session_id, m_control_host.ToString());
auto sock = Hello();
diff --git a/src/i2p.h b/src/i2p.h
index 375abaccfc..8b0f1e1182 100644
--- a/src/i2p.h
+++ b/src/i2p.h
@@ -7,6 +7,7 @@
#include <compat/compat.h>
#include <netaddress.h>
+#include <netbase.h>
#include <sync.h>
#include <util/fs.h>
#include <util/sock.h>
@@ -67,7 +68,7 @@ public:
* `Session` object.
*/
Session(const fs::path& private_key_file,
- const CService& control_host,
+ const Proxy& control_host,
CThreadInterrupt* interrupt);
/**
@@ -81,7 +82,7 @@ public:
* `CThreadInterrupt` object is saved, so it must not be destroyed earlier than this
* `Session` object.
*/
- Session(const CService& control_host, CThreadInterrupt* interrupt);
+ Session(const Proxy& control_host, CThreadInterrupt* interrupt);
/**
* Destroy the session, closing the internally used sockets. The sockets that have been
@@ -235,9 +236,9 @@ private:
const fs::path m_private_key_file;
/**
- * The host and port of the SAM control service.
+ * The SAM control service proxy.
*/
- const CService m_control_host;
+ const Proxy m_control_host;
/**
* Cease network activity when this is signaled.
diff --git a/src/index/base.cpp b/src/index/base.cpp
index 2287437f8f..13d8ba5a01 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -31,7 +31,7 @@ template <typename... Args>
void BaseIndex::FatalErrorf(const char* fmt, const Args&... args)
{
auto message = tfm::format(fmt, args...);
- node::AbortNode(m_chain->context()->shutdown, m_chain->context()->exit_status, message);
+ node::AbortNode(m_chain->context()->shutdown, m_chain->context()->exit_status, Untranslated(message));
}
CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
@@ -141,7 +141,7 @@ static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain&
return chain.Next(chain.FindFork(pindex_prev));
}
-void BaseIndex::ThreadSync()
+void BaseIndex::Sync()
{
const CBlockIndex* pindex = m_best_block_index.load();
if (!m_synced) {
@@ -159,37 +159,20 @@ void BaseIndex::ThreadSync()
return;
}
- {
- LOCK(cs_main);
- const CBlockIndex* pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
- if (!pindex_next) {
- SetBestBlockIndex(pindex);
- m_synced = true;
- // No need to handle errors in Commit. See rationale above.
- Commit();
- break;
- }
- if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
- FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
- __func__, GetName());
- return;
- }
- pindex = pindex_next;
- }
-
- auto current_time{std::chrono::steady_clock::now()};
- if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
- LogPrintf("Syncing %s with block chain from height %d\n",
- GetName(), pindex->nHeight);
- last_log_time = current_time;
- }
-
- if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
- SetBestBlockIndex(pindex->pprev);
- last_locator_write_time = current_time;
+ const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain));
+ if (!pindex_next) {
+ SetBestBlockIndex(pindex);
+ m_synced = true;
// No need to handle errors in Commit. See rationale above.
Commit();
+ break;
}
+ if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
+ FatalErrorf("%s: Failed to rewind index %s to a previous chain tip", __func__, GetName());
+ return;
+ }
+ pindex = pindex_next;
+
CBlock block;
interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex);
@@ -205,6 +188,20 @@ void BaseIndex::ThreadSync()
__func__, pindex->GetBlockHash().ToString());
return;
}
+
+ auto current_time{std::chrono::steady_clock::now()};
+ if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
+ LogPrintf("Syncing %s with block chain from height %d\n",
+ GetName(), pindex->nHeight);
+ last_log_time = current_time;
+ }
+
+ if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
+ SetBestBlockIndex(pindex);
+ last_locator_write_time = current_time;
+ // No need to handle errors in Commit. See rationale above.
+ Commit();
+ }
}
}
@@ -229,7 +226,8 @@ bool BaseIndex::Commit()
}
}
if (!ok) {
- return error("%s: Failed to commit latest %s state", __func__, GetName());
+ LogError("%s: Failed to commit latest %s state\n", __func__, GetName());
+ return false;
}
return true;
}
@@ -393,7 +391,7 @@ bool BaseIndex::StartBackgroundSync()
{
if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");
- m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); });
+ m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); });
return true;
}
diff --git a/src/index/base.h b/src/index/base.h
index 154061fb19..0eb1d9ca3b 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -78,13 +78,6 @@ private:
std::thread m_thread_sync;
CThreadInterrupt m_interrupt;
- /// Sync the index with the block index starting from the current best block.
- /// Intended to be run in its own thread, m_thread_sync, and can be
- /// interrupted with m_interrupt. Once the index gets in sync, the m_synced
- /// flag is set and the BlockConnected ValidationInterface callback takes
- /// over and the sync thread exits.
- void ThreadSync();
-
/// Write the current index state (eg. chain block locator and subclass-specific items) to disk.
///
/// Recommendations for error handling:
@@ -152,9 +145,16 @@ public:
/// validation interface so that it stays in sync with blockchain updates.
[[nodiscard]] bool Init();
- /// Starts the initial sync process.
+ /// Starts the initial sync process on a background thread.
[[nodiscard]] bool StartBackgroundSync();
+ /// Sync the index with the block index starting from the current best block.
+ /// Intended to be run in its own thread, m_thread_sync, and can be
+ /// interrupted with m_interrupt. Once the index gets in sync, the m_synced
+ /// flag is set and the BlockConnected ValidationInterface callback takes
+ /// over and the sync thread exits.
+ void Sync();
+
/// Stops the instance from staying in sync with blockchain updates.
void Stop();
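With Sync() now public, a caller can drive an index either on the background thread or inline. A rough sketch, assuming `index` is some already-constructed BaseIndex subclass:

    if (index.Init()) {
        index.StartBackgroundSync();   // spawns m_thread_sync, which runs Sync()
        // ...or call index.Sync() directly for a blocking sync in the caller's
        // thread, as the new bench/index_blockfilter.cpp benchmark does.
    }
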
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index 58f777b326..41bdca9df5 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -119,14 +119,25 @@ bool BlockFilterIndex::CustomInit(const std::optional<interfaces::BlockKey>& blo
// indicate database corruption or a disk failure, and starting the index would cause
// further corruption.
if (m_db->Exists(DB_FILTER_POS)) {
- return error("%s: Cannot read current %s state; index may be corrupted",
+ LogError("%s: Cannot read current %s state; index may be corrupted\n",
__func__, GetName());
+ return false;
}
// If the DB_FILTER_POS is not set, then initialize to the first location.
m_next_filter_pos.nFile = 0;
m_next_filter_pos.nPos = 0;
}
+
+ if (block) {
+ auto op_last_header = ReadFilterHeader(block->height, block->hash);
+ if (!op_last_header) {
+ LogError("Cannot read last block filter header; index may be corrupted\n");
+ return false;
+ }
+ m_last_header = *op_last_header;
+ }
+
return true;
}
@@ -137,10 +148,12 @@ bool BlockFilterIndex::CustomCommit(CDBBatch& batch)
// Flush current filter file to disk.
AutoFile file{m_filter_fileseq->Open(pos)};
if (file.IsNull()) {
- return error("%s: Failed to open filter file %d", __func__, pos.nFile);
+ LogError("%s: Failed to open filter file %d\n", __func__, pos.nFile);
+ return false;
}
if (!FileCommit(file.Get())) {
- return error("%s: Failed to commit filter file %d", __func__, pos.nFile);
+ LogError("%s: Failed to commit filter file %d\n", __func__, pos.nFile);
+ return false;
}
batch.Write(DB_FILTER_POS, pos);
@@ -159,11 +172,15 @@ bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, const uint256&
std::vector<uint8_t> encoded_filter;
try {
filein >> block_hash >> encoded_filter;
- if (Hash(encoded_filter) != hash) return error("Checksum mismatch in filter decode.");
+ if (Hash(encoded_filter) != hash) {
+ LogError("Checksum mismatch in filter decode.\n");
+ return false;
+ }
filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter), /*skip_decode_check=*/true);
}
catch (const std::exception& e) {
- return error("%s: Failed to deserialize block filter from disk: %s", __func__, e.what());
+ LogError("%s: Failed to deserialize block filter from disk: %s\n", __func__, e.what());
+ return false;
}
return true;
@@ -215,10 +232,25 @@ size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos& pos, const BlockFilter&
return data_size;
}
+std::optional<uint256> BlockFilterIndex::ReadFilterHeader(int height, const uint256& expected_block_hash)
+{
+ std::pair<uint256, DBVal> read_out;
+ if (!m_db->Read(DBHeightKey(height), read_out)) {
+ return std::nullopt;
+ }
+
+ if (read_out.first != expected_block_hash) {
+ LogError("%s: previous block header belongs to unexpected block %s; expected %s\n",
+ __func__, read_out.first.ToString(), expected_block_hash.ToString());
+ return std::nullopt;
+ }
+
+ return read_out.second.header;
+}
+
bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block)
{
CBlockUndo block_undo;
- uint256 prev_header;
if (block.height > 0) {
// pindex variable gives indexing code access to node internals. It
@@ -227,33 +259,28 @@ bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block)
if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) {
return false;
}
-
- std::pair<uint256, DBVal> read_out;
- if (!m_db->Read(DBHeightKey(block.height - 1), read_out)) {
- return false;
- }
-
- uint256 expected_block_hash = *Assert(block.prev_hash);
- if (read_out.first != expected_block_hash) {
- return error("%s: previous block header belongs to unexpected block %s; expected %s",
- __func__, read_out.first.ToString(), expected_block_hash.ToString());
- }
-
- prev_header = read_out.second.header;
}
BlockFilter filter(m_filter_type, *Assert(block.data), block_undo);
+ const uint256& header = filter.ComputeHeader(m_last_header);
+ bool res = Write(filter, block.height, header);
+ if (res) m_last_header = header; // update last header
+ return res;
+}
+
+bool BlockFilterIndex::Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header)
+{
size_t bytes_written = WriteFilterToDisk(m_next_filter_pos, filter);
if (bytes_written == 0) return false;
std::pair<uint256, DBVal> value;
- value.first = block.hash;
+ value.first = filter.GetBlockHash();
value.second.hash = filter.GetHash();
- value.second.header = filter.ComputeHeader(prev_header);
+ value.second.header = filter_header;
value.second.pos = m_next_filter_pos;
- if (!m_db->Write(DBHeightKey(block.height), value)) {
+ if (!m_db->Write(DBHeightKey(block_height), value)) {
return false;
}
@@ -270,14 +297,16 @@ bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block)
for (int height = start_height; height <= stop_height; ++height) {
if (!db_it.GetKey(key) || key.height != height) {
- return error("%s: unexpected key in %s: expected (%c, %d)",
+ LogError("%s: unexpected key in %s: expected (%c, %d)\n",
__func__, index_name, DB_BLOCK_HEIGHT, height);
+ return false;
}
std::pair<uint256, DBVal> value;
if (!db_it.GetValue(value)) {
- return error("%s: unable to read value in %s at key (%c, %d)",
+ LogError("%s: unable to read value in %s at key (%c, %d)\n",
__func__, index_name, DB_BLOCK_HEIGHT, height);
+ return false;
}
batch.Write(DBHashKey(value.first), std::move(value.second));
@@ -305,6 +334,8 @@ bool BlockFilterIndex::CustomRewind(const interfaces::BlockKey& current_tip, con
batch.Write(DB_FILTER_POS, m_next_filter_pos);
if (!m_db->WriteBatch(batch)) return false;
+ // Update cached header
+ m_last_header = *Assert(ReadFilterHeader(new_tip.height, new_tip.hash));
return true;
}
@@ -330,11 +361,13 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start
const CBlockIndex* stop_index, std::vector<DBVal>& results)
{
if (start_height < 0) {
- return error("%s: start height (%d) is negative", __func__, start_height);
+ LogError("%s: start height (%d) is negative\n", __func__, start_height);
+ return false;
}
if (start_height > stop_index->nHeight) {
- return error("%s: start height (%d) is greater than stop height (%d)",
+ LogError("%s: start height (%d) is greater than stop height (%d)\n",
__func__, start_height, stop_index->nHeight);
+ return false;
}
size_t results_size = static_cast<size_t>(stop_index->nHeight - start_height + 1);
@@ -350,8 +383,9 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start
size_t i = static_cast<size_t>(height - start_height);
if (!db_it->GetValue(values[i])) {
- return error("%s: unable to read value in %s at key (%c, %d)",
+ LogError("%s: unable to read value in %s at key (%c, %d)\n",
__func__, index_name, DB_BLOCK_HEIGHT, height);
+ return false;
}
db_it->Next();
@@ -373,8 +407,9 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start
}
if (!db.Read(DBHashKey(block_hash), results[i])) {
- return error("%s: unable to read value in %s at key (%c, %s)",
+ LogError("%s: unable to read value in %s at key (%c, %s)\n",
__func__, index_name, DB_BLOCK_HASH, block_hash.ToString());
+ return false;
}
}
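The m_last_header cache works because BIP 157 filter headers chain: each header commits to the filter hash plus the previous header, so appending a block needs only the previous header rather than a disk read. A stand-alone sketch of that chaining (ChainedHeader is illustrative, not code from the index):

    uint256 ChainedHeader(const std::vector<BlockFilter>& filters)
    {
        uint256 header{}; // all-zero "previous header" before the first filter
        for (const auto& f : filters) {
            header = f.ComputeHeader(header); // commits to filter hash and prev header
        }
        return header;
    }
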
diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h
index 10a1cfd2ee..cdb9563fb8 100644
--- a/src/index/blockfilterindex.h
+++ b/src/index/blockfilterindex.h
@@ -42,8 +42,15 @@ private:
/** cache of block hash to filter header, to avoid disk access when responding to getcfcheckpt. */
std::unordered_map<uint256, uint256, FilterHeaderHasher> m_headers_cache GUARDED_BY(m_cs_headers_cache);
+ // Last computed header to avoid disk reads on every new block.
+ uint256 m_last_header{};
+
bool AllowPrune() const override { return true; }
+ bool Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header);
+
+ std::optional<uint256> ReadFilterHeader(int height, const uint256& expected_block_hash);
+
protected:
bool CustomInit(const std::optional<interfaces::BlockKey>& block) override;
diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp
index ecd3fd21b5..dff8e50a4e 100644
--- a/src/index/coinstatsindex.cpp
+++ b/src/index/coinstatsindex.cpp
@@ -138,8 +138,9 @@ bool CoinStatsIndex::CustomAppend(const interfaces::BlockInfo& block)
read_out.first.ToString(), expected_block_hash.ToString());
if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) {
- return error("%s: previous block header not found; expected %s",
+ LogError("%s: previous block header not found; expected %s\n",
__func__, expected_block_hash.ToString());
+ return false;
}
}
@@ -245,14 +246,16 @@ bool CoinStatsIndex::CustomAppend(const interfaces::BlockInfo& block)
for (int height = start_height; height <= stop_height; ++height) {
if (!db_it.GetKey(key) || key.height != height) {
- return error("%s: unexpected key in %s: expected (%c, %d)",
+ LogError("%s: unexpected key in %s: expected (%c, %d)\n",
__func__, index_name, DB_BLOCK_HEIGHT, height);
+ return false;
}
std::pair<uint256, DBVal> value;
if (!db_it.GetValue(value)) {
- return error("%s: unable to read value in %s at key (%c, %d)",
+ LogError("%s: unable to read value in %s at key (%c, %d)\n",
__func__, index_name, DB_BLOCK_HEIGHT, height);
+ return false;
}
batch.Write(DBHashKey(value.first), std::move(value.second));
@@ -285,8 +288,9 @@ bool CoinStatsIndex::CustomRewind(const interfaces::BlockKey& current_tip, const
CBlock block;
if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *iter_tip)) {
- return error("%s: Failed to read block %s from disk",
+ LogError("%s: Failed to read block %s from disk\n",
__func__, iter_tip->GetBlockHash().ToString());
+ return false;
}
if (!ReverseBlock(block, iter_tip)) {
@@ -353,23 +357,26 @@ bool CoinStatsIndex::CustomInit(const std::optional<interfaces::BlockKey>& block
// exist. Any other errors indicate database corruption or a disk
// failure, and starting the index would cause further corruption.
if (m_db->Exists(DB_MUHASH)) {
- return error("%s: Cannot read current %s state; index may be corrupted",
+ LogError("%s: Cannot read current %s state; index may be corrupted\n",
__func__, GetName());
+ return false;
}
}
if (block) {
DBVal entry;
if (!LookUpOne(*m_db, *block, entry)) {
- return error("%s: Cannot read current %s state; index may be corrupted",
+ LogError("%s: Cannot read current %s state; index may be corrupted\n",
__func__, GetName());
+ return false;
}
uint256 out;
m_muhash.Finalize(out);
if (entry.muhash != out) {
- return error("%s: Cannot read current %s state; index may be corrupted",
+ LogError("%s: Cannot read current %s state; index may be corrupted\n",
__func__, GetName());
+ return false;
}
m_transaction_output_count = entry.transaction_output_count;
@@ -422,8 +429,9 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex
read_out.first.ToString(), expected_block_hash.ToString());
if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) {
- return error("%s: previous block header not found; expected %s",
+ LogError("%s: previous block header not found; expected %s\n",
__func__, expected_block_hash.ToString());
+ return false;
}
}
}
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 4983926e68..80f615ed0e 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -81,20 +81,24 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe
AutoFile file{m_chainstate->m_blockman.OpenBlockFile(postx, true)};
if (file.IsNull()) {
- return error("%s: OpenBlockFile failed", __func__);
+ LogError("%s: OpenBlockFile failed\n", __func__);
+ return false;
}
CBlockHeader header;
try {
file >> header;
if (fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) {
- return error("%s: fseek(...) failed", __func__);
+ LogError("%s: fseek(...) failed\n", __func__);
+ return false;
}
file >> TX_WITH_WITNESS(tx);
} catch (const std::exception& e) {
- return error("%s: Deserialize or I/O error - %s", __func__, e.what());
+ LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what());
+ return false;
}
if (tx->GetHash() != tx_hash) {
- return error("%s: txid mismatch", __func__);
+ LogError("%s: txid mismatch\n", __func__);
+ return false;
}
block_hash = header.GetHash();
return true;
diff --git a/src/init.cpp b/src/init.cpp
index 9ea7b881cb..885c0673dd 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -256,12 +256,8 @@ void Interrupt(NodeContext& node)
InterruptMapPort();
if (node.connman)
node.connman->Interrupt();
- if (g_txindex) {
- g_txindex->Interrupt();
- }
- ForEachBlockFilterIndex([](BlockFilterIndex& index) { index.Interrupt(); });
- if (g_coin_stats_index) {
- g_coin_stats_index->Interrupt();
+ for (auto* index : node.indexes) {
+ index->Interrupt();
}
}
@@ -337,16 +333,11 @@ void Shutdown(NodeContext& node)
if (node.validation_signals) node.validation_signals->FlushBackgroundCallbacks();
// Stop and delete all indexes only after flushing background callbacks.
- if (g_txindex) {
- g_txindex->Stop();
- g_txindex.reset();
- }
- if (g_coin_stats_index) {
- g_coin_stats_index->Stop();
- g_coin_stats_index.reset();
- }
- ForEachBlockFilterIndex([](BlockFilterIndex& index) { index.Stop(); });
+ for (auto* index : node.indexes) index->Stop();
+ if (g_txindex) g_txindex.reset();
+ if (g_coin_stats_index) g_coin_stats_index.reset();
DestroyAllBlockFilterIndexes();
+ node.indexes.clear(); // all instances are nullptr now
// Any future callbacks will be dropped. This should absolutely be safe - if
// missing a callback results in an unrecoverable situation, unclean shutdown
@@ -477,7 +468,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#endif
argsman.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Disables automatic broadcast and rebroadcast of transactions, unless the source peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-coinstatsindex", strprintf("Maintain coinstats index used by the gettxoutsetinfo RPC (default: %u)", DEFAULT_COINSTATSINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location (only useable from command line, not configuration file) (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -534,7 +525,11 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection memory usage for the send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by outbound peers forward or backward by this amount (default: %u seconds).", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target per 24h. Limit does not apply to peers with 'download' permission or blocks created within past week. 0 = no limit (default: %s). Optional suffix units [k|K|m|M|g|G|t|T] (default: M). Lowercase is 1000 base while uppercase is 1024 base", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+#if HAVE_SOCKADDR_UN
+ argsman.AddArg("-onion=<ip:port|path>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy). May be a local file path prefixed with 'unix:'.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+#else
argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+#endif
argsman.AddArg("-i2psam=<ip:port>", "I2P SAM proxy to reach I2P peers and accept I2P connections (default: none)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-i2pacceptincoming", strprintf("Whether to accept inbound I2P connections (default: %i). Ignored if -i2psam is not set. Listening for inbound I2P connections is done through the SAM proxy, not by binding to a local address and port.", DEFAULT_I2P_ACCEPT_INCOMING), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-onlynet=<net>", "Make automatic outbound connections only to network <net> (" + Join(GetNetworkNames(), ", ") + "). Inbound and manual connections are not affected by this option. It can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -545,7 +540,11 @@ void SetupServerArgs(ArgsManager& argsman)
// TODO: remove the sentence "Nodes not using ... incoming connections." once the changes from
// https://github.com/bitcoin/bitcoin/pull/23542 have become widespread.
argsman.AddArg("-port=<port>", strprintf("Listen for connections on <port>. Nodes not using the default ports (default: %u, testnet: %u, signet: %u, regtest: %u) are unlikely to get incoming connections. Not relevant for I2P (see doc/i2p.md).", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+#if HAVE_SOCKADDR_UN
+ argsman.AddArg("-proxy=<ip:port|path>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled). May be a local file path prefixed with 'unix:' if the proxy supports it.", ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_ELISION, OptionsCategory::CONNECTION);
+#else
argsman.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_ELISION, OptionsCategory::CONNECTION);
+#endif
argsman.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -571,9 +570,11 @@ void SetupServerArgs(ArgsManager& argsman)
"Use [host]:port notation for IPv6. Allowed permissions: " + Join(NET_PERMISSIONS_DOC, ", ") + ". "
"Specify multiple permissions separated by commas (default: download,noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-whitelist=<[permissions@]IP address or network>", "Add permission flags to the peers connecting from the given IP address (e.g. 1.2.3.4) or "
+ argsman.AddArg("-whitelist=<[permissions@]IP address or network>", "Add permission flags to the peers using the given IP address (e.g. 1.2.3.4) or "
"CIDR-notated network (e.g. 1.2.3.0/24). Uses the same permissions as "
- "-whitebind. Can be specified multiple times." , ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ "-whitebind. "
+ "Additional flags \"in\" and \"out\" control whether permissions apply to incoming connections and/or manual (default: incoming only). "
+ "Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
g_wallet_init_interface.AddWalletOptions(argsman);
@@ -614,7 +615,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-test=<option>", "Pass a test-only option. Options include : " + Join(TEST_OPTIONS_DOC, ", ") + ".", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-capturemessages", "Capture all P2P messages to disk", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_BYTES >> 20), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
@@ -643,8 +644,8 @@ void SetupServerArgs(ArgsManager& argsman)
OptionsCategory::NODE_RELAY);
argsman.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kvB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- argsman.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- argsman.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
argsman.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
@@ -1028,6 +1029,22 @@ bool AppInitParameterInteraction(const ArgsManager& args)
if (args.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
+ if (args.IsArgSet("-test")) {
+ if (chainparams.GetChainType() != ChainType::REGTEST) {
+ return InitError(Untranslated("-test=<option> can only be used with regtest"));
+ }
+ const std::vector<std::string> options = args.GetArgs("-test");
+ for (const std::string& option : options) {
+ auto it = std::find_if(TEST_OPTIONS_DOC.begin(), TEST_OPTIONS_DOC.end(), [&option](const std::string& doc_option) {
+ size_t pos = doc_option.find(" (");
+ return (pos != std::string::npos) && (doc_option.substr(0, pos) == option);
+ });
+ if (it == TEST_OPTIONS_DOC.end()) {
+ InitWarning(strprintf(_("Unrecognised option \"%s\" provided in -test=<option>."), option));
+ }
+ }
+ }
+
// Also report errors from parsing before daemonization
{
kernel::Notifications notifications{};
@@ -1305,7 +1322,14 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
std::string host_out;
uint16_t port_out{0};
if (!SplitHostPort(socket_addr, port_out, host_out)) {
+#if HAVE_SOCKADDR_UN
+ // Allow unix domain sockets for -proxy and -onion e.g. unix:/some/file/path
+ if ((port_option != "-proxy" && port_option != "-onion") || socket_addr.find(ADDR_PREFIX_UNIX) != 0) {
+ return InitError(InvalidPortErrMsg(port_option, socket_addr));
+ }
+#else
return InitError(InvalidPortErrMsg(port_option, socket_addr));
+#endif
}
}
}
@@ -1372,12 +1396,18 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// -noproxy (or -proxy=0) as well as the empty string can be used to not set a proxy, this is the default
std::string proxyArg = args.GetArg("-proxy", "");
if (proxyArg != "" && proxyArg != "0") {
- const std::optional<CService> proxyAddr{Lookup(proxyArg, 9050, fNameLookup)};
- if (!proxyAddr.has_value()) {
- return InitError(strprintf(_("Invalid -proxy address or hostname: '%s'"), proxyArg));
+ Proxy addrProxy;
+ if (IsUnixSocketPath(proxyArg)) {
+ addrProxy = Proxy(proxyArg, proxyRandomize);
+ } else {
+ const std::optional<CService> proxyAddr{Lookup(proxyArg, 9050, fNameLookup)};
+ if (!proxyAddr.has_value()) {
+ return InitError(strprintf(_("Invalid -proxy address or hostname: '%s'"), proxyArg));
+ }
+
+ addrProxy = Proxy(proxyAddr.value(), proxyRandomize);
}
- Proxy addrProxy = Proxy(proxyAddr.value(), proxyRandomize);
if (!addrProxy.IsValid())
return InitError(strprintf(_("Invalid -proxy address or hostname: '%s'"), proxyArg));
@@ -1403,11 +1433,16 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
"reaching the Tor network is explicitly forbidden: -onion=0"));
}
} else {
- const std::optional<CService> addr{Lookup(onionArg, 9050, fNameLookup)};
- if (!addr.has_value() || !addr->IsValid()) {
- return InitError(strprintf(_("Invalid -onion address or hostname: '%s'"), onionArg));
+ if (IsUnixSocketPath(onionArg)) {
+ onion_proxy = Proxy(onionArg, proxyRandomize);
+ } else {
+ const std::optional<CService> addr{Lookup(onionArg, 9050, fNameLookup)};
+ if (!addr.has_value() || !addr->IsValid()) {
+ return InitError(strprintf(_("Invalid -onion address or hostname: '%s'"), onionArg));
+ }
+
+ onion_proxy = Proxy(addr.value(), proxyRandomize);
}
- onion_proxy = Proxy{addr.value(), proxyRandomize};
}
}
@@ -1436,9 +1471,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
#if ENABLE_ZMQ
g_zmq_notification_interface = CZMQNotificationInterface::Create(
- [&chainman = node.chainman](CBlock& block, const CBlockIndex& index) {
+ [&chainman = node.chainman](std::vector<uint8_t>& block, const CBlockIndex& index) {
assert(chainman);
- return chainman->m_blockman.ReadBlockFromDisk(block, index);
+ return chainman->m_blockman.ReadRawBlockFromDisk(block, WITH_LOCK(cs_main, return index.GetBlockPos()));
});
if (g_zmq_notification_interface) {
@@ -1722,7 +1757,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// Start indexes initial sync
if (!StartIndexBackgroundSync(node)) {
bilingual_str err_str = _("Failed to start indexes, shutting down..");
- chainman.GetNotifications().fatalError(err_str.original, err_str);
+ chainman.GetNotifications().fatalError(err_str);
return;
}
// Load mempool from disk
@@ -1784,6 +1819,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
connOptions.m_added_nodes = args.GetArgs("-addnode");
connOptions.nMaxOutboundLimit = *opt_max_upload;
connOptions.m_peer_connect_timeout = peer_connect_timeout;
+ connOptions.whitelist_forcerelay = args.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY);
+ connOptions.whitelist_relay = args.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY);
// Port to bind to if `-bind=addr` is provided without a `:port` suffix.
const uint16_t default_bind_port =
@@ -1868,9 +1905,15 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
for (const auto& net : args.GetArgs("-whitelist")) {
NetWhitelistPermissions subnet;
+ ConnectionDirection connection_direction;
bilingual_str error;
- if (!NetWhitelistPermissions::TryParse(net, subnet, error)) return InitError(error);
- connOptions.vWhitelistedRange.push_back(subnet);
+ if (!NetWhitelistPermissions::TryParse(net, subnet, connection_direction, error)) return InitError(error);
+ if (connection_direction & ConnectionDirection::In) {
+ connOptions.vWhitelistedRangeIncoming.push_back(subnet);
+ }
+ if (connection_direction & ConnectionDirection::Out) {
+ connOptions.vWhitelistedRangeOutgoing.push_back(subnet);
+ }
}
connOptions.vSeedNodes = args.GetArgs("-seednode");
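Taken together, the -proxy and -onion hunks above follow the same pattern: treat a "unix:"-prefixed value as a local socket path, otherwise fall back to the usual address lookup. A minimal sketch of that shared dispatch, assuming the IsUnixSocketPath(), Proxy and Lookup() declarations introduced later in this diff (the helper name MakeProxyFromArg is illustrative only, not part of the change):

    #include <netbase.h>
    #include <optional>
    #include <string>

    // Sketch only; not part of this diff.
    static std::optional<Proxy> MakeProxyFromArg(const std::string& arg, bool randomize_credentials)
    {
        if (IsUnixSocketPath(arg)) {
            // e.g. -proxy=unix:/home/user/tor/socket
            return Proxy(arg, randomize_credentials);
        }
        const std::optional<CService> addr{Lookup(arg, /*portDefault=*/9050, /*fAllowLookup=*/true)};
        if (!addr.has_value() || !addr->IsValid()) return std::nullopt;
        return Proxy(addr.value(), randomize_credentials);
    }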
diff --git a/src/kernel/coinstats.cpp b/src/kernel/coinstats.cpp
index ff8a33e804..81c496ab34 100644
--- a/src/kernel/coinstats.cpp
+++ b/src/kernel/coinstats.cpp
@@ -134,7 +134,8 @@ static bool ComputeUTXOStats(CCoinsView* view, CCoinsStats& stats, T hash_obj, c
outputs[key.n] = std::move(coin);
stats.coins_count++;
} else {
- return error("%s: unable to read value", __func__);
+ LogError("%s: unable to read value\n", __func__);
+ return false;
}
pcursor->Next();
}
diff --git a/src/kernel/notifications_interface.h b/src/kernel/notifications_interface.h
index c5e77b0df9..7283a88e86 100644
--- a/src/kernel/notifications_interface.h
+++ b/src/kernel/notifications_interface.h
@@ -5,14 +5,12 @@
#ifndef BITCOIN_KERNEL_NOTIFICATIONS_INTERFACE_H
#define BITCOIN_KERNEL_NOTIFICATIONS_INTERFACE_H
-#include <util/translation.h>
-
#include <cstdint>
-#include <string>
#include <variant>
class CBlockIndex;
enum class SynchronizationState;
+struct bilingual_str;
namespace kernel {
@@ -48,7 +46,7 @@ public:
//! perform. Applications can choose to handle the flush error notification
//! by logging the error, or notifying the user, or triggering an early
//! shutdown as a precaution against causing more errors.
- virtual void flushError(const std::string& debug_message) {}
+ virtual void flushError(const bilingual_str& message) {}
//! The fatal error notification is sent to notify the user when an error
//! occurs in kernel code that can't be recovered from. After this
@@ -57,7 +55,7 @@ public:
//! handle the fatal error notification by logging the error, or notifying
//! the user, or triggering an early shutdown as a precaution against
//! causing more errors.
- virtual void fatalError(const std::string& debug_message, const bilingual_str& user_message = {}) {}
+ virtual void fatalError(const bilingual_str& message) {}
};
} // namespace kernel
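The interface change above means implementers now receive a bilingual_str for both notifications instead of a raw debug string plus an optional user-facing message. A minimal sketch of a subclass written against the new signatures (the class name LoggingNotifications is hypothetical):

    #include <kernel/notifications_interface.h>
    #include <logging.h>
    #include <util/translation.h>

    class LoggingNotifications : public kernel::Notifications
    {
    public:
        void flushError(const bilingual_str& message) override
        {
            // Log the untranslated form; the translated form is available for UIs.
            LogError("Flush error: %s\n", message.original);
        }
        void fatalError(const bilingual_str& message) override
        {
            LogError("Fatal error: %s\n", message.original);
        }
    };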
diff --git a/src/logging.h b/src/logging.h
index 525e0aec6d..2d358a52f1 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -215,7 +215,7 @@ static inline bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level leve
/** Return true if str parses as a log category and set the flag */
bool GetLogCategory(BCLog::LogFlags& flag, const std::string& str);
-// Be conservative when using LogPrintf/error or other things which
+// Be conservative when using functions that
// unconditionally log to debug.log! It should not be the case that an inbound
// peer can fill up a user's disk with debug.log entries.
@@ -263,11 +263,4 @@ static inline void LogPrintf_(const std::string& logging_function, const std::st
// Deprecated conditional logging
#define LogPrint(category, ...) LogDebug(category, __VA_ARGS__)
-template <typename... Args>
-bool error(const char* fmt, const Args&... args)
-{
- LogPrintf("ERROR: %s\n", tfm::format(fmt, args...));
- return false;
-}
-
#endif // BITCOIN_LOGGING_H
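With the error() helper removed, the remaining files in this diff migrate call sites mechanically. A sketch of the before/after shape (the function name is illustrative):

    #include <logging.h>

    bool ExampleCaller()
    {
        // Before: return error("%s: something failed", __func__);
        // After: log at error level (note the explicit trailing newline) and
        // return false as a separate statement.
        LogError("%s: something failed\n", __func__);
        return false;
    }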
diff --git a/src/net.cpp b/src/net.cpp
index 7c82f01d75..e388f05b03 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -238,10 +238,6 @@ static int GetnScore(const CService& addr)
std::optional<CService> GetLocalAddrForPeer(CNode& node)
{
CService addrLocal{GetLocalAddress(node)};
- if (gArgs.GetBoolArg("-addrmantest", false)) {
- // use IPv4 loopback during addrmantest
- addrLocal = CService(LookupNumeric("127.0.0.1", GetListenPort()));
- }
// If discovery is enabled, sometimes give our peer the address it
// tells us that it sees us as in case it has a better idea of our
// address than we do.
@@ -261,8 +257,7 @@ std::optional<CService> GetLocalAddrForPeer(CNode& node)
addrLocal.SetIP(node.GetAddrLocal());
}
}
- if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false))
- {
+ if (addrLocal.IsRoutable()) {
LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n", addrLocal.ToStringAddrPort(), node.GetId());
return addrLocal;
}
@@ -442,7 +437,6 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
}
// Connect
- bool connected = false;
std::unique_ptr<Sock> sock;
Proxy proxy;
CAddress addr_bind;
@@ -455,6 +449,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
if (addrConnect.IsI2P() && use_proxy) {
i2p::Connection conn;
+ bool connected{false};
if (m_i2p_sam_session) {
connected = m_i2p_sam_session->Connect(addrConnect, conn, proxyConnectionFailed);
@@ -463,7 +458,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
LOCK(m_unused_i2p_sessions_mutex);
if (m_unused_i2p_sessions.empty()) {
i2p_transient_session =
- std::make_unique<i2p::sam::Session>(proxy.proxy, &interruptNet);
+ std::make_unique<i2p::sam::Session>(proxy, &interruptNet);
} else {
i2p_transient_session.swap(m_unused_i2p_sessions.front());
m_unused_i2p_sessions.pop();
@@ -483,20 +478,11 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
addr_bind = CAddress{conn.me, NODE_NONE};
}
} else if (use_proxy) {
- sock = CreateSock(proxy.proxy);
- if (!sock) {
- return nullptr;
- }
- connected = ConnectThroughProxy(proxy, addrConnect.ToStringAddr(), addrConnect.GetPort(),
- *sock, nConnectTimeout, proxyConnectionFailed);
+ LogPrintLevel(BCLog::PROXY, BCLog::Level::Debug, "Using proxy: %s to connect to %s:%s\n", proxy.ToString(), addrConnect.ToStringAddr(), addrConnect.GetPort());
+ sock = ConnectThroughProxy(proxy, addrConnect.ToStringAddr(), addrConnect.GetPort(), proxyConnectionFailed);
} else {
// no proxy needed (none set for target network)
- sock = CreateSock(addrConnect);
- if (!sock) {
- return nullptr;
- }
- connected = ConnectSocketDirectly(addrConnect, *sock, nConnectTimeout,
- conn_type == ConnectionType::MANUAL);
+ sock = ConnectDirectly(addrConnect, conn_type == ConnectionType::MANUAL);
}
if (!proxyConnectionFailed) {
// If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to
@@ -504,21 +490,20 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
addrman.Attempt(addrConnect, fCountFailure);
}
} else if (pszDest && GetNameProxy(proxy)) {
- sock = CreateSock(proxy.proxy);
- if (!sock) {
- return nullptr;
- }
std::string host;
uint16_t port{default_port};
SplitHostPort(std::string(pszDest), port, host);
bool proxyConnectionFailed;
- connected = ConnectThroughProxy(proxy, host, port, *sock, nConnectTimeout,
- proxyConnectionFailed);
+ sock = ConnectThroughProxy(proxy, host, port, proxyConnectionFailed);
}
- if (!connected) {
+ if (!sock) {
return nullptr;
}
+ NetPermissionFlags permission_flags = NetPermissionFlags::None;
+ std::vector<NetWhitelistPermissions> whitelist_permissions = conn_type == ConnectionType::MANUAL ? vWhitelistedRangeOutgoing : std::vector<NetWhitelistPermissions>{};
+ AddWhitelistPermissionFlags(permission_flags, addrConnect, whitelist_permissions);
+
// Add node
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
@@ -535,6 +520,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
conn_type,
/*inbound_onion=*/false,
CNodeOptions{
+ .permission_flags = permission_flags,
.i2p_sam_session = std::move(i2p_transient_session),
.recv_flood_size = nReceiveFloodSize,
.use_v2transport = use_v2transport,
@@ -558,9 +544,18 @@ void CNode::CloseSocketDisconnect()
m_i2p_sam_session.reset();
}
-void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const {
- for (const auto& subnet : vWhitelistedRange) {
- if (subnet.m_subnet.Match(addr)) NetPermissions::AddFlag(flags, subnet.m_flags);
+void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const {
+ for (const auto& subnet : ranges) {
+ if (subnet.m_subnet.Match(addr)) {
+ NetPermissions::AddFlag(flags, subnet.m_flags);
+ }
+ }
+ if (NetPermissions::HasFlag(flags, NetPermissionFlags::Implicit)) {
+ NetPermissions::ClearFlag(flags, NetPermissionFlags::Implicit);
+ if (whitelist_forcerelay) NetPermissions::AddFlag(flags, NetPermissionFlags::ForceRelay);
+ if (whitelist_relay) NetPermissions::AddFlag(flags, NetPermissionFlags::Relay);
+ NetPermissions::AddFlag(flags, NetPermissionFlags::Mempool);
+ NetPermissions::AddFlag(flags, NetPermissionFlags::NoBan);
}
}
@@ -575,7 +570,7 @@ void CNode::SetAddrLocal(const CService& addrLocalIn) {
AssertLockNotHeld(m_addr_local_mutex);
LOCK(m_addr_local_mutex);
if (addrLocal.IsValid()) {
- error("Addr local already set for node: %i. Refusing to change from %s to %s", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort());
+ LogError("Addr local already set for node: %i. Refusing to change from %s to %s\n", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort());
} else {
addrLocal = addrLocalIn;
}
@@ -1726,14 +1721,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
{
int nInbound = 0;
- AddWhitelistPermissionFlags(permission_flags, addr);
- if (NetPermissions::HasFlag(permission_flags, NetPermissionFlags::Implicit)) {
- NetPermissions::ClearFlag(permission_flags, NetPermissionFlags::Implicit);
- if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) NetPermissions::AddFlag(permission_flags, NetPermissionFlags::ForceRelay);
- if (gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) NetPermissions::AddFlag(permission_flags, NetPermissionFlags::Relay);
- NetPermissions::AddFlag(permission_flags, NetPermissionFlags::Mempool);
- NetPermissions::AddFlag(permission_flags, NetPermissionFlags::NoBan);
- }
+ AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming);
{
LOCK(m_nodes_mutex);
@@ -1788,15 +1776,10 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
- ServiceFlags nodeServices = nLocalServices;
- if (NetPermissions::HasFlag(permission_flags, NetPermissionFlags::BloomFilter)) {
- nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM);
- }
-
const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
// The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is
// detected, so use it whenever we signal NODE_P2P_V2.
- const bool use_v2transport(nodeServices & NODE_P2P_V2);
+ const bool use_v2transport(nLocalServices & NODE_P2P_V2);
CNode* pnode = new CNode(id,
std::move(sock),
@@ -1814,7 +1797,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
.use_v2transport = use_v2transport,
});
pnode->AddRef();
- m_msgproc->InitializeNode(*pnode, nodeServices);
+ m_msgproc->InitializeNode(*pnode, nLocalServices);
LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToStringAddrPort());
@@ -2993,7 +2976,7 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError,
return false;
}
- std::unique_ptr<Sock> sock = CreateSock(addrBind);
+ std::unique_ptr<Sock> sock = CreateSock(addrBind.GetSAFamily());
if (!sock) {
strError = strprintf(Untranslated("Couldn't open socket for incoming connections (socket returned error %s)"), NetworkErrorString(WSAGetLastError()));
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
@@ -3200,7 +3183,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
Proxy i2p_sam;
if (GetProxy(NET_I2P, i2p_sam) && connOptions.m_i2p_accept_incoming) {
m_i2p_sam_session = std::make_unique<i2p::sam::Session>(gArgs.GetDataDirNet() / "i2p_private_key",
- i2p_sam.proxy, &interruptNet);
+ i2p_sam, &interruptNet);
}
for (const auto& strDest : connOptions.vSeedNodes) {
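A condensed view of the behavioural change in ConnectNode() above: permission flags are now computed for outbound peers as well, only manual connections are matched against the outgoing whitelist, and the resulting flags travel into CNode via CNodeOptions (with the Implicit-flag expansion now centralized in AddWhitelistPermissionFlags). Sketch only, reusing the names from the hunks above:

    NetPermissionFlags permission_flags{NetPermissionFlags::None};
    const std::vector<NetWhitelistPermissions> ranges =
        conn_type == ConnectionType::MANUAL ? vWhitelistedRangeOutgoing
                                            : std::vector<NetWhitelistPermissions>{};
    AddWhitelistPermissionFlags(permission_flags, addrConnect, ranges);

    CNodeOptions options{
        .permission_flags = permission_flags,
        // ...remaining fields as in the hunk above...
    };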
diff --git a/src/net.h b/src/net.h
index e78e122c44..46d9422695 100644
--- a/src/net.h
+++ b/src/net.h
@@ -53,11 +53,6 @@ class CNode;
class CScheduler;
struct bilingual_str;
-/** Default for -whitelistrelay. */
-static const bool DEFAULT_WHITELISTRELAY = true;
-/** Default for -whitelistforcerelay. */
-static const bool DEFAULT_WHITELISTFORCERELAY = false;
-
/** Time after which to disconnect, after waiting for a ping response (or inactivity). */
static constexpr std::chrono::minutes TIMEOUT_INTERVAL{20};
/** Run the feeler connection loop once every 2 minutes. **/
@@ -1053,7 +1048,8 @@ public:
uint64_t nMaxOutboundLimit = 0;
int64_t m_peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT;
std::vector<std::string> vSeedNodes;
- std::vector<NetWhitelistPermissions> vWhitelistedRange;
+ std::vector<NetWhitelistPermissions> vWhitelistedRangeIncoming;
+ std::vector<NetWhitelistPermissions> vWhitelistedRangeOutgoing;
std::vector<NetWhitebindPermissions> vWhiteBinds;
std::vector<CService> vBinds;
std::vector<CService> onion_binds;
@@ -1064,6 +1060,8 @@ public:
std::vector<std::string> m_specified_outgoing;
std::vector<std::string> m_added_nodes;
bool m_i2p_accept_incoming;
+ bool whitelist_forcerelay = DEFAULT_WHITELISTFORCERELAY;
+ bool whitelist_relay = DEFAULT_WHITELISTRELAY;
};
void Init(const Options& connOptions) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_total_bytes_sent_mutex)
@@ -1087,7 +1085,8 @@ public:
LOCK(m_total_bytes_sent_mutex);
nMaxOutboundLimit = connOptions.nMaxOutboundLimit;
}
- vWhitelistedRange = connOptions.vWhitelistedRange;
+ vWhitelistedRangeIncoming = connOptions.vWhitelistedRangeIncoming;
+ vWhitelistedRangeOutgoing = connOptions.vWhitelistedRangeOutgoing;
{
LOCK(m_added_nodes_mutex);
// Attempt v2 connection if we support v2 - we'll reconnect with v1 if our
@@ -1098,6 +1097,8 @@ public:
}
}
m_onion_binds = connOptions.onion_binds;
+ whitelist_forcerelay = connOptions.whitelist_forcerelay;
+ whitelist_relay = connOptions.whitelist_relay;
}
CConnman(uint64_t seed0, uint64_t seed1, AddrMan& addrman, const NetGroupManager& netgroupman,
@@ -1339,7 +1340,7 @@ private:
bool AttemptToEvictConnection();
CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
- void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const;
+ void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const;
void DeleteNode(CNode* pnode);
@@ -1398,7 +1399,9 @@ private:
// Whitelisted ranges. Any node connecting from these is automatically
// whitelisted (as well as those connecting to whitelisted binds).
- std::vector<NetWhitelistPermissions> vWhitelistedRange;
+ std::vector<NetWhitelistPermissions> vWhitelistedRangeIncoming;
+ // Whitelisted ranges for outgoing connections.
+ std::vector<NetWhitelistPermissions> vWhitelistedRangeOutgoing;
unsigned int nSendBufferMaxSize{0};
unsigned int nReceiveFloodSize{0};
@@ -1552,6 +1555,18 @@ private:
std::vector<CService> m_onion_binds;
/**
+ * flag for adding 'forcerelay' permission to whitelisted inbound
+ * and manual peers with default permissions.
+ */
+ bool whitelist_forcerelay;
+
+ /**
+ * flag for adding 'relay' permission to whitelisted inbound
+ * and manual peers with default permissions.
+ */
+ bool whitelist_relay;
+
+ /**
* Mutex protecting m_i2p_sam_sessions.
*/
Mutex m_unused_i2p_sessions_mutex;
diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp
index a134a55264..b01b2f643d 100644
--- a/src/net_permissions.cpp
+++ b/src/net_permissions.cpp
@@ -21,9 +21,10 @@ const std::vector<std::string> NET_PERMISSIONS_DOC{
namespace {
// Parse the following format: "perm1,perm2@xxxxxx"
-bool TryParsePermissionFlags(const std::string& str, NetPermissionFlags& output, size_t& readen, bilingual_str& error)
+static bool TryParsePermissionFlags(const std::string& str, NetPermissionFlags& output, ConnectionDirection* output_connection_direction, size_t& readen, bilingual_str& error)
{
NetPermissionFlags flags = NetPermissionFlags::None;
+ ConnectionDirection connection_direction = ConnectionDirection::None;
const auto atSeparator = str.find('@');
// if '@' is not found (ie, "xxxxx"), the caller should apply implicit permissions
@@ -52,6 +53,15 @@ bool TryParsePermissionFlags(const std::string& str, NetPermissionFlags& output,
else if (permission == "all") NetPermissions::AddFlag(flags, NetPermissionFlags::All);
else if (permission == "relay") NetPermissions::AddFlag(flags, NetPermissionFlags::Relay);
else if (permission == "addr") NetPermissions::AddFlag(flags, NetPermissionFlags::Addr);
+ else if (permission == "in") connection_direction |= ConnectionDirection::In;
+ else if (permission == "out") {
+ if (output_connection_direction == nullptr) {
+ // Only NetWhitebindPermissions() should pass a nullptr.
+ error = _("whitebind may only be used for incoming connections (\"out\" was passed)");
+ return false;
+ }
+ connection_direction |= ConnectionDirection::Out;
+ }
else if (permission.length() == 0); // Allow empty entries
else {
error = strprintf(_("Invalid P2P permission: '%s'"), permission);
@@ -61,7 +71,16 @@ bool TryParsePermissionFlags(const std::string& str, NetPermissionFlags& output,
readen++;
}
+ // By default, whitelist only applies to incoming connections
+ if (connection_direction == ConnectionDirection::None) {
+ connection_direction = ConnectionDirection::In;
+ } else if (flags == NetPermissionFlags::None) {
+ error = strprintf(_("Only direction was set, no permissions: '%s'"), str);
+ return false;
+ }
+
output = flags;
+ if (output_connection_direction) *output_connection_direction = connection_direction;
error = Untranslated("");
return true;
}
@@ -85,7 +104,7 @@ bool NetWhitebindPermissions::TryParse(const std::string& str, NetWhitebindPermi
{
NetPermissionFlags flags;
size_t offset;
- if (!TryParsePermissionFlags(str, flags, offset, error)) return false;
+ if (!TryParsePermissionFlags(str, flags, /*output_connection_direction=*/nullptr, offset, error)) return false;
const std::string strBind = str.substr(offset);
const std::optional<CService> addrBind{Lookup(strBind, 0, false)};
@@ -104,11 +123,12 @@ bool NetWhitebindPermissions::TryParse(const std::string& str, NetWhitebindPermi
return true;
}
-bool NetWhitelistPermissions::TryParse(const std::string& str, NetWhitelistPermissions& output, bilingual_str& error)
+bool NetWhitelistPermissions::TryParse(const std::string& str, NetWhitelistPermissions& output, ConnectionDirection& output_connection_direction, bilingual_str& error)
{
NetPermissionFlags flags;
size_t offset;
- if (!TryParsePermissionFlags(str, flags, offset, error)) return false;
+ // Only NetWhitebindPermissions should pass a nullptr for output_connection_direction.
+ if (!TryParsePermissionFlags(str, flags, &output_connection_direction, offset, error)) return false;
const std::string net = str.substr(offset);
const CSubNet subnet{LookupSubNet(net)};
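The parser above now reports a connection direction alongside the flags, so -whitelist entries can be scoped with "in" and/or "out". A usage sketch against the new TryParse() signature (the subnet literal is only an example):

    NetWhitelistPermissions perms;
    ConnectionDirection direction;
    bilingual_str error;
    if (NetWhitelistPermissions::TryParse("relay,forcerelay,in,out@1.2.3.4/32",
                                          perms, direction, error)) {
        if (direction & ConnectionDirection::In)  { /* apply to inbound peers */ }
        if (direction & ConnectionDirection::Out) { /* apply to manual outbound peers */ }
    } else {
        // `error` carries a translated message, e.g.
        // "Only direction was set, no permissions: ..."
    }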
diff --git a/src/net_permissions.h b/src/net_permissions.h
index b7f3bffe1c..33babd6204 100644
--- a/src/net_permissions.h
+++ b/src/net_permissions.h
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <netaddress.h>
+#include <netbase.h>
#include <string>
#include <type_traits>
@@ -15,6 +16,11 @@ struct bilingual_str;
extern const std::vector<std::string> NET_PERMISSIONS_DOC;
+/** Default for -whitelistrelay. */
+constexpr bool DEFAULT_WHITELISTRELAY = true;
+/** Default for -whitelistforcerelay. */
+constexpr bool DEFAULT_WHITELISTFORCERELAY = false;
+
enum class NetPermissionFlags : uint32_t {
None = 0,
// Can query bloomfilter even if -peerbloomfilters is false
@@ -83,7 +89,7 @@ public:
class NetWhitelistPermissions : public NetPermissions
{
public:
- static bool TryParse(const std::string& str, NetWhitelistPermissions& output, bilingual_str& error);
+ static bool TryParse(const std::string& str, NetWhitelistPermissions& output, ConnectionDirection& output_connection_direction, bilingual_str& error);
CSubNet m_subnet;
};
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index c77fcbff3e..6996af38cb 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -582,6 +582,20 @@ private:
*/
bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
+ /** Handle a transaction whose result was not MempoolAcceptResult::ResultType::VALID.
+ * @param[in] maybe_add_extra_compact_tx Whether this tx should be added to vExtraTxnForCompact.
+ * Set to false if the tx has already been rejected before,
+ * e.g. is an orphan, to avoid adding duplicate entries.
+ * Updates m_txrequest, m_recent_rejects, m_orphanage, and vExtraTxnForCompact. */
+ void ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result,
+ bool maybe_add_extra_compact_tx)
+ EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+
+ /** Handle a transaction whose result was MempoolAcceptResult::ResultType::VALID.
+ * Updates m_txrequest, m_orphanage, and vExtraTxnForCompact. Also queues the tx for relay. */
+ void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
+ EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+
/**
* Reconsider orphan transactions after a parent has been accepted to the mempool.
*
@@ -1588,6 +1602,11 @@ void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services)
m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn()));
assert(m_txrequest.Count(nodeid) == 0);
}
+
+ if (NetPermissions::HasFlag(node.m_permission_flags, NetPermissionFlags::BloomFilter)) {
+ our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
+ }
+
PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
{
LOCK(m_peer_mutex);
@@ -3049,6 +3068,91 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
return;
}
+void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state,
+ bool maybe_add_extra_compact_tx)
+{
+ AssertLockNotHeld(m_peer_mutex);
+ AssertLockHeld(g_msgproc_mutex);
+ AssertLockHeld(cs_main);
+
+ LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
+ ptx->GetHash().ToString(),
+ ptx->GetWitnessHash().ToString(),
+ nodeid,
+ state.ToString());
+
+ if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
+ return;
+ } else if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
+ // We can add the wtxid of this transaction to our reject filter.
+ // Do not add txids of witness transactions or witness-stripped
+ // transactions to the filter, as they can have been malleated;
+ // adding such txids to the reject filter would potentially
+ // interfere with relay of valid transactions from peers that
+ // do not support wtxid-based relay. See
+ // https://github.com/bitcoin/bitcoin/issues/8279 for details.
+ // We can remove this restriction (and always add wtxids to
+ // the filter even for witness stripped transactions) once
+ // wtxid-based relay is broadly deployed.
+ // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
+ // for concerns around weakening security of unupgraded nodes
+ // if we start doing this too early.
+ m_recent_rejects.insert(ptx->GetWitnessHash().ToUint256());
+ m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) {
+ m_recent_rejects.insert(ptx->GetHash().ToUint256());
+ m_txrequest.ForgetTxHash(ptx->GetHash());
+ }
+ if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
+ AddToCompactExtraTransactions(ptx);
+ }
+ }
+
+ MaybePunishNodeForTx(nodeid, state);
+
+ // If the tx failed in ProcessOrphanTx, it should be removed from the orphanage unless the
+ // tx was still missing inputs. If the tx was not in the orphanage, EraseTx does nothing and returns 0.
+ if (Assume(state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) && m_orphanage.EraseTx(ptx->GetHash()) > 0) {
+ LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString());
+ }
+}
+
+void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
+{
+ AssertLockNotHeld(m_peer_mutex);
+ AssertLockHeld(g_msgproc_mutex);
+ AssertLockHeld(cs_main);
+
+ // As this version of the transaction was acceptable, we can forget about any requests for it.
+ // No-op if the tx is not in txrequest.
+ m_txrequest.ForgetTxHash(tx->GetHash());
+ m_txrequest.ForgetTxHash(tx->GetWitnessHash());
+
+ m_orphanage.AddChildrenToWorkSet(*tx);
+ // If it came from the orphanage, remove it. No-op if the tx is not in txorphanage.
+ m_orphanage.EraseTx(tx->GetHash());
+
+ LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
+ nodeid,
+ tx->GetHash().ToString(),
+ tx->GetWitnessHash().ToString(),
+ m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
+
+ RelayTransaction(tx->GetHash(), tx->GetWitnessHash());
+
+ for (const CTransactionRef& removedTx : replaced_transactions) {
+ AddToCompactExtraTransactions(removedTx);
+ }
+}
+
bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
{
AssertLockHeld(g_msgproc_mutex);
@@ -3064,66 +3168,23 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
- LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
- peer.m_id,
- orphanHash.ToString(),
- orphan_wtxid.ToString(),
- m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
- RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
- m_orphanage.AddChildrenToWorkSet(*porphanTx);
- m_orphanage.EraseTx(orphanHash);
- for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
- AddToCompactExtraTransactions(removedTx);
- }
+ Assume(result.m_replaced_transactions.has_value());
+ std::list<CTransactionRef> empty_replacement_list;
+ ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions.value_or(empty_replacement_list));
return true;
} else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
- if (state.IsInvalid()) {
- LogPrint(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
- orphanHash.ToString(),
- orphan_wtxid.ToString(),
- peer.m_id,
- state.ToString());
- LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
- orphanHash.ToString(),
- orphan_wtxid.ToString(),
- peer.m_id,
- state.ToString());
- // Maybe punish peer that gave us an invalid orphan tx
- MaybePunishNodeForTx(peer.m_id, state);
- }
- // Has inputs but not accepted to mempool
- // Probably non-standard or insufficient fee
- LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
- if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
- // We can add the wtxid of this transaction to our reject filter.
- // Do not add txids of witness transactions or witness-stripped
- // transactions to the filter, as they can have been malleated;
- // adding such txids to the reject filter would potentially
- // interfere with relay of valid transactions from peers that
- // do not support wtxid-based relay. See
- // https://github.com/bitcoin/bitcoin/issues/8279 for details.
- // We can remove this restriction (and always add wtxids to
- // the filter even for witness stripped transactions) once
- // wtxid-based relay is broadly deployed.
- // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
- // for concerns around weakening security of unupgraded nodes
- // if we start doing this too early.
- m_recent_rejects.insert(porphanTx->GetWitnessHash().ToUint256());
- // If the transaction failed for TX_INPUTS_NOT_STANDARD,
- // then we know that the witness was irrelevant to the policy
- // failure, since this check depends only on the txid
- // (the scriptPubKey being spent is covered by the txid).
- // Add the txid to the reject filter to prevent repeated
- // processing of this transaction in the event that child
- // transactions are later received (resulting in
- // parent-fetching by txid via the orphan-handling logic).
- if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->HasWitness()) {
- // We only add the txid if it differs from the wtxid, to
- // avoid wasting entries in the rolling bloom filter.
- m_recent_rejects.insert(porphanTx->GetHash().ToUint256());
- }
+ LogPrint(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
+ orphanHash.ToString(),
+ orphan_wtxid.ToString(),
+ peer.m_id,
+ state.ToString());
+
+ if (Assume(state.IsInvalid() &&
+ state.GetResult() != TxValidationResult::TX_UNKNOWN &&
+ state.GetResult() != TxValidationResult::TX_NO_MEMPOOL &&
+ state.GetResult() != TxValidationResult::TX_RESULT_UNSET)) {
+ ProcessInvalidTx(peer.m_id, porphanTx, state, /*maybe_add_extra_compact_tx=*/false);
}
- m_orphanage.EraseTx(orphanHash);
return true;
}
}
@@ -4293,24 +4354,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
const TxValidationState& state = result.m_state;
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
- // As this version of the transaction was acceptable, we can forget about any
- // requests for it.
- m_txrequest.ForgetTxHash(tx.GetHash());
- m_txrequest.ForgetTxHash(tx.GetWitnessHash());
- RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
- m_orphanage.AddChildrenToWorkSet(tx);
-
+ ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions.value());
pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
-
- LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
- pfrom.GetId(),
- tx.GetHash().ToString(),
- tx.GetWitnessHash().ToString(),
- m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
-
- for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
- AddToCompactExtraTransactions(removedTx);
- }
}
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
{
@@ -4371,48 +4416,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
}
- } else {
- if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
- // We can add the wtxid of this transaction to our reject filter.
- // Do not add txids of witness transactions or witness-stripped
- // transactions to the filter, as they can have been malleated;
- // adding such txids to the reject filter would potentially
- // interfere with relay of valid transactions from peers that
- // do not support wtxid-based relay. See
- // https://github.com/bitcoin/bitcoin/issues/8279 for details.
- // We can remove this restriction (and always add wtxids to
- // the filter even for witness stripped transactions) once
- // wtxid-based relay is broadly deployed.
- // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
- // for concerns around weakening security of unupgraded nodes
- // if we start doing this too early.
- m_recent_rejects.insert(tx.GetWitnessHash().ToUint256());
- m_txrequest.ForgetTxHash(tx.GetWitnessHash());
- // If the transaction failed for TX_INPUTS_NOT_STANDARD,
- // then we know that the witness was irrelevant to the policy
- // failure, since this check depends only on the txid
- // (the scriptPubKey being spent is covered by the txid).
- // Add the txid to the reject filter to prevent repeated
- // processing of this transaction in the event that child
- // transactions are later received (resulting in
- // parent-fetching by txid via the orphan-handling logic).
- if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.HasWitness()) {
- m_recent_rejects.insert(tx.GetHash().ToUint256());
- m_txrequest.ForgetTxHash(tx.GetHash());
- }
- if (RecursiveDynamicUsage(*ptx) < 100000) {
- AddToCompactExtraTransactions(ptx);
- }
- }
}
-
if (state.IsInvalid()) {
- LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
- tx.GetHash().ToString(),
- tx.GetWitnessHash().ToString(),
- pfrom.GetId(),
- state.ToString());
- MaybePunishNodeForTx(pfrom.GetId(), state);
+ ProcessInvalidTx(pfrom.GetId(), ptx, state, /*maybe_add_extra_compact_tx=*/true);
}
return;
}
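The two helpers introduced above centralize logic that ProcessMessage and ProcessOrphanTx previously duplicated. A rough sketch of the resulting dispatch, assuming it lives inside PeerManagerImpl with the same lock requirements as the helpers (the wrapper name is illustrative):

    // Inside PeerManagerImpl; sketch only.
    void HandleMempoolAcceptance(NodeId nodeid, const CTransactionRef& ptx,
                                 const MempoolAcceptResult& result)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main)
    {
        if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
            ProcessValidTx(nodeid, ptx,
                           result.m_replaced_transactions.value_or(std::list<CTransactionRef>{}));
        } else if (result.m_state.IsInvalid()) {
            // Pass maybe_add_extra_compact_tx=false instead when the tx was
            // already seen before (e.g. it came from the orphanage).
            ProcessInvalidTx(nodeid, ptx, result.m_state, /*maybe_add_extra_compact_tx=*/true);
        }
    }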
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 7530334db1..74ab6dd8d8 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -818,6 +818,19 @@ bool CService::SetSockAddr(const struct sockaddr *paddr)
}
}
+sa_family_t CService::GetSAFamily() const
+{
+ switch (m_net) {
+ case NET_IPV4:
+ return AF_INET;
+ case NET_IPV6:
+ case NET_CJDNS:
+ return AF_INET6;
+ default:
+ return AF_UNSPEC;
+ }
+}
+
uint16_t CService::GetPort() const
{
return port;
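GetSAFamily() pairs with the reworked socket factory: callers now derive the address family from the service and hand it to CreateSock(), as BindListenPort() does in net.cpp above. A brief usage sketch, assuming the CreateSock/CreateSockOS declarations from netbase.h later in this diff:

    #include <netaddress.h>
    #include <netbase.h>

    const CService addr{LookupNumeric("127.0.0.1", 8333)};
    std::unique_ptr<Sock> sock{CreateSock(addr.GetSAFamily())}; // AF_INET in this example
    if (!sock) {
        // Either the family was AF_UNSPEC (unknown network) or socket() failed.
    }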
diff --git a/src/netaddress.h b/src/netaddress.h
index c697b7e0a3..c63bd4b4e5 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -540,6 +540,11 @@ public:
uint16_t GetPort() const;
bool GetSockAddr(struct sockaddr* paddr, socklen_t* addrlen) const;
bool SetSockAddr(const struct sockaddr* paddr);
+ /**
+ * Get the address family
+ * @returns AF_UNSPEC if unspecified
+ */
+ [[nodiscard]] sa_family_t GetSAFamily() const;
friend bool operator==(const CService& a, const CService& b);
friend bool operator!=(const CService& a, const CService& b) { return !(a == b); }
friend bool operator<(const CService& a, const CService& b);
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 9fbd9f7dea..3ca1a5227a 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -3,6 +3,10 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#if defined(HAVE_CONFIG_H)
+#include <config/bitcoin-config.h>
+#endif
+
#include <netbase.h>
#include <compat/compat.h>
@@ -21,6 +25,10 @@
#include <limits>
#include <memory>
+#if HAVE_SOCKADDR_UN
+#include <sys/un.h>
+#endif
+
// Settings
static GlobalMutex g_proxyinfo_mutex;
static Proxy proxyInfo[NET_MAX] GUARDED_BY(g_proxyinfo_mutex);
@@ -208,6 +216,24 @@ CService LookupNumeric(const std::string& name, uint16_t portDefault, DNSLookupF
return Lookup(name, portDefault, /*fAllowLookup=*/false, dns_lookup_function).value_or(CService{});
}
+bool IsUnixSocketPath(const std::string& name)
+{
+#if HAVE_SOCKADDR_UN
+ if (name.find(ADDR_PREFIX_UNIX) != 0) return false;
+
+ // Split off "unix:" prefix
+ std::string str{name.substr(ADDR_PREFIX_UNIX.length())};
+
+ // Path size limit is platform-dependent
+ // see https://manpages.ubuntu.com/manpages/xenial/en/man7/unix.7.html
+ if (str.size() + 1 > sizeof(((sockaddr_un*)nullptr)->sun_path)) return false;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
/** SOCKS version */
enum SOCKSVersion: uint8_t {
SOCKS4 = 0x04,
@@ -338,7 +364,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a
IntrRecvError recvr;
LogPrint(BCLog::NET, "SOCKS5 connecting %s\n", strDest);
if (strDest.size() > 255) {
- return error("Hostname too long");
+ LogError("Hostname too long\n");
+ return false;
}
// Construct the version identifier/method selection message
std::vector<uint8_t> vSocks5Init;
@@ -358,14 +385,17 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a
return false;
}
if (pchRet1[0] != SOCKSVersion::SOCKS5) {
- return error("Proxy failed to initialize");
+ LogError("Proxy failed to initialize\n");
+ return false;
}
if (pchRet1[1] == SOCKS5Method::USER_PASS && auth) {
// Perform username/password authentication (as described in RFC1929)
std::vector<uint8_t> vAuth;
vAuth.push_back(0x01); // Current (and only) version of user/pass subnegotiation
- if (auth->username.size() > 255 || auth->password.size() > 255)
- return error("Proxy username or password too long");
+ if (auth->username.size() > 255 || auth->password.size() > 255) {
+ LogError("Proxy username or password too long\n");
+ return false;
+ }
vAuth.push_back(auth->username.size());
vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end());
vAuth.push_back(auth->password.size());
@@ -374,15 +404,18 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a
LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password);
uint8_t pchRetA[2];
if (InterruptibleRecv(pchRetA, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) {
- return error("Error reading proxy authentication response");
+ LogError("Error reading proxy authentication response\n");
+ return false;
}
if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) {
- return error("Proxy authentication unsuccessful");
+ LogError("Proxy authentication unsuccessful\n");
+ return false;
}
} else if (pchRet1[1] == SOCKS5Method::NOAUTH) {
// Perform no authentication
} else {
- return error("Proxy requested wrong authentication method %02x", pchRet1[1]);
+ LogError("Proxy requested wrong authentication method %02x\n", pchRet1[1]);
+ return false;
}
std::vector<uint8_t> vSocks5;
vSocks5.push_back(SOCKSVersion::SOCKS5); // VER protocol version
@@ -402,11 +435,13 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a
* error message. */
return false;
} else {
- return error("Error while reading proxy response");
+ LogError("Error while reading proxy response\n");
+ return false;
}
}
if (pchRet2[0] != SOCKSVersion::SOCKS5) {
- return error("Proxy failed to accept request");
+ LogError("Proxy failed to accept request\n");
+ return false;
}
if (pchRet2[1] != SOCKS5Reply::SUCCEEDED) {
// Failures to connect to a peer that are not proxy errors
@@ -414,7 +449,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a
return false;
}
if (pchRet2[2] != 0x00) { // Reserved field must be 0
- return error("Error: malformed proxy response");
+ LogError("Error: malformed proxy response\n");
+ return false;
}
uint8_t pchRet3[256];
switch (pchRet2[3]) {
@@ -423,39 +459,46 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a
case SOCKS5Atyp::DOMAINNAME: {
recvr = InterruptibleRecv(pchRet3, 1, g_socks5_recv_timeout, sock);
if (recvr != IntrRecvError::OK) {
- return error("Error reading from proxy");
+ LogError("Error reading from proxy\n");
+ return false;
}
int nRecv = pchRet3[0];
recvr = InterruptibleRecv(pchRet3, nRecv, g_socks5_recv_timeout, sock);
break;
}
- default: return error("Error: malformed proxy response");
+ default: {
+ LogError("Error: malformed proxy response\n");
+ return false;
+ }
}
if (recvr != IntrRecvError::OK) {
- return error("Error reading from proxy");
+ LogError("Error reading from proxy\n");
+ return false;
}
if (InterruptibleRecv(pchRet3, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) {
- return error("Error reading from proxy");
+ LogError("Error reading from proxy\n");
+ return false;
}
LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest);
return true;
} catch (const std::runtime_error& e) {
- return error("Error during SOCKS5 proxy handshake: %s", e.what());
+ LogError("Error during SOCKS5 proxy handshake: %s\n", e.what());
+ return false;
}
}
-std::unique_ptr<Sock> CreateSockTCP(const CService& address_family)
+std::unique_ptr<Sock> CreateSockOS(sa_family_t address_family)
{
- // Create a sockaddr from the specified service.
- struct sockaddr_storage sockaddr;
- socklen_t len = sizeof(sockaddr);
- if (!address_family.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
- LogPrintf("Cannot create socket for %s: unsupported network\n", address_family.ToStringAddrPort());
- return nullptr;
- }
+ // Not IPv4, IPv6 or UNIX
+ if (address_family == AF_UNSPEC) return nullptr;
+
+ int protocol{IPPROTO_TCP};
+#if HAVE_SOCKADDR_UN
+ if (address_family == AF_UNIX) protocol = 0;
+#endif
- // Create a TCP socket in the address family of the specified service.
- SOCKET hSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP);
+ // Create a socket in the specified address family.
+ SOCKET hSocket = socket(address_family, SOCK_STREAM, protocol);
if (hSocket == INVALID_SOCKET) {
return nullptr;
}
@@ -479,21 +522,25 @@ std::unique_ptr<Sock> CreateSockTCP(const CService& address_family)
}
#endif
- // Set the no-delay option (disable Nagle's algorithm) on the TCP socket.
- const int on{1};
- if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) {
- LogPrint(BCLog::NET, "Unable to set TCP_NODELAY on a newly created socket, continuing anyway\n");
- }
-
// Set the non-blocking option on the socket.
if (!sock->SetNonBlocking()) {
LogPrintf("Error setting socket to non-blocking: %s\n", NetworkErrorString(WSAGetLastError()));
return nullptr;
}
+
+#if HAVE_SOCKADDR_UN
+ if (address_family == AF_UNIX) return sock;
+#endif
+
+ // Set the no-delay option (disable Nagle's algorithm) on the TCP socket.
+ const int on{1};
+ if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) {
+ LogPrint(BCLog::NET, "Unable to set TCP_NODELAY on a newly created socket, continuing anyway\n");
+ }
return sock;
}
-std::function<std::unique_ptr<Sock>(const CService&)> CreateSock = CreateSockTCP;
+std::function<std::unique_ptr<Sock>(const sa_family_t&)> CreateSock = CreateSockOS;
template<typename... Args>
static void LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args) {
@@ -505,18 +552,10 @@ static void LogConnectFailure(bool manual_connection, const char* fmt, const Arg
}
}
-bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nTimeout, bool manual_connection)
+static bool ConnectToSocket(const Sock& sock, struct sockaddr* sockaddr, socklen_t len, const std::string& dest_str, bool manual_connection)
{
- // Create a sockaddr from the specified service.
- struct sockaddr_storage sockaddr;
- socklen_t len = sizeof(sockaddr);
- if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
- LogPrintf("Cannot connect to %s: unsupported network\n", addrConnect.ToStringAddrPort());
- return false;
- }
-
- // Connect to the addrConnect service on the hSocket socket.
- if (sock.Connect(reinterpret_cast<struct sockaddr*>(&sockaddr), len) == SOCKET_ERROR) {
+ // Connect to `sockaddr` using `sock`.
+ if (sock.Connect(sockaddr, len) == SOCKET_ERROR) {
int nErr = WSAGetLastError();
// WSAEINVAL is here because some legacy version of winsock uses it
if (nErr == WSAEINPROGRESS || nErr == WSAEWOULDBLOCK || nErr == WSAEINVAL)
@@ -526,13 +565,13 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT
// synchronously to check for successful connection with a timeout.
const Sock::Event requested = Sock::RECV | Sock::SEND;
Sock::Event occurred;
- if (!sock.Wait(std::chrono::milliseconds{nTimeout}, requested, &occurred)) {
+ if (!sock.Wait(std::chrono::milliseconds{nConnectTimeout}, requested, &occurred)) {
LogPrintf("wait for connect to %s failed: %s\n",
- addrConnect.ToStringAddrPort(),
+ dest_str,
NetworkErrorString(WSAGetLastError()));
return false;
} else if (occurred == 0) {
- LogPrint(BCLog::NET, "connection attempt to %s timed out\n", addrConnect.ToStringAddrPort());
+ LogPrint(BCLog::NET, "connection attempt to %s timed out\n", dest_str);
return false;
}
@@ -544,13 +583,13 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT
socklen_t sockerr_len = sizeof(sockerr);
if (sock.GetSockOpt(SOL_SOCKET, SO_ERROR, (sockopt_arg_type)&sockerr, &sockerr_len) ==
SOCKET_ERROR) {
- LogPrintf("getsockopt() for %s failed: %s\n", addrConnect.ToStringAddrPort(), NetworkErrorString(WSAGetLastError()));
+ LogPrintf("getsockopt() for %s failed: %s\n", dest_str, NetworkErrorString(WSAGetLastError()));
return false;
}
if (sockerr != 0) {
LogConnectFailure(manual_connection,
"connect() to %s failed after wait: %s",
- addrConnect.ToStringAddrPort(),
+ dest_str,
NetworkErrorString(sockerr));
return false;
}
@@ -561,13 +600,72 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT
else
#endif
{
- LogConnectFailure(manual_connection, "connect() to %s failed: %s", addrConnect.ToStringAddrPort(), NetworkErrorString(WSAGetLastError()));
+ LogConnectFailure(manual_connection, "connect() to %s failed: %s", dest_str, NetworkErrorString(WSAGetLastError()));
return false;
}
}
return true;
}
+std::unique_ptr<Sock> ConnectDirectly(const CService& dest, bool manual_connection)
+{
+ auto sock = CreateSock(dest.GetSAFamily());
+ if (!sock) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Cannot create a socket for connecting to %s\n", dest.ToStringAddrPort());
+ return {};
+ }
+
+ // Create a sockaddr from the specified service.
+ struct sockaddr_storage sockaddr;
+ socklen_t len = sizeof(sockaddr);
+ if (!dest.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
+ LogPrintf("Cannot get sockaddr for %s: unsupported network\n", dest.ToStringAddrPort());
+ return {};
+ }
+
+ if (!ConnectToSocket(*sock, (struct sockaddr*)&sockaddr, len, dest.ToStringAddrPort(), manual_connection)) {
+ return {};
+ }
+
+ return sock;
+}
+
+std::unique_ptr<Sock> Proxy::Connect() const
+{
+ if (!IsValid()) {
+ LogPrintf("Cannot connect to invalid Proxy\n");
+ return {};
+ }
+
+ if (!m_is_unix_socket) return ConnectDirectly(proxy, /*manual_connection=*/true);
+
+#if HAVE_SOCKADDR_UN
+ auto sock = CreateSock(AF_UNIX);
+ if (!sock) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Cannot create a socket for connecting to %s\n", m_unix_socket_path);
+ return {};
+ }
+
+ const std::string path{m_unix_socket_path.substr(ADDR_PREFIX_UNIX.length())};
+
+ struct sockaddr_un addrun;
+ memset(&addrun, 0, sizeof(addrun));
+ addrun.sun_family = AF_UNIX;
+ // leave the last char in addrun.sun_path[] to be always '\0'
+ memcpy(addrun.sun_path, path.c_str(), std::min(sizeof(addrun.sun_path) - 1, path.length()));
+ socklen_t len = sizeof(addrun);
+
+ if(!ConnectToSocket(*sock, (struct sockaddr*)&addrun, len, path, /*manual_connection=*/true)) {
+ LogPrintf("Cannot connect to socket for %s\n", path);
+ return {};
+ }
+
+ return sock;
+#else
+ return {};
+#endif
+}
+
bool SetProxy(enum Network net, const Proxy &addrProxy) {
assert(net >= 0 && net < NET_MAX);
if (!addrProxy.IsValid())
@@ -616,27 +714,32 @@ bool IsProxy(const CNetAddr &addr) {
return false;
}
-bool ConnectThroughProxy(const Proxy& proxy, const std::string& strDest, uint16_t port, const Sock& sock, int nTimeout, bool& outProxyConnectionFailed)
+std::unique_ptr<Sock> ConnectThroughProxy(const Proxy& proxy,
+ const std::string& dest,
+ uint16_t port,
+ bool& proxy_connection_failed)
{
// first connect to proxy server
- if (!ConnectSocketDirectly(proxy.proxy, sock, nTimeout, true)) {
- outProxyConnectionFailed = true;
- return false;
+ auto sock = proxy.Connect();
+ if (!sock) {
+ proxy_connection_failed = true;
+ return {};
}
+
// do socks negotiation
- if (proxy.randomize_credentials) {
+ if (proxy.m_randomize_credentials) {
ProxyCredentials random_auth;
static std::atomic_int counter(0);
random_auth.username = random_auth.password = strprintf("%i", counter++);
- if (!Socks5(strDest, port, &random_auth, sock)) {
- return false;
+ if (!Socks5(dest, port, &random_auth, *sock)) {
+ return {};
}
} else {
- if (!Socks5(strDest, port, nullptr, sock)) {
- return false;
+ if (!Socks5(dest, port, nullptr, *sock)) {
+ return {};
}
}
- return true;
+ return sock;
}
CSubNet LookupSubNet(const std::string& subnet_str)
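After this refactor the connect helpers create and return the socket themselves instead of operating on a caller-provided one. A sketch of the resulting caller-side flow, mirroring ConnectNode() in net.cpp above (use_proxy, proxy, dest_addr, dest_port and dest_service are illustrative surrounding variables):

    bool proxy_connection_failed{false};
    std::unique_ptr<Sock> sock;
    if (use_proxy) {
        // Proxy::Connect() picks a TCP or unix domain socket internally.
        sock = ConnectThroughProxy(proxy, dest_addr, dest_port, proxy_connection_failed);
    } else {
        sock = ConnectDirectly(dest_service, /*manual_connection=*/false);
    }
    if (!sock) {
        // Connection failed; proxy_connection_failed says whether the proxy
        // itself was unreachable.
    }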
diff --git a/src/netbase.h b/src/netbase.h
index 1bd95ba0d9..321c288f67 100644
--- a/src/netbase.h
+++ b/src/netbase.h
@@ -27,6 +27,9 @@ static const int DEFAULT_CONNECT_TIMEOUT = 5000;
//! -dns default
static const int DEFAULT_NAME_LOOKUP = true;
+/** Prefix for unix domain socket addresses (which are local filesystem paths) */
+const std::string ADDR_PREFIX_UNIX = "unix:";
+
enum class ConnectionDirection {
None = 0,
In = (1U << 0),
@@ -43,16 +46,46 @@ static inline bool operator&(ConnectionDirection a, ConnectionDirection b) {
return (underlying(a) & underlying(b));
}
+/**
+ * Check if a string is a valid UNIX domain socket path
+ *
+ * @param name The string provided by the user representing a local path
+ *
+ * @returns Whether the string has proper format, length, and points to an existing file path
+ */
+bool IsUnixSocketPath(const std::string& name);
+
class Proxy
{
public:
- Proxy(): randomize_credentials(false) {}
- explicit Proxy(const CService &_proxy, bool _randomize_credentials=false): proxy(_proxy), randomize_credentials(_randomize_credentials) {}
-
- bool IsValid() const { return proxy.IsValid(); }
+ Proxy() : m_is_unix_socket(false), m_randomize_credentials(false) {}
+ explicit Proxy(const CService& _proxy, bool _randomize_credentials = false) : proxy(_proxy), m_is_unix_socket(false), m_randomize_credentials(_randomize_credentials) {}
+ explicit Proxy(const std::string path, bool _randomize_credentials = false) : m_unix_socket_path(path), m_is_unix_socket(true), m_randomize_credentials(_randomize_credentials) {}
CService proxy;
- bool randomize_credentials;
+ std::string m_unix_socket_path;
+ bool m_is_unix_socket;
+ bool m_randomize_credentials;
+
+ bool IsValid() const
+ {
+ if (m_is_unix_socket) return IsUnixSocketPath(m_unix_socket_path);
+ return proxy.IsValid();
+ }
+
+ sa_family_t GetFamily() const
+ {
+ if (m_is_unix_socket) return AF_UNIX;
+ return proxy.GetSAFamily();
+ }
+
+ std::string ToString() const
+ {
+ if (m_is_unix_socket) return m_unix_socket_path;
+ return proxy.ToStringAddrPort();
+ }
+
+ std::unique_ptr<Sock> Connect() const;
};
/** Credentials for proxy authentication */
@@ -229,47 +262,42 @@ CService LookupNumeric(const std::string& name, uint16_t portDefault = 0, DNSLoo
CSubNet LookupSubNet(const std::string& subnet_str);
/**
- * Create a TCP socket in the given address family.
- * @param[in] address_family The socket is created in the same address family as this address.
+ * Create a TCP or UNIX socket in the given address family.
+ * @param[in] address_family to use for the socket.
* @return pointer to the created Sock object or unique_ptr that owns nothing in case of failure
*/
-std::unique_ptr<Sock> CreateSockTCP(const CService& address_family);
+std::unique_ptr<Sock> CreateSockOS(sa_family_t address_family);
/**
- * Socket factory. Defaults to `CreateSockTCP()`, but can be overridden by unit tests.
+ * Socket factory. Defaults to `CreateSockOS()`, but can be overridden by unit tests.
*/
-extern std::function<std::unique_ptr<Sock>(const CService&)> CreateSock;
+extern std::function<std::unique_ptr<Sock>(const sa_family_t&)> CreateSock;
/**
- * Try to connect to the specified service on the specified socket.
+ * Create a socket and try to connect to the specified service.
*
- * @param addrConnect The service to which to connect.
- * @param sock The socket on which to connect.
- * @param nTimeout Wait this many milliseconds for the connection to be
- * established.
- * @param manual_connection Whether or not the connection was manually requested
- * (e.g. through the addnode RPC)
+ * @param[in] dest The service to which to connect.
+ * @param[in] manual_connection Whether or not the connection was manually requested (e.g. through the addnode RPC)
*
- * @returns Whether or not a connection was successfully made.
+ * @returns the connected socket if the operation succeeded, empty unique_ptr otherwise
*/
-bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nTimeout, bool manual_connection);
+std::unique_ptr<Sock> ConnectDirectly(const CService& dest, bool manual_connection);
/**
* Connect to a specified destination service through a SOCKS5 proxy by first
* connecting to the SOCKS5 proxy.
*
- * @param proxy The SOCKS5 proxy.
- * @param strDest The destination service to which to connect.
- * @param port The destination port.
- * @param sock The socket on which to connect to the SOCKS5 proxy.
- * @param nTimeout Wait this many milliseconds for the connection to the SOCKS5
- * proxy to be established.
- * @param[out] outProxyConnectionFailed Whether or not the connection to the
- * SOCKS5 proxy failed.
+ * @param[in] proxy The SOCKS5 proxy.
+ * @param[in] dest The destination service to which to connect.
+ * @param[in] port The destination port.
+ * @param[out] proxy_connection_failed Whether or not the connection to the SOCKS5 proxy failed.
*
- * @returns Whether or not the operation succeeded.
+ * @returns the connected socket if the operation succeeded. Otherwise an empty unique_ptr.
*/
-bool ConnectThroughProxy(const Proxy& proxy, const std::string& strDest, uint16_t port, const Sock& sock, int nTimeout, bool& outProxyConnectionFailed);
+std::unique_ptr<Sock> ConnectThroughProxy(const Proxy& proxy,
+ const std::string& dest,
+ uint16_t port,
+ bool& proxy_connection_failed);
/**
* Interrupt SOCKS5 reads or writes.
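The expanded Proxy class is what lets callers stay agnostic about the underlying transport. A small sketch constructing both flavours and connecting through whichever is valid (paths and ports are examples only):

    #include <netbase.h>

    Proxy tor_tcp{LookupNumeric("127.0.0.1", 9050), /*_randomize_credentials=*/true};
    Proxy tor_unix{std::string{"unix:/var/run/tor/socket"}, /*_randomize_credentials=*/true};

    const Proxy& chosen = tor_unix.IsValid() ? tor_unix : tor_tcp;
    LogPrintf("Using proxy %s\n", chosen.ToString());
    std::unique_ptr<Sock> sock{chosen.Connect()}; // TCP connect or unix-socket connect internally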
diff --git a/src/node/abort.cpp b/src/node/abort.cpp
index 1bdc91670d..b727608384 100644
--- a/src/node/abort.cpp
+++ b/src/node/abort.cpp
@@ -16,14 +16,13 @@
namespace node {
-void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const std::string& debug_message, const bilingual_str& user_message)
+void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message)
{
- SetMiscWarning(Untranslated(debug_message));
- LogPrintf("*** %s\n", debug_message);
- InitError(user_message.empty() ? _("A fatal internal error occurred, see debug.log for details") : user_message);
+ SetMiscWarning(message);
+ InitError(_("A fatal internal error occurred, see debug.log for details: ") + message);
exit_status.store(EXIT_FAILURE);
if (shutdown && !(*shutdown)()) {
- LogPrintf("Error: failed to send shutdown signal\n");
+ LogError("Failed to send shutdown signal\n");
};
}
} // namespace node
diff --git a/src/node/abort.h b/src/node/abort.h
index 28d021cc78..1092279142 100644
--- a/src/node/abort.h
+++ b/src/node/abort.h
@@ -5,17 +5,16 @@
#ifndef BITCOIN_NODE_ABORT_H
#define BITCOIN_NODE_ABORT_H
-#include <util/translation.h>
-
#include <atomic>
-#include <string>
+
+struct bilingual_str;
namespace util {
class SignalInterrupt;
} // namespace util
namespace node {
-void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const std::string& debug_message, const bilingual_str& user_message = {});
+void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message);
} // namespace node
#endif // BITCOIN_NODE_ABORT_H
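Call sites now pass a single translated message. A sketch of invoking the narrowed AbortNode() signature (the function wrapper and message text are examples only; the arguments are assumed to come from the surrounding NodeContext):

    #include <node/abort.h>
    #include <util/signalinterrupt.h>
    #include <util/translation.h>

    #include <atomic>

    void ExampleFatalPath(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status)
    {
        // AbortNode records the message as the misc warning, reports it via
        // InitError, sets the exit code and requests shutdown.
        node::AbortNode(shutdown, exit_status, _("Example: a fatal internal condition occurred"));
    }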
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index c499bbfa6a..576c07a833 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -131,12 +131,14 @@ bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, s
pindexNew->nTx = diskindex.nTx;
if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
- return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());
+ LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
+ return false;
}
pcursor->Next();
} else {
- return error("%s: failed to read value", __func__);
+ LogError("%s: failed to read value\n", __func__);
+ return false;
}
} else {
break;
@@ -402,7 +404,7 @@ bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockha
if (snapshot_blockhash) {
const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
if (!maybe_au_data) {
- m_opts.notifications.fatalError(strprintf("Assumeutxo data not found for the given blockhash '%s'.", snapshot_blockhash->ToString()));
+ m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
return false;
}
const AssumeutxoData& au_data = *Assert(maybe_au_data);
@@ -432,7 +434,8 @@ bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockha
for (CBlockIndex* pindex : vSortedByHeight) {
if (m_interrupt) return false;
if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
- return error("%s: block index is non-contiguous, index of height %d missing", __func__, previous_index->nHeight + 1);
+ LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
+ return false;
}
previous_index = pindex;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
@@ -671,7 +674,8 @@ bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos
// Open history file to append
AutoFile fileout{OpenUndoFile(pos)};
if (fileout.IsNull()) {
- return error("%s: OpenUndoFile failed", __func__);
+ LogError("%s: OpenUndoFile failed\n", __func__);
+ return false;
}
// Write index header
@@ -681,7 +685,8 @@ bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos
// Write undo data
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0) {
- return error("%s: ftell failed", __func__);
+ LogError("%s: ftell failed\n", __func__);
+ return false;
}
pos.nPos = (unsigned int)fileOutPos;
fileout << blockundo;
@@ -700,13 +705,15 @@ bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& in
const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
if (pos.IsNull()) {
- return error("%s: no undo data available", __func__);
+ LogError("%s: no undo data available\n", __func__);
+ return false;
}
// Open history file to read
AutoFile filein{OpenUndoFile(pos, true)};
if (filein.IsNull()) {
- return error("%s: OpenUndoFile failed", __func__);
+ LogError("%s: OpenUndoFile failed\n", __func__);
+ return false;
}
// Read block
@@ -717,12 +724,14 @@ bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& in
verifier >> blockundo;
filein >> hashChecksum;
} catch (const std::exception& e) {
- return error("%s: Deserialize or I/O error - %s", __func__, e.what());
+ LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what());
+ return false;
}
// Verify checksum
if (hashChecksum != verifier.GetHash()) {
- return error("%s: Checksum mismatch", __func__);
+ LogError("%s: Checksum mismatch\n", __func__);
+ return false;
}
return true;
@@ -732,7 +741,7 @@ bool BlockManager::FlushUndoFile(int block_file, bool finalize)
{
FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
- m_opts.notifications.flushError("Flushing undo file to disk failed. This is likely the result of an I/O error.");
+ m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
return false;
}
return true;
@@ -754,7 +763,7 @@ bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finali
FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
- m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error.");
+ m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
success = false;
}
// we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
@@ -897,19 +906,19 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
if (!fKnown) {
LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
- }
- // Do not propagate the return code. The flush concerns a previous block
- // and undo file that has already been written to. If a flush fails
- // here, and we crash, there is no expected additional block data
- // inconsistency arising from the flush failure here. However, the undo
- // data may be inconsistent after a crash if the flush is called during
- // a reindex. A flush error might also leave some of the data files
- // untrimmed.
- if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) {
- LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
- "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n",
- last_blockfile, !fKnown, finalize_undo, nFile);
+ // Do not propagate the return code. The flush concerns a previous block
+ // and undo file that has already been written to. If a flush fails
+ // here, and we crash, there is no expected additional block data
+ // inconsistency arising from the flush failure here. However, the undo
+ // data may be inconsistent after a crash if the flush is called during
+ // a reindex. A flush error might also leave some of the data files
+ // untrimmed.
+ if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) {
+ LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
+ "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n",
+ last_blockfile, !fKnown, finalize_undo, nFile);
+ }
}
// No undo data yet in the new file, so reset our undo-height tracking.
m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
@@ -926,7 +935,7 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
bool out_of_space;
size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
if (out_of_space) {
- m_opts.notifications.fatalError("Disk space is too low!", _("Disk space is too low!"));
+ m_opts.notifications.fatalError(_("Disk space is too low!"));
return false;
}
if (bytes_allocated != 0 && IsPruneMode()) {
@@ -951,7 +960,7 @@ bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFileP
bool out_of_space;
size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
if (out_of_space) {
- return FatalError(m_opts.notifications, state, "Disk space is too low!", _("Disk space is too low!"));
+ return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
}
if (bytes_allocated != 0 && IsPruneMode()) {
m_check_for_pruning = true;
@@ -965,7 +974,8 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
// Open history file to append
AutoFile fileout{OpenBlockFile(pos)};
if (fileout.IsNull()) {
- return error("WriteBlockToDisk: OpenBlockFile failed");
+ LogError("WriteBlockToDisk: OpenBlockFile failed\n");
+ return false;
}
// Write index header
@@ -975,7 +985,8 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
// Write block
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0) {
- return error("WriteBlockToDisk: ftell failed");
+ LogError("WriteBlockToDisk: ftell failed\n");
+ return false;
}
pos.nPos = (unsigned int)fileOutPos;
fileout << TX_WITH_WITNESS(block);
@@ -993,10 +1004,11 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid
if (block.GetUndoPos().IsNull()) {
FlatFilePos _pos;
if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) {
- return error("ConnectBlock(): FindUndoPos failed");
+ LogError("ConnectBlock(): FindUndoPos failed\n");
+ return false;
}
if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) {
- return FatalError(m_opts.notifications, state, "Failed to write undo data");
+ return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
}
// rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
// we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
@@ -1031,24 +1043,28 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) cons
// Open history file to read
AutoFile filein{OpenBlockFile(pos, true)};
if (filein.IsNull()) {
- return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
+ LogError("ReadBlockFromDisk: OpenBlockFile failed for %s\n", pos.ToString());
+ return false;
}
// Read block
try {
filein >> TX_WITH_WITNESS(block);
} catch (const std::exception& e) {
- return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
+ LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
+ return false;
}
// Check the header
if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
- return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
+ LogError("ReadBlockFromDisk: Errors in block header at %s\n", pos.ToString());
+ return false;
}
// Signet only: check block solution
if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
- return error("ReadBlockFromDisk: Errors in block solution at %s", pos.ToString());
+ LogError("ReadBlockFromDisk: Errors in block solution at %s\n", pos.ToString());
+ return false;
}
return true;
@@ -1062,8 +1078,9 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) co
return false;
}
if (block.GetHash() != index.GetBlockHash()) {
- return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
+ LogError("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s\n",
index.ToString(), block_pos.ToString());
+ return false;
}
return true;
}
@@ -1071,10 +1088,17 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) co
bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
{
FlatFilePos hpos = pos;
+    // If nPos is less than 8, the pos is null and we don't have the block data.
+    // Return early to prevent the unsigned wraparound when seeking back 8 bytes below.
+ if (hpos.nPos < 8) {
+ LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
+ return false;
+ }
hpos.nPos -= 8; // Seek back 8 bytes for meta header
AutoFile filein{OpenBlockFile(hpos, true)};
if (filein.IsNull()) {
- return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
+ LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
+ return false;
}
try {
@@ -1084,20 +1108,23 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatF
filein >> blk_start >> blk_size;
if (blk_start != GetParams().MessageStart()) {
- return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
+ LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
HexStr(blk_start),
HexStr(GetParams().MessageStart()));
+ return false;
}
if (blk_size > MAX_SIZE) {
- return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
+ LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
blk_size, MAX_SIZE);
+ return false;
}
block.resize(blk_size); // Zeroing of memory is intentional here
filein.read(MakeWritableByteSpan(block));
} catch (const std::exception& e) {
- return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
+ LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
+ return false;
}
return true;
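
(For context, illustrative only: the on-disk record layout that the new nPos < 8 guard relies on. RawBlockRecord is a hypothetical illustration, not a real type in the tree.)

#include <cstdint>

// Each block in a blk*.dat file is stored as an 8-byte meta header followed by
// the serialized block, and FlatFilePos::nPos points at the block data itself:
struct RawBlockRecord {
    uint8_t magic[4];  // network message start bytes (GetParams().MessageStart())
    uint32_t size;     // serialized block size, checked against MAX_SIZE on read
    // ... 'size' bytes of serialized block data start here, at nPos ...
};
// Hence a valid nPos is always >= 8; seeking back 8 bytes from a smaller unsigned
// value would wrap around, which is what the early return above prevents.
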
@@ -1117,12 +1144,12 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, cons
nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
}
if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime(), position_known)) {
- error("%s: FindBlockPos failed", __func__);
+ LogError("%s: FindBlockPos failed\n", __func__);
return FlatFilePos();
}
if (!position_known) {
if (!WriteBlockToDisk(block, blockPos)) {
- m_opts.notifications.fatalError("Failed to write block");
+ m_opts.notifications.fatalError(_("Failed to write block."));
return FlatFilePos();
}
}
@@ -1206,7 +1233,7 @@ void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFile
for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
BlockValidationState state;
if (!chainstate->ActivateBestChain(state, nullptr)) {
- chainman.GetNotifications().fatalError(strprintf("Failed to connect best block (%s)", state.ToString()));
+ chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
return;
}
}
diff --git a/src/node/kernel_notifications.cpp b/src/node/kernel_notifications.cpp
index 1fd3bad296..99f909ff75 100644
--- a/src/node/kernel_notifications.cpp
+++ b/src/node/kernel_notifications.cpp
@@ -84,15 +84,15 @@ void KernelNotifications::warning(const bilingual_str& warning)
DoWarning(warning);
}
-void KernelNotifications::flushError(const std::string& debug_message)
+void KernelNotifications::flushError(const bilingual_str& message)
{
- AbortNode(&m_shutdown, m_exit_status, debug_message);
+ AbortNode(&m_shutdown, m_exit_status, message);
}
-void KernelNotifications::fatalError(const std::string& debug_message, const bilingual_str& user_message)
+void KernelNotifications::fatalError(const bilingual_str& message)
{
node::AbortNode(m_shutdown_on_fatal_error ? &m_shutdown : nullptr,
- m_exit_status, debug_message, user_message);
+ m_exit_status, message);
}
void ReadNotificationArgs(const ArgsManager& args, KernelNotifications& notifications)
diff --git a/src/node/kernel_notifications.h b/src/node/kernel_notifications.h
index 38d8600ac6..f4d97a0fff 100644
--- a/src/node/kernel_notifications.h
+++ b/src/node/kernel_notifications.h
@@ -9,7 +9,6 @@
#include <atomic>
#include <cstdint>
-#include <string>
class ArgsManager;
class CBlockIndex;
@@ -37,9 +36,9 @@ public:
void warning(const bilingual_str& warning) override;
- void flushError(const std::string& debug_message) override;
+ void flushError(const bilingual_str& message) override;
- void fatalError(const std::string& debug_message, const bilingual_str& user_message = {}) override;
+ void fatalError(const bilingual_str& message) override;
//! Block height after which blockTip notification will return Interrupted{}, if >0.
int m_stop_at_height{DEFAULT_STOPATHEIGHT};
diff --git a/src/node/transaction.h b/src/node/transaction.h
index 168273594c..6782536ace 100644
--- a/src/node/transaction.h
+++ b/src/node/transaction.h
@@ -26,6 +26,12 @@ struct NodeContext;
*/
static const CFeeRate DEFAULT_MAX_RAW_TX_FEE_RATE{COIN / 10};
+/** Maximum burn value for sendrawtransaction, submitpackage, and testmempoolaccept RPC calls.
+ * By default, a transaction with a burn value higher than this will be rejected
+ * by these RPCs and the GUI. This can be overridden with the maxburnamount argument.
+ */
+static const CAmount DEFAULT_MAX_BURN_AMOUNT{0};
+
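
(Illustrative only: the kind of check DEFAULT_MAX_BURN_AMOUNT gates, mirroring the loop added to submitpackage further down in this diff. ExceedsMaxBurn is a hypothetical helper, not part of the change.)

#include <consensus/amount.h>
#include <primitives/transaction.h>

bool ExceedsMaxBurn(const CMutableTransaction& mtx, CAmount max_burn_amount)
{
    for (const CTxOut& out : mtx.vout) {
        // An output counts as burned if its script is provably unspendable or
        // contains invalid opcodes; this is heuristic and does not guarantee
        // spendability of the remaining outputs.
        if ((out.scriptPubKey.IsUnspendable() || !out.scriptPubKey.HasValidOps()) &&
            out.nValue > max_burn_amount) {
            return true;
        }
    }
    return false;
}
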
/**
* Submit a transaction to the mempool and (optionally) relay it to all P2P peers.
*
diff --git a/src/noui.cpp b/src/noui.cpp
index af5a180ce3..23637dfa1f 100644
--- a/src/noui.cpp
+++ b/src/noui.cpp
@@ -28,20 +28,21 @@ bool noui_ThreadSafeMessageBox(const bilingual_str& message, const std::string&
switch (style) {
case CClientUIInterface::MSG_ERROR:
strCaption = "Error: ";
+ if (!fSecure) LogError("%s\n", message.original);
break;
case CClientUIInterface::MSG_WARNING:
strCaption = "Warning: ";
+ if (!fSecure) LogWarning("%s\n", message.original);
break;
case CClientUIInterface::MSG_INFORMATION:
strCaption = "Information: ";
+ if (!fSecure) LogInfo("%s\n", message.original);
break;
default:
strCaption = caption + ": "; // Use supplied caption (can be empty)
+ if (!fSecure) LogInfo("%s%s\n", strCaption, message.original);
}
- if (!fSecure) {
- LogPrintf("%s%s\n", strCaption, message.original);
- }
tfm::format(std::cerr, "%s%s\n", strCaption, message.original);
return false;
}
diff --git a/src/policy/v3_policy.cpp b/src/policy/v3_policy.cpp
index f838dc6c0f..3c3942d707 100644
--- a/src/policy/v3_policy.cpp
+++ b/src/policy/v3_policy.cpp
@@ -130,10 +130,11 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
}
// It shouldn't be possible to have any mempool siblings at this point. SingleV3Checks
- // catches mempool siblings. Also, if the package consists of connected transactions,
+    // catches mempool siblings, and sibling eviction is not extended to packages. Also, if the package consists of connected transactions,
// any tx having a mempool ancestor would mean the package exceeds ancestor limits.
if (!Assume(!parent_info.m_has_mempool_descendant)) {
- return strprintf("tx %u would exceed descendant count limit", parent_info.m_wtxid.ToString());
+ return strprintf("tx %s (wtxid=%s) would exceed descendant count limit",
+ parent_info.m_txid.ToString(), parent_info.m_wtxid.ToString());
}
}
} else {
@@ -158,7 +159,7 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
return std::nullopt;
}
-std::optional<std::string> SingleV3Checks(const CTransactionRef& ptx,
+std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTransactionRef& ptx,
const CTxMemPool::setEntries& mempool_ancestors,
const std::set<Txid>& direct_conflicts,
int64_t vsize)
@@ -166,13 +167,15 @@ std::optional<std::string> SingleV3Checks(const CTransactionRef& ptx,
// Check v3 and non-v3 inheritance.
for (const auto& entry : mempool_ancestors) {
if (ptx->nVersion != 3 && entry->GetTx().nVersion == 3) {
- return strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
+ return std::make_pair(strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(),
- entry->GetSharedTx()->GetHash().ToString(), entry->GetSharedTx()->GetWitnessHash().ToString());
+ entry->GetSharedTx()->GetHash().ToString(), entry->GetSharedTx()->GetWitnessHash().ToString()),
+ nullptr);
} else if (ptx->nVersion == 3 && entry->GetTx().nVersion != 3) {
- return strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)",
+ return std::make_pair(strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)",
ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(),
- entry->GetSharedTx()->GetHash().ToString(), entry->GetSharedTx()->GetWitnessHash().ToString());
+ entry->GetSharedTx()->GetHash().ToString(), entry->GetSharedTx()->GetWitnessHash().ToString()),
+ nullptr);
}
}
@@ -185,16 +188,18 @@ std::optional<std::string> SingleV3Checks(const CTransactionRef& ptx,
// Check that V3_ANCESTOR_LIMIT would not be violated.
if (mempool_ancestors.size() + 1 > V3_ANCESTOR_LIMIT) {
- return strprintf("tx %s (wtxid=%s) would have too many ancestors",
- ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString());
+ return std::make_pair(strprintf("tx %s (wtxid=%s) would have too many ancestors",
+ ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()),
+ nullptr);
}
// Remaining checks only pertain to transactions with unconfirmed ancestors.
if (mempool_ancestors.size() > 0) {
// If this transaction spends V3 parents, it cannot be too large.
if (vsize > V3_CHILD_MAX_VSIZE) {
- return strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes",
- ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, V3_CHILD_MAX_VSIZE);
+ return std::make_pair(strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes",
+ ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, V3_CHILD_MAX_VSIZE),
+ nullptr);
}
// Check the descendant counts of in-mempool ancestors.
@@ -210,9 +215,20 @@ std::optional<std::string> SingleV3Checks(const CTransactionRef& ptx,
std::any_of(children.cbegin(), children.cend(),
[&direct_conflicts](const CTxMemPoolEntry& child){return direct_conflicts.count(child.GetTx().GetHash()) > 0;});
if (parent_entry->GetCountWithDescendants() + 1 > V3_DESCENDANT_LIMIT && !child_will_be_replaced) {
- return strprintf("tx %u (wtxid=%s) would exceed descendant count limit",
- parent_entry->GetSharedTx()->GetHash().ToString(),
- parent_entry->GetSharedTx()->GetWitnessHash().ToString());
+            // Allow sibling eviction for v3 transactions: if another child already exists, consider
+            // evicting it under RBF rules even if our inputs do not conflict with it. We rely on v3 rules
+ // only permitting 1 descendant, as otherwise we would need to have logic for deciding
+ // which descendant to evict. Skip if this isn't true, e.g. if the transaction has
+ // multiple children or the sibling also has descendants due to a reorg.
+ const bool consider_sibling_eviction{parent_entry->GetCountWithDescendants() == 2 &&
+ children.begin()->get().GetCountWithAncestors() == 2};
+
+ // Return the sibling if its eviction can be considered. Provide the "descendant count
+ // limit" string either way, as the caller may decide not to do sibling eviction.
+            return std::make_pair(strprintf("tx %s (wtxid=%s) would exceed descendant count limit",
+ parent_entry->GetSharedTx()->GetHash().ToString(),
+ parent_entry->GetSharedTx()->GetWitnessHash().ToString()),
+ consider_sibling_eviction ? children.begin()->get().GetSharedTx() : nullptr);
}
}
return std::nullopt;
diff --git a/src/policy/v3_policy.h b/src/policy/v3_policy.h
index 9e871915e5..2e56f8822b 100644
--- a/src/policy/v3_policy.h
+++ b/src/policy/v3_policy.h
@@ -48,9 +48,15 @@ static_assert(V3_CHILD_MAX_VSIZE + MAX_STANDARD_TX_WEIGHT / WITNESS_SCALE_FACTOR
* count of in-mempool ancestors.
* @param[in] vsize The sigop-adjusted virtual size of ptx.
*
- * @returns debug string if an error occurs, std::nullopt otherwise.
+ * @returns 3 possibilities:
+ * - std::nullopt if all v3 checks were applied successfully
+ * - debug string + pointer to a mempool sibling if this transaction would be the second child in a
+ * 1-parent-1-child cluster; the caller may consider evicting the specified sibling or return an
+ * error with the debug string.
+ * - debug string + nullptr if this transaction violates some v3 rule and sibling eviction is not
+ * applicable.
*/
-std::optional<std::string> SingleV3Checks(const CTransactionRef& ptx,
+std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTransactionRef& ptx,
const CTxMemPool::setEntries& mempool_ancestors,
const std::set<Txid>& direct_conflicts,
int64_t vsize);
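
(Illustrative only: a hypothetical caller-side sketch of how the three documented outcomes of SingleV3Checks() can be told apart. The real handling lives in mempool validation, which may attempt sibling eviction under RBF rules.)

#include <policy/v3_policy.h>

#include <optional>
#include <string>
#include <utility>

void ClassifyV3Result(const std::optional<std::pair<std::string, CTransactionRef>>& res)
{
    if (!res) return; // all v3 checks passed
    const auto& [debug_str, sibling] = *res;
    if (sibling) {
        // Second child of a 1-parent-1-child cluster: the caller may consider
        // evicting 'sibling', or simply reject using debug_str.
    } else {
        // A v3 rule was violated and sibling eviction does not apply: reject
        // using debug_str.
    }
}
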
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index a87bef796c..dd654a7abe 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -454,20 +454,24 @@ void OptionsDialog::updateProxyValidationState()
void OptionsDialog::updateDefaultProxyNets()
{
- const std::optional<CNetAddr> ui_proxy_netaddr{LookupHost(ui->proxyIp->text().toStdString(), /*fAllowLookup=*/false)};
- const CService ui_proxy{ui_proxy_netaddr.value_or(CNetAddr{}), ui->proxyPort->text().toUShort()};
+ std::string proxyIpText{ui->proxyIp->text().toStdString()};
+ if (!IsUnixSocketPath(proxyIpText)) {
+ const std::optional<CNetAddr> ui_proxy_netaddr{LookupHost(proxyIpText, /*fAllowLookup=*/false)};
+ const CService ui_proxy{ui_proxy_netaddr.value_or(CNetAddr{}), ui->proxyPort->text().toUShort()};
+ proxyIpText = ui_proxy.ToStringAddrPort();
+ }
Proxy proxy;
bool has_proxy;
has_proxy = model->node().getProxy(NET_IPV4, proxy);
- ui->proxyReachIPv4->setChecked(has_proxy && proxy.proxy == ui_proxy);
+ ui->proxyReachIPv4->setChecked(has_proxy && proxy.ToString() == proxyIpText);
has_proxy = model->node().getProxy(NET_IPV6, proxy);
- ui->proxyReachIPv6->setChecked(has_proxy && proxy.proxy == ui_proxy);
+ ui->proxyReachIPv6->setChecked(has_proxy && proxy.ToString() == proxyIpText);
has_proxy = model->node().getProxy(NET_ONION, proxy);
- ui->proxyReachTor->setChecked(has_proxy && proxy.proxy == ui_proxy);
+ ui->proxyReachTor->setChecked(has_proxy && proxy.ToString() == proxyIpText);
}
ProxyAddressValidator::ProxyAddressValidator(QObject *parent) :
diff --git a/src/rest.cpp b/src/rest.cpp
index 91184745c8..89c033b8a3 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -13,6 +13,7 @@
#include <chain.h>
#include <chainparams.h>
#include <core_io.h>
+#include <flatfile.h>
#include <httpserver.h>
#include <index/blockfilterindex.h>
#include <index/txindex.h>
@@ -34,7 +35,7 @@
#include <validation.h>
#include <any>
-#include <string>
+#include <vector>
#include <univalue.h>
@@ -295,7 +296,7 @@ static bool rest_block(const std::any& context,
if (!ParseHashStr(hashStr, hash))
return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr);
- CBlock block;
+ FlatFilePos pos{};
const CBlockIndex* pblockindex = nullptr;
const CBlockIndex* tip = nullptr;
ChainstateManager* maybe_chainman = GetChainman(context, req);
@@ -311,32 +312,33 @@ static bool rest_block(const std::any& context,
if (chainman.m_blockman.IsBlockPruned(*pblockindex)) {
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
}
+ pos = pblockindex->GetBlockPos();
}
- if (!chainman.m_blockman.ReadBlockFromDisk(block, *pblockindex)) {
+ std::vector<uint8_t> block_data{};
+ if (!chainman.m_blockman.ReadRawBlockFromDisk(block_data, pos)) {
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
}
switch (rf) {
case RESTResponseFormat::BINARY: {
- DataStream ssBlock;
- ssBlock << TX_WITH_WITNESS(block);
- std::string binaryBlock = ssBlock.str();
+ const std::string binaryBlock{block_data.begin(), block_data.end()};
req->WriteHeader("Content-Type", "application/octet-stream");
req->WriteReply(HTTP_OK, binaryBlock);
return true;
}
case RESTResponseFormat::HEX: {
- DataStream ssBlock;
- ssBlock << TX_WITH_WITNESS(block);
- std::string strHex = HexStr(ssBlock) + "\n";
+ const std::string strHex{HexStr(block_data) + "\n"};
req->WriteHeader("Content-Type", "text/plain");
req->WriteReply(HTTP_OK, strHex);
return true;
}
case RESTResponseFormat::JSON: {
+ CBlock block{};
+ DataStream block_stream{block_data};
+ block_stream >> TX_WITH_WITNESS(block);
UniValue objBlock = blockToJSON(chainman.m_blockman, block, *tip, *pblockindex, tx_verbosity);
std::string strJSON = objBlock.write() + "\n";
req->WriteHeader("Content-Type", "application/json");
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index dfdddeacea..a1135c27d4 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -17,6 +17,7 @@
#include <core_io.h>
#include <deploymentinfo.h>
#include <deploymentstatus.h>
+#include <flatfile.h>
#include <hash.h>
#include <index/blockfilterindex.h>
#include <index/coinstatsindex.h>
@@ -595,6 +596,28 @@ static CBlock GetBlockChecked(BlockManager& blockman, const CBlockIndex& blockin
return block;
}
+static std::vector<uint8_t> GetRawBlockChecked(BlockManager& blockman, const CBlockIndex& blockindex)
+{
+ std::vector<uint8_t> data{};
+ FlatFilePos pos{};
+ {
+ LOCK(cs_main);
+ if (blockman.IsBlockPruned(blockindex)) {
+ throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
+ }
+ pos = blockindex.GetBlockPos();
+ }
+
+ if (!blockman.ReadRawBlockFromDisk(data, pos)) {
+ // Block not found on disk. This could be because we have the block
+ // header in our index but not yet have the block or did not accept the
+        // header in our index but do not yet have the block, did not accept the
+        // block, or the block was pruned right after we released the lock above.
+ }
+
+ return data;
+}
+
static CBlockUndo GetUndoChecked(BlockManager& blockman, const CBlockIndex& blockindex)
{
CBlockUndo blockUndo;
@@ -735,15 +758,16 @@ static RPCHelpMan getblock()
}
}
- const CBlock block{GetBlockChecked(chainman.m_blockman, *pblockindex)};
+ const std::vector<uint8_t> block_data{GetRawBlockChecked(chainman.m_blockman, *pblockindex)};
if (verbosity <= 0) {
- DataStream ssBlock;
- ssBlock << TX_WITH_WITNESS(block);
- std::string strHex = HexStr(ssBlock);
- return strHex;
+ return HexStr(block_data);
}
+ DataStream block_stream{block_data};
+ CBlock block{};
+ block_stream >> TX_WITH_WITNESS(block);
+
TxVerbosity tx_verbosity;
if (verbosity == 1) {
tx_verbosity = TxVerbosity::SHOW_TXID;
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 5825efdf82..eb05f33b42 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -128,6 +128,8 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "testmempoolaccept", 0, "rawtxs" },
{ "testmempoolaccept", 1, "maxfeerate" },
{ "submitpackage", 0, "package" },
+ { "submitpackage", 1, "maxfeerate" },
+ { "submitpackage", 2, "maxburnamount" },
{ "combinerawtransaction", 0, "txs" },
{ "fundrawtransaction", 1, "options" },
{ "fundrawtransaction", 1, "add_inputs"},
diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp
index 25bfec2d45..8539506f2f 100644
--- a/src/rpc/mempool.cpp
+++ b/src/rpc/mempool.cpp
@@ -28,6 +28,7 @@
using kernel::DumpMempool;
+using node::DEFAULT_MAX_BURN_AMOUNT;
using node::DEFAULT_MAX_RAW_TX_FEE_RATE;
using node::MempoolPath;
using node::NodeContext;
@@ -46,7 +47,7 @@ static RPCHelpMan sendrawtransaction()
{"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())},
"Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT +
"/kvB.\nFee rates larger than 1BTC/kvB are rejected.\nSet to 0 to accept any fee rate."},
- {"maxburnamount", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(0)},
+ {"maxburnamount", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_BURN_AMOUNT)},
"Reject transactions with provably unspendable outputs (e.g. 'datacarrier' outputs that use the OP_RETURN opcode) greater than the specified value, expressed in " + CURRENCY_UNIT + ".\n"
"If burning funds through unspendable outputs is desired, increase this value.\n"
"This check is based on heuristics and does not guarantee spendability of outputs.\n"},
@@ -180,7 +181,7 @@ static RPCHelpMan testmempoolaccept()
Chainstate& chainstate = chainman.ActiveChainstate();
const PackageMempoolAcceptResult package_result = [&] {
LOCK(::cs_main);
- if (txns.size() > 1) return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/true);
+ if (txns.size() > 1) return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/true, /*max_sane_feerate=*/{});
return PackageMempoolAcceptResult(txns[0]->GetWitnessHash(),
chainman.ProcessTransaction(txns[0], /*test_accept=*/true));
}();
@@ -823,6 +824,14 @@ static RPCHelpMan submitpackage()
{"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""},
},
},
+ {"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())},
+ "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT +
+ "/kvB.\nFee rates larger than 1BTC/kvB are rejected.\nSet to 0 to accept any fee rate."},
+ {"maxburnamount", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_BURN_AMOUNT)},
+ "Reject transactions with provably unspendable outputs (e.g. 'datacarrier' outputs that use the OP_RETURN opcode) greater than the specified value, expressed in " + CURRENCY_UNIT + ".\n"
+ "If burning funds through unspendable outputs is desired, increase this value.\n"
+ "This check is based on heuristics and does not guarantee spendability of outputs.\n"
+ },
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -862,6 +871,17 @@ static RPCHelpMan submitpackage()
"Array must contain between 1 and " + ToString(MAX_PACKAGE_COUNT) + " transactions.");
}
+ // Fee check needs to be run with chainstate and package context
+ const CFeeRate max_raw_tx_fee_rate = ParseFeeRate(self.Arg<UniValue>(1));
+ std::optional<CFeeRate> max_sane_feerate{max_raw_tx_fee_rate};
+ // 0-value is special; it's mapped to no sanity check
+ if (max_raw_tx_fee_rate == CFeeRate(0)) {
+ max_sane_feerate = std::nullopt;
+ }
+
+ // Burn sanity check is run with no context
+ const CAmount max_burn_amount = request.params[2].isNull() ? 0 : AmountFromValue(request.params[2]);
+
std::vector<CTransactionRef> txns;
txns.reserve(raw_transactions.size());
for (const auto& rawtx : raw_transactions.getValues()) {
@@ -870,6 +890,13 @@ static RPCHelpMan submitpackage()
throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
"TX decode failed: " + rawtx.get_str() + " Make sure the tx has at least one input.");
}
+
+ for (const auto& out : mtx.vout) {
+            if ((out.scriptPubKey.IsUnspendable() || !out.scriptPubKey.HasValidOps()) && out.nValue > max_burn_amount) {
+ throw JSONRPCTransactionError(TransactionError::MAX_BURN_EXCEEDED);
+ }
+ }
+
txns.emplace_back(MakeTransactionRef(std::move(mtx)));
}
if (!IsChildWithParentsTree(txns)) {
@@ -879,7 +906,7 @@ static RPCHelpMan submitpackage()
NodeContext& node = EnsureAnyNodeContext(request.context);
CTxMemPool& mempool = EnsureMemPool(node);
Chainstate& chainstate = EnsureChainman(node).ActiveChainstate();
- const auto package_result = WITH_LOCK(::cs_main, return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/ false));
+ const auto package_result = WITH_LOCK(::cs_main, return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/ false, max_sane_feerate));
std::string package_msg = "success";
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 5e6f42b596..f935a3b08f 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -607,8 +607,8 @@ static UniValue GetNetworksInfo()
obj.pushKV("name", GetNetworkName(network));
obj.pushKV("limited", !g_reachable_nets.Contains(network));
obj.pushKV("reachable", g_reachable_nets.Contains(network));
- obj.pushKV("proxy", proxy.IsValid() ? proxy.proxy.ToStringAddrPort() : std::string());
- obj.pushKV("proxy_randomize_credentials", proxy.randomize_credentials);
+ obj.pushKV("proxy", proxy.IsValid() ? proxy.ToString() : std::string());
+ obj.pushKV("proxy_randomize_credentials", proxy.m_randomize_credentials);
networks.push_back(obj);
}
return networks;
@@ -951,7 +951,7 @@ static RPCHelpMan getnodeaddresses()
static RPCHelpMan addpeeraddress()
{
return RPCHelpMan{"addpeeraddress",
- "\nAdd the address of a potential peer to the address manager. This RPC is for testing only.\n",
+ "Add the address of a potential peer to an address manager table. This RPC is for testing only.",
{
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP address of the peer"},
{"port", RPCArg::Type::NUM, RPCArg::Optional::NO, "The port of the peer"},
@@ -960,7 +960,8 @@ static RPCHelpMan addpeeraddress()
RPCResult{
RPCResult::Type::OBJ, "", "",
{
- {RPCResult::Type::BOOL, "success", "whether the peer address was successfully added to the address manager"},
+ {RPCResult::Type::BOOL, "success", "whether the peer address was successfully added to the address manager table"},
+ {RPCResult::Type::STR, "error", /*optional=*/true, "error description, if the address could not be added"},
},
},
RPCExamples{
@@ -989,8 +990,13 @@ static RPCHelpMan addpeeraddress()
success = true;
if (tried) {
// Attempt to move the address to the tried addresses table.
- addrman.Good(address);
+ if (!addrman.Good(address)) {
+ success = false;
+ obj.pushKV("error", "failed-adding-to-tried");
+ }
}
+ } else {
+ obj.pushKV("error", "failed-adding-to-new");
}
}
diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp
index ff02ab5a12..baabd4d5b5 100644
--- a/src/script/signingprovider.cpp
+++ b/src/script/signingprovider.cpp
@@ -157,8 +157,10 @@ bool FillableSigningProvider::GetKey(const CKeyID &address, CKey &keyOut) const
bool FillableSigningProvider::AddCScript(const CScript& redeemScript)
{
- if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE)
- return error("FillableSigningProvider::AddCScript(): redeemScripts > %i bytes are invalid", MAX_SCRIPT_ELEMENT_SIZE);
+ if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) {
+ LogError("FillableSigningProvider::AddCScript(): redeemScripts > %i bytes are invalid\n", MAX_SCRIPT_ELEMENT_SIZE);
+ return false;
+ }
LOCK(cs_KeyStore);
mapScripts[CScriptID(redeemScript)] = redeemScript;
@@ -368,8 +370,6 @@ TaprootBuilder& TaprootBuilder::Add(int depth, Span<const unsigned char> script,
/* Construct NodeInfo object with leaf hash and (if track is true) also leaf information. */
NodeInfo node;
node.hash = ComputeTapleafHash(leaf_version, script);
- // due to bug in clang-tidy-17:
- // NOLINTNEXTLINE(modernize-use-emplace)
if (track) node.leaves.emplace_back(LeafInfo{std::vector<unsigned char>(script.begin(), script.end()), leaf_version, {}});
/* Insert into the branch. */
Insert(std::move(node), depth);
diff --git a/src/test/compress_tests.cpp b/src/test/compress_tests.cpp
index 264b47b07c..13c2740553 100644
--- a/src/test/compress_tests.cpp
+++ b/src/test/compress_tests.cpp
@@ -4,6 +4,7 @@
#include <compressor.h>
#include <script/script.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <stdint.h>
@@ -131,4 +132,36 @@ BOOST_AUTO_TEST_CASE(compress_script_to_uncompressed_pubkey_id)
BOOST_CHECK_EQUAL(out[0], 0x04 | (script[65] & 0x01)); // least significant bit (lsb) of last char of pubkey is mapped into out[0]
}
+BOOST_AUTO_TEST_CASE(compress_p2pk_scripts_not_on_curve)
+{
+ XOnlyPubKey x_not_on_curve;
+ do {
+ x_not_on_curve = XOnlyPubKey(g_insecure_rand_ctx.randbytes(32));
+ } while (x_not_on_curve.IsFullyValid());
+
+ // Check that P2PK script with uncompressed pubkey [=> OP_PUSH65 <0x04 .....> OP_CHECKSIG]
+ // which is not fully valid (i.e. point is not on curve) can't be compressed
+ std::vector<unsigned char> pubkey_raw(65, 0);
+ pubkey_raw[0] = 4;
+ std::copy(x_not_on_curve.begin(), x_not_on_curve.end(), &pubkey_raw[1]);
+ CPubKey pubkey_not_on_curve(pubkey_raw);
+ assert(pubkey_not_on_curve.IsValid());
+ assert(!pubkey_not_on_curve.IsFullyValid());
+ CScript script = CScript() << ToByteVector(pubkey_not_on_curve) << OP_CHECKSIG;
+ BOOST_CHECK_EQUAL(script.size(), 67U);
+
+ CompressedScript out;
+ bool done = CompressScript(script, out);
+ BOOST_CHECK_EQUAL(done, false);
+
+ // Check that compressed P2PK script with uncompressed pubkey that is not fully
+ // valid (i.e. x coordinate of the pubkey is not on curve) can't be decompressed
+ CompressedScript compressed_script(x_not_on_curve.begin(), x_not_on_curve.end());
+ for (unsigned int compression_id : {4, 5}) {
+ CScript uncompressed_script;
+ bool success = DecompressScript(uncompressed_script, compression_id, compressed_script);
+ BOOST_CHECK_EQUAL(success, false);
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index d1a67cb0d8..a8e490b459 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -83,7 +83,7 @@ static const TypeTestOneInput* g_test_one_input{nullptr};
void initialize()
{
// Terminate immediately if a fuzzing harness ever tries to create a TCP socket.
- CreateSock = [](const CService&) -> std::unique_ptr<Sock> { std::terminate(); };
+ CreateSock = [](const sa_family_t&) -> std::unique_ptr<Sock> { std::terminate(); };
// Terminate immediately if a fuzzing harness ever tries to perform a DNS lookup.
g_dns_lookup = [](const std::string& name, bool allow_lookup) {
diff --git a/src/test/fuzz/net_permissions.cpp b/src/test/fuzz/net_permissions.cpp
index 6ea2139c46..811c0de4b9 100644
--- a/src/test/fuzz/net_permissions.cpp
+++ b/src/test/fuzz/net_permissions.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <net_permissions.h>
+#include <netbase.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -31,8 +32,9 @@ FUZZ_TARGET(net_permissions)
}
NetWhitelistPermissions net_whitelist_permissions;
+ ConnectionDirection connection_direction;
bilingual_str error_net_whitelist_permissions;
- if (NetWhitelistPermissions::TryParse(s, net_whitelist_permissions, error_net_whitelist_permissions)) {
+ if (NetWhitelistPermissions::TryParse(s, net_whitelist_permissions, connection_direction, error_net_whitelist_permissions)) {
(void)NetPermissions::ToStrings(net_whitelist_permissions.m_flags);
(void)NetPermissions::AddFlag(net_whitelist_permissions.m_flags, net_permission_flags);
assert(NetPermissions::HasFlag(net_whitelist_permissions.m_flags, net_permission_flags));
diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp
index a205ce19f4..1b7a732260 100644
--- a/src/test/fuzz/p2p_transport_serialization.cpp
+++ b/src/test/fuzz/p2p_transport_serialization.cpp
@@ -354,6 +354,7 @@ std::unique_ptr<Transport> MakeV2Transport(NodeId nodeid, bool initiator, RNG& r
} else {
// If it's longer, generate it from the RNG. This avoids having large amounts of
// (hopefully) irrelevant data needing to be stored in the fuzzer data.
+ garb.resize(garb_len);
for (auto& v : garb) v = uint8_t(rng());
}
// Retrieve entropy
diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp
index a48ce37bce..cf33b23cd3 100644
--- a/src/test/fuzz/package_eval.cpp
+++ b/src/test/fuzz/package_eval.cpp
@@ -277,7 +277,7 @@ FUZZ_TARGET(tx_package_eval, .init = initialize_tx_pool)
auto single_submit = txs.size() == 1 && fuzzed_data_provider.ConsumeBool();
const auto result_package = WITH_LOCK(::cs_main,
- return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit));
+ return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit, /*max_sane_feerate=*/{}));
// Always set bypass_limits to false because it is not supported in ProcessNewPackage and
// can be a source of divergence.
diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp
index b6ba612a84..3611bccced 100644
--- a/src/test/fuzz/tx_pool.cpp
+++ b/src/test/fuzz/tx_pool.cpp
@@ -291,7 +291,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool)
// Make sure ProcessNewPackage on one transaction works.
// The result is not guaranteed to be the same as what is returned by ATMP.
const auto result_package = WITH_LOCK(::cs_main,
- return ProcessNewPackage(chainstate, tx_pool, {tx}, true));
+ return ProcessNewPackage(chainstate, tx_pool, {tx}, true, /*max_sane_feerate=*/{}));
// If something went wrong due to a package-specific policy, it might not return a
// validation result for the transaction.
if (result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) {
diff --git a/src/test/i2p_tests.cpp b/src/test/i2p_tests.cpp
index f80f07d190..d7249d88f4 100644
--- a/src/test/i2p_tests.cpp
+++ b/src/test/i2p_tests.cpp
@@ -6,6 +6,7 @@
#include <i2p.h>
#include <logging.h>
#include <netaddress.h>
+#include <netbase.h>
#include <test/util/logging.h>
#include <test/util/net.h>
#include <test/util/setup_common.h>
@@ -38,7 +39,7 @@ public:
private:
const BCLog::Level m_prev_log_level;
- const std::function<std::unique_ptr<Sock>(const CService&)> m_create_sock_orig;
+ const std::function<std::unique_ptr<Sock>(const sa_family_t&)> m_create_sock_orig;
};
BOOST_FIXTURE_TEST_SUITE(i2p_tests, EnvTestingSetup)
@@ -46,12 +47,14 @@ BOOST_FIXTURE_TEST_SUITE(i2p_tests, EnvTestingSetup)
BOOST_AUTO_TEST_CASE(unlimited_recv)
{
// Mock CreateSock() to create MockSock.
- CreateSock = [](const CService&) {
+ CreateSock = [](const sa_family_t&) {
return std::make_unique<StaticContentsSock>(std::string(i2p::sam::MAX_MSG_SIZE + 1, 'a'));
};
CThreadInterrupt interrupt;
- i2p::sam::Session session(gArgs.GetDataDirNet() / "test_i2p_private_key", CService{}, &interrupt);
+ const std::optional<CService> addr{Lookup("127.0.0.1", 9000, false)};
+ const Proxy sam_proxy(addr.value(), false);
+ i2p::sam::Session session(gArgs.GetDataDirNet() / "test_i2p_private_key", sam_proxy, &interrupt);
{
ASSERT_DEBUG_LOG("Creating persistent SAM session");
@@ -66,7 +69,7 @@ BOOST_AUTO_TEST_CASE(unlimited_recv)
BOOST_AUTO_TEST_CASE(listen_ok_accept_fail)
{
size_t num_sockets{0};
- CreateSock = [&num_sockets](const CService&) {
+ CreateSock = [&num_sockets](const sa_family_t&) {
// clang-format off
++num_sockets;
// First socket is the control socket for creating the session.
@@ -111,8 +114,10 @@ BOOST_AUTO_TEST_CASE(listen_ok_accept_fail)
};
CThreadInterrupt interrupt;
+ const CService addr{in6_addr(IN6ADDR_LOOPBACK_INIT), /*port=*/7656};
+ const Proxy sam_proxy(addr, false);
i2p::sam::Session session(gArgs.GetDataDirNet() / "test_i2p_private_key",
- CService{in6_addr(IN6ADDR_LOOPBACK_INIT), /*port=*/7656},
+ sam_proxy,
&interrupt);
i2p::Connection conn;
@@ -130,7 +135,7 @@ BOOST_AUTO_TEST_CASE(damaged_private_key)
{
const auto CreateSockOrig = CreateSock;
- CreateSock = [](const CService&) {
+ CreateSock = [](const sa_family_t&) {
return std::make_unique<StaticContentsSock>("HELLO REPLY RESULT=OK VERSION=3.1\n"
"SESSION STATUS RESULT=OK DESTINATION=\n");
};
@@ -154,7 +159,9 @@ BOOST_AUTO_TEST_CASE(damaged_private_key)
BOOST_REQUIRE(WriteBinaryFile(i2p_private_key_file, file_contents));
CThreadInterrupt interrupt;
- i2p::sam::Session session(i2p_private_key_file, CService{}, &interrupt);
+ const CService addr{in6_addr(IN6ADDR_LOOPBACK_INIT), /*port=*/7656};
+ const Proxy sam_proxy{addr, false};
+ i2p::sam::Session session(i2p_private_key_file, sam_proxy, &interrupt);
{
ASSERT_DEBUG_LOG("Creating persistent SAM session");
diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp
index fa70f62eb4..3422cb8023 100644
--- a/src/test/netbase_tests.cpp
+++ b/src/test/netbase_tests.cpp
@@ -366,6 +366,7 @@ BOOST_AUTO_TEST_CASE(netpermissions_test)
bilingual_str error;
NetWhitebindPermissions whitebindPermissions;
NetWhitelistPermissions whitelistPermissions;
+ ConnectionDirection connection_direction;
// Detect invalid white bind
BOOST_CHECK(!NetWhitebindPermissions::TryParse("", whitebindPermissions, error));
@@ -435,24 +436,33 @@ BOOST_AUTO_TEST_CASE(netpermissions_test)
BOOST_CHECK(NetWhitebindPermissions::TryParse(",,@1.2.3.4:32", whitebindPermissions, error));
BOOST_CHECK_EQUAL(whitebindPermissions.m_flags, NetPermissionFlags::None);
+ BOOST_CHECK(!NetWhitebindPermissions::TryParse("out,forcerelay@1.2.3.4:32", whitebindPermissions, error));
+ BOOST_CHECK(error.original.find("whitebind may only be used for incoming connections (\"out\" was passed)") != std::string::npos);
+
// Detect invalid flag
BOOST_CHECK(!NetWhitebindPermissions::TryParse("bloom,forcerelay,oopsie@1.2.3.4:32", whitebindPermissions, error));
BOOST_CHECK(error.original.find("Invalid P2P permission") != std::string::npos);
// Check netmask error
- BOOST_CHECK(!NetWhitelistPermissions::TryParse("bloom,forcerelay,noban@1.2.3.4:32", whitelistPermissions, error));
+ BOOST_CHECK(!NetWhitelistPermissions::TryParse("bloom,forcerelay,noban@1.2.3.4:32", whitelistPermissions, connection_direction, error));
BOOST_CHECK(error.original.find("Invalid netmask specified in -whitelist") != std::string::npos);
// Happy path for whitelist parsing
- BOOST_CHECK(NetWhitelistPermissions::TryParse("noban@1.2.3.4", whitelistPermissions, error));
+ BOOST_CHECK(NetWhitelistPermissions::TryParse("noban@1.2.3.4", whitelistPermissions, connection_direction, error));
BOOST_CHECK_EQUAL(whitelistPermissions.m_flags, NetPermissionFlags::NoBan);
BOOST_CHECK(NetPermissions::HasFlag(whitelistPermissions.m_flags, NetPermissionFlags::NoBan));
- BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay@1.2.3.4/32", whitelistPermissions, error));
+ BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay@1.2.3.4/32", whitelistPermissions, connection_direction, error));
BOOST_CHECK_EQUAL(whitelistPermissions.m_flags, NetPermissionFlags::BloomFilter | NetPermissionFlags::ForceRelay | NetPermissionFlags::NoBan | NetPermissionFlags::Relay);
BOOST_CHECK(error.empty());
BOOST_CHECK_EQUAL(whitelistPermissions.m_subnet.ToString(), "1.2.3.4/32");
- BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay,mempool@1.2.3.4/32", whitelistPermissions, error));
+ BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay,mempool@1.2.3.4/32", whitelistPermissions, connection_direction, error));
+ BOOST_CHECK(NetWhitelistPermissions::TryParse("in,relay@1.2.3.4", whitelistPermissions, connection_direction, error));
+ BOOST_CHECK_EQUAL(connection_direction, ConnectionDirection::In);
+ BOOST_CHECK(NetWhitelistPermissions::TryParse("out,bloom@1.2.3.4", whitelistPermissions, connection_direction, error));
+ BOOST_CHECK_EQUAL(connection_direction, ConnectionDirection::Out);
+ BOOST_CHECK(NetWhitelistPermissions::TryParse("in,out,bloom@1.2.3.4", whitelistPermissions, connection_direction, error));
+ BOOST_CHECK_EQUAL(connection_direction, ConnectionDirection::Both);
const auto strings = NetPermissions::ToStrings(NetPermissionFlags::All);
BOOST_CHECK_EQUAL(strings.size(), 7U);
diff --git a/src/test/serfloat_tests.cpp b/src/test/serfloat_tests.cpp
index b36bdc02ca..304541074f 100644
--- a/src/test/serfloat_tests.cpp
+++ b/src/test/serfloat_tests.cpp
@@ -37,6 +37,7 @@ uint64_t TestDouble(double f) {
} // namespace
BOOST_AUTO_TEST_CASE(double_serfloat_tests) {
+ // Test specific values against their expected encoding.
BOOST_CHECK_EQUAL(TestDouble(0.0), 0U);
BOOST_CHECK_EQUAL(TestDouble(-0.0), 0x8000000000000000);
BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::infinity()), 0x7ff0000000000000U);
@@ -46,55 +47,76 @@ BOOST_AUTO_TEST_CASE(double_serfloat_tests) {
BOOST_CHECK_EQUAL(TestDouble(2.0), 0x4000000000000000ULL);
BOOST_CHECK_EQUAL(TestDouble(4.0), 0x4010000000000000ULL);
BOOST_CHECK_EQUAL(TestDouble(785.066650390625), 0x4088888880000000ULL);
+ BOOST_CHECK_EQUAL(TestDouble(3.7243058682384174), 0x400dcb60e0031440);
+ BOOST_CHECK_EQUAL(TestDouble(91.64070592566159), 0x4056e901536d447a);
+ BOOST_CHECK_EQUAL(TestDouble(-98.63087668642575), 0xc058a860489c007a);
+ BOOST_CHECK_EQUAL(TestDouble(4.908737756962054), 0x4013a28c268b2b70);
+ BOOST_CHECK_EQUAL(TestDouble(77.9247330021754), 0x40537b2ed3547804);
+ BOOST_CHECK_EQUAL(TestDouble(40.24732825357566), 0x40441fa873c43dfc);
+ BOOST_CHECK_EQUAL(TestDouble(71.39395607929222), 0x4051d936938f27b6);
+ BOOST_CHECK_EQUAL(TestDouble(58.80100710817612), 0x404d668766a2bd70);
+ BOOST_CHECK_EQUAL(TestDouble(-30.10665786964975), 0xc03e1b4dee1e01b8);
+ BOOST_CHECK_EQUAL(TestDouble(60.15231509068704), 0x404e137f0f969814);
+ BOOST_CHECK_EQUAL(TestDouble(-48.15848711335961), 0xc04814494e445bc6);
+ BOOST_CHECK_EQUAL(TestDouble(26.68450101125353), 0x403aaf3b755169b0);
+ BOOST_CHECK_EQUAL(TestDouble(-65.72071986604303), 0xc0506e2046378ede);
+ BOOST_CHECK_EQUAL(TestDouble(17.95575825512381), 0x4031f4ac92b0a388);
+ BOOST_CHECK_EQUAL(TestDouble(-35.27171863226279), 0xc041a2c7ad17a42a);
+ BOOST_CHECK_EQUAL(TestDouble(-8.58810329425124), 0xc0212d1bdffef538);
+ BOOST_CHECK_EQUAL(TestDouble(88.51393044338977), 0x405620e43c83b1c8);
+ BOOST_CHECK_EQUAL(TestDouble(48.07224932612732), 0x4048093f77466ffc);
+ BOOST_CHECK_EQUAL(TestDouble(9.867348871395659e+117), 0x586f4daeb2459b9f);
+ BOOST_CHECK_EQUAL(TestDouble(-1.5166424385129721e+206), 0xeabe3bbc484bd458);
+ BOOST_CHECK_EQUAL(TestDouble(-8.585156555624594e-275), 0x8707c76eee012429);
+ BOOST_CHECK_EQUAL(TestDouble(2.2794371091628822e+113), 0x5777b2184458f4ee);
+ BOOST_CHECK_EQUAL(TestDouble(-1.1290476594131867e+163), 0xe1c91893d3488bb0);
+ BOOST_CHECK_EQUAL(TestDouble(9.143848423979275e-246), 0x0d0ff76e5f2620a3);
+ BOOST_CHECK_EQUAL(TestDouble(-2.8366718125941117e+81), 0xd0d7ec7e754b394a);
+ BOOST_CHECK_EQUAL(TestDouble(-1.2754409481684012e+229), 0xef80d32f8ec55342);
+ BOOST_CHECK_EQUAL(TestDouble(6.000577060053642e-186), 0x197a1be7c8209b6a);
+ BOOST_CHECK_EQUAL(TestDouble(2.0839423284378986e-302), 0x014c94f8689cb0a5);
+ BOOST_CHECK_EQUAL(TestDouble(-1.422140051483753e+259), 0xf5bd99271d04bb35);
+ BOOST_CHECK_EQUAL(TestDouble(-1.0593973991188853e+46), 0xc97db0cdb72d1046);
+ BOOST_CHECK_EQUAL(TestDouble(2.62945125875249e+190), 0x67779b36366c993b);
+ BOOST_CHECK_EQUAL(TestDouble(-2.920377657275094e+115), 0xd7e7b7b45908e23b);
+ BOOST_CHECK_EQUAL(TestDouble(9.790289014855851e-118), 0x27a3c031cc428bcc);
+ BOOST_CHECK_EQUAL(TestDouble(-4.629317182034961e-114), 0xa866ccf0b753705a);
+ BOOST_CHECK_EQUAL(TestDouble(-1.7674605603846528e+279), 0xf9e8ed383ffc3e25);
+ BOOST_CHECK_EQUAL(TestDouble(2.5308171727712605e+120), 0x58ef5cd55f0ec997);
+ BOOST_CHECK_EQUAL(TestDouble(-1.05034156412799e+54), 0xcb25eea1b9350fa0);
- // Roundtrip test on IEC559-compatible systems
- if (std::numeric_limits<double>::is_iec559) {
- BOOST_CHECK_EQUAL(sizeof(double), 8U);
- BOOST_CHECK_EQUAL(sizeof(uint64_t), 8U);
- // Test extreme values
- TestDouble(std::numeric_limits<double>::min());
- TestDouble(-std::numeric_limits<double>::min());
- TestDouble(std::numeric_limits<double>::max());
- TestDouble(-std::numeric_limits<double>::max());
- TestDouble(std::numeric_limits<double>::lowest());
- TestDouble(-std::numeric_limits<double>::lowest());
- TestDouble(std::numeric_limits<double>::quiet_NaN());
- TestDouble(-std::numeric_limits<double>::quiet_NaN());
- TestDouble(std::numeric_limits<double>::signaling_NaN());
- TestDouble(-std::numeric_limits<double>::signaling_NaN());
- TestDouble(std::numeric_limits<double>::denorm_min());
- TestDouble(-std::numeric_limits<double>::denorm_min());
- // Test exact encoding: on currently supported platforms, EncodeDouble
- // should produce exactly the same as the in-memory representation for non-NaN.
- for (int j = 0; j < 1000; ++j) {
- // Iterate over 9 specific bits exhaustively; the others are chosen randomly.
- // These specific bits are the sign bit, and the 2 top and bottom bits of
- // exponent and mantissa in the IEEE754 binary64 format.
- for (int x = 0; x < 512; ++x) {
- uint64_t v = InsecureRandBits(64);
- v &= ~(uint64_t{1} << 0);
- if (x & 1) v |= (uint64_t{1} << 0);
- v &= ~(uint64_t{1} << 1);
- if (x & 2) v |= (uint64_t{1} << 1);
- v &= ~(uint64_t{1} << 50);
- if (x & 4) v |= (uint64_t{1} << 50);
- v &= ~(uint64_t{1} << 51);
- if (x & 8) v |= (uint64_t{1} << 51);
- v &= ~(uint64_t{1} << 52);
- if (x & 16) v |= (uint64_t{1} << 52);
- v &= ~(uint64_t{1} << 53);
- if (x & 32) v |= (uint64_t{1} << 53);
- v &= ~(uint64_t{1} << 61);
- if (x & 64) v |= (uint64_t{1} << 61);
- v &= ~(uint64_t{1} << 62);
- if (x & 128) v |= (uint64_t{1} << 62);
- v &= ~(uint64_t{1} << 63);
- if (x & 256) v |= (uint64_t{1} << 63);
- double f;
- memcpy(&f, &v, 8);
- uint64_t v2 = TestDouble(f);
- if (!std::isnan(f)) BOOST_CHECK_EQUAL(v, v2);
+ // Test extreme values
+ BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::min()), 0x10000000000000);
+ BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits<double>::min()), 0x8010000000000000);
+ BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::max()), 0x7fefffffffffffff);
+ BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits<double>::max()), 0xffefffffffffffff);
+ BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::lowest()), 0xffefffffffffffff);
+ BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits<double>::lowest()), 0x7fefffffffffffff);
+ BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::denorm_min()), 0x1);
+ BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits<double>::denorm_min()), 0x8000000000000001);
+ // Note that all NaNs are encoded the same way.
+ BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::quiet_NaN()), 0x7ff8000000000000);
+ BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits<double>::quiet_NaN()), 0x7ff8000000000000);
+ BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::signaling_NaN()), 0x7ff8000000000000);
+ BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits<double>::signaling_NaN()), 0x7ff8000000000000);
+
+ // Construct doubles to test from the encoding.
+ static_assert(sizeof(double) == 8);
+ static_assert(sizeof(uint64_t) == 8);
+ for (int j = 0; j < 1000; ++j) {
+ // Iterate over 9 specific bits exhaustively; the others are chosen randomly.
+ // These specific bits are the sign bit, and the 2 top and bottom bits of
+ // exponent and mantissa in the IEEE754 binary64 format.
+ for (int x = 0; x < 512; ++x) {
+ uint64_t v = InsecureRandBits(64);
+ int x_pos = 0;
+ for (int v_pos : {0, 1, 50, 51, 52, 53, 61, 62, 63}) {
+ v &= ~(uint64_t{1} << v_pos);
+ if ((x >> (x_pos++)) & 1) v |= (uint64_t{1} << v_pos);
}
+ double f;
+ memcpy(&f, &v, 8);
+ TestDouble(f);
}
}
}
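The rewritten loop above collapses nine hand-unrolled clear/set pairs into a table of bit positions. A minimal standalone sketch of the same construction, with std::mt19937_64 standing in for the test's InsecureRandBits and the printout purely illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <random>

    int main()
    {
        std::mt19937_64 rng{42};
        // Sign bit, plus the two top and bottom bits of the exponent and mantissa
        // in the IEEE754 binary64 layout.
        const int positions[]{0, 1, 50, 51, 52, 53, 61, 62, 63};
        for (int x = 0; x < 512; ++x) {
            uint64_t v = rng();                 // random fill for the remaining 55 bits
            int x_pos = 0;
            for (int v_pos : positions) {
                v &= ~(uint64_t{1} << v_pos);   // clear the chosen bit...
                if ((x >> x_pos++) & 1) v |= (uint64_t{1} << v_pos); // ...then copy it from x
            }
            double f;
            std::memcpy(&f, &v, sizeof(f));     // reinterpret the 64-bit pattern as a double
            if (x < 4) std::printf("%016llx -> %g\n", static_cast<unsigned long long>(v), f);
        }
        return 0;
    }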
diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp
index f6456526bb..eb131dc6bb 100644
--- a/src/test/txpackage_tests.cpp
+++ b/src/test/txpackage_tests.cpp
@@ -132,7 +132,7 @@ BOOST_FIXTURE_TEST_CASE(package_validation_tests, TestChain100Setup)
/*output_amount=*/CAmount(48 * COIN), /*submit=*/false);
CTransactionRef tx_child = MakeTransactionRef(mtx_child);
Package package_parent_child{tx_parent, tx_child};
- const auto result_parent_child = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_parent_child, /*test_accept=*/true);
+ const auto result_parent_child = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_parent_child, /*test_accept=*/true, /*max_sane_feerate=*/{});
if (auto err_parent_child{CheckPackageMempoolAcceptResult(package_parent_child, result_parent_child, /*expect_valid=*/true, nullptr)}) {
BOOST_ERROR(err_parent_child.value());
} else {
@@ -151,7 +151,7 @@ BOOST_FIXTURE_TEST_CASE(package_validation_tests, TestChain100Setup)
CTransactionRef giant_ptx = create_placeholder_tx(999, 999);
BOOST_CHECK(GetVirtualTransactionSize(*giant_ptx) > DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1000);
Package package_single_giant{giant_ptx};
- auto result_single_large = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_single_giant, /*test_accept=*/true);
+ auto result_single_large = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_single_giant, /*test_accept=*/true, /*max_sane_feerate=*/{});
if (auto err_single_large{CheckPackageMempoolAcceptResult(package_single_giant, result_single_large, /*expect_valid=*/false, nullptr)}) {
BOOST_ERROR(err_single_large.value());
} else {
@@ -275,7 +275,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
package_unrelated.emplace_back(MakeTransactionRef(mtx));
}
auto result_unrelated_submit = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_unrelated, /*test_accept=*/false);
+ package_unrelated, /*test_accept=*/false, /*max_sane_feerate=*/{});
// We don't expect m_tx_results for each transaction when basic sanity checks haven't passed.
BOOST_CHECK(result_unrelated_submit.m_state.IsInvalid());
BOOST_CHECK_EQUAL(result_unrelated_submit.m_state.GetResult(), PackageValidationResult::PCKG_POLICY);
@@ -315,7 +315,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
// 3 Generations is not allowed.
{
auto result_3gen_submit = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_3gen, /*test_accept=*/false);
+ package_3gen, /*test_accept=*/false, /*max_sane_feerate=*/{});
BOOST_CHECK(result_3gen_submit.m_state.IsInvalid());
BOOST_CHECK_EQUAL(result_3gen_submit.m_state.GetResult(), PackageValidationResult::PCKG_POLICY);
BOOST_CHECK_EQUAL(result_3gen_submit.m_state.GetRejectReason(), "package-not-child-with-parents");
@@ -332,7 +332,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
CTransactionRef tx_parent_invalid = MakeTransactionRef(mtx_parent_invalid);
Package package_invalid_parent{tx_parent_invalid, tx_child};
auto result_quit_early = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_invalid_parent, /*test_accept=*/ false);
+ package_invalid_parent, /*test_accept=*/ false, /*max_sane_feerate=*/{});
if (auto err_parent_invalid{CheckPackageMempoolAcceptResult(package_invalid_parent, result_quit_early, /*expect_valid=*/false, m_node.mempool.get())}) {
BOOST_ERROR(err_parent_invalid.value());
} else {
@@ -353,7 +353,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
package_missing_parent.push_back(MakeTransactionRef(mtx_child));
{
const auto result_missing_parent = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_missing_parent, /*test_accept=*/false);
+ package_missing_parent, /*test_accept=*/false, /*max_sane_feerate=*/{});
BOOST_CHECK(result_missing_parent.m_state.IsInvalid());
BOOST_CHECK_EQUAL(result_missing_parent.m_state.GetResult(), PackageValidationResult::PCKG_POLICY);
BOOST_CHECK_EQUAL(result_missing_parent.m_state.GetRejectReason(), "package-not-child-with-unconfirmed-parents");
@@ -363,7 +363,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
// Submit package with parent + child.
{
const auto submit_parent_child = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_parent_child, /*test_accept=*/false);
+ package_parent_child, /*test_accept=*/false, /*max_sane_feerate=*/{});
expected_pool_size += 2;
BOOST_CHECK_MESSAGE(submit_parent_child.m_state.IsValid(),
"Package validation unexpectedly failed: " << submit_parent_child.m_state.GetRejectReason());
@@ -385,7 +385,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
// Already-in-mempool transactions should be detected and de-duplicated.
{
const auto submit_deduped = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_parent_child, /*test_accept=*/false);
+ package_parent_child, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_deduped{CheckPackageMempoolAcceptResult(package_parent_child, submit_deduped, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_deduped.value());
} else {
@@ -456,7 +456,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
{
Package package_parent_child1{ptx_parent, ptx_child1};
const auto submit_witness1 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_parent_child1, /*test_accept=*/false);
+ package_parent_child1, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_witness1{CheckPackageMempoolAcceptResult(package_parent_child1, submit_witness1, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_witness1.value());
}
@@ -464,7 +464,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
// Child2 would have been validated individually.
Package package_parent_child2{ptx_parent, ptx_child2};
const auto submit_witness2 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_parent_child2, /*test_accept=*/false);
+ package_parent_child2, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_witness2{CheckPackageMempoolAcceptResult(package_parent_child2, submit_witness2, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_witness2.value());
} else {
@@ -478,7 +478,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
// Deduplication should work when wtxid != txid. Submit package with the already-in-mempool
// transactions again, which should not fail.
const auto submit_segwit_dedup = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_parent_child1, /*test_accept=*/false);
+ package_parent_child1, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_segwit_dedup{CheckPackageMempoolAcceptResult(package_parent_child1, submit_segwit_dedup, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_segwit_dedup.value());
} else {
@@ -508,7 +508,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
{
Package package_child2_grandchild{ptx_child2, ptx_grandchild};
const auto submit_spend_ignored = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_child2_grandchild, /*test_accept=*/false);
+ package_child2_grandchild, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_spend_ignored{CheckPackageMempoolAcceptResult(package_child2_grandchild, submit_spend_ignored, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_spend_ignored.value());
} else {
@@ -606,7 +606,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
// parent3 should be accepted
// child should be accepted
{
- const auto mixed_result = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_mixed, false);
+ const auto mixed_result = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_mixed, false, /*max_sane_feerate=*/{});
if (auto err_mixed{CheckPackageMempoolAcceptResult(package_mixed, mixed_result, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_mixed.value());
} else {
@@ -670,7 +670,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup)
{
BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
const auto submit_cpfp_deprio = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_cpfp, /*test_accept=*/ false);
+ package_cpfp, /*test_accept=*/ false, /*max_sane_feerate=*/{});
if (auto err_cpfp_deprio{CheckPackageMempoolAcceptResult(package_cpfp, submit_cpfp_deprio, /*expect_valid=*/false, m_node.mempool.get())}) {
BOOST_ERROR(err_cpfp_deprio.value());
} else {
@@ -692,7 +692,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup)
{
BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
const auto submit_cpfp = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_cpfp, /*test_accept=*/ false);
+ package_cpfp, /*test_accept=*/ false, /*max_sane_feerate=*/{});
if (auto err_cpfp{CheckPackageMempoolAcceptResult(package_cpfp, submit_cpfp, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_cpfp.value());
} else {
@@ -744,7 +744,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup)
// Cheap package should fail for being too low fee.
{
const auto submit_package_too_low = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_still_too_low, /*test_accept=*/false);
+ package_still_too_low, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_package_too_low{CheckPackageMempoolAcceptResult(package_still_too_low, submit_package_too_low, /*expect_valid=*/false, m_node.mempool.get())}) {
BOOST_ERROR(err_package_too_low.value());
} else {
@@ -770,7 +770,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup)
// Now that the child's fees have "increased" by 1 BTC, the cheap package should succeed.
{
const auto submit_prioritised_package = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_still_too_low, /*test_accept=*/false);
+ package_still_too_low, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_prioritised{CheckPackageMempoolAcceptResult(package_still_too_low, submit_prioritised_package, /*expect_valid=*/true, m_node.mempool.get())}) {
BOOST_ERROR(err_prioritised.value());
} else {
@@ -818,7 +818,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup)
{
BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
const auto submit_rich_parent = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
- package_rich_parent, /*test_accept=*/false);
+ package_rich_parent, /*test_accept=*/false, /*max_sane_feerate=*/{});
if (auto err_rich_parent{CheckPackageMempoolAcceptResult(package_rich_parent, submit_rich_parent, /*expect_valid=*/false, m_node.mempool.get())}) {
BOOST_ERROR(err_rich_parent.value());
} else {
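Every call site above changes only by the new trailing argument; passing {} leaves the std::optional unset, so no feerate cap applies. A self-contained sketch of that gating pattern, with a hypothetical FeeRate struct standing in for CFeeRate:

    #include <cstdint>
    #include <cstdio>
    #include <optional>

    struct FeeRate {                    // stand-in for CFeeRate (satoshis per kvB)
        int64_t sat_per_kvb{0};
        bool operator>(const FeeRate& o) const { return sat_per_kvb > o.sat_per_kvb; }
    };

    // Returns false only when a cap is set and the candidate exceeds it.
    bool PassesMaxFeerate(FeeRate candidate, std::optional<FeeRate> max_feerate)
    {
        if (max_feerate && candidate > *max_feerate) return false;
        return true;
    }

    int main()
    {
        const FeeRate tx_rate{5'000};
        std::printf("%d\n", PassesMaxFeerate(tx_rate, /*max_feerate=*/{}));  // 1: unset, any rate passes
        std::printf("%d\n", PassesMaxFeerate(tx_rate, FeeRate{1'000}));      // 0: cap exceeded
    }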
diff --git a/src/test/txvalidation_tests.cpp b/src/test/txvalidation_tests.cpp
index e045949b43..95583b53bf 100644
--- a/src/test/txvalidation_tests.cpp
+++ b/src/test/txvalidation_tests.cpp
@@ -115,7 +115,9 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
const auto expected_error_str{strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
tx_v2_from_v3->GetHash().ToString(), tx_v2_from_v3->GetWitnessHash().ToString(),
mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())};
- BOOST_CHECK(*SingleV3Checks(tx_v2_from_v3, *ancestors_v2_from_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v3)) == expected_error_str);
+ auto result_v2_from_v3{SingleV3Checks(tx_v2_from_v3, *ancestors_v2_from_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v3))};
+ BOOST_CHECK_EQUAL(result_v2_from_v3->first, expected_error_str);
+ BOOST_CHECK_EQUAL(result_v2_from_v3->second, nullptr);
Package package_v3_v2{mempool_tx_v3, tx_v2_from_v3};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), package_v3_v2, empty_ancestors), expected_error_str);
@@ -130,8 +132,9 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
const auto expected_error_str_2{strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
tx_v2_from_v2_and_v3->GetHash().ToString(), tx_v2_from_v2_and_v3->GetWitnessHash().ToString(),
mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())};
- BOOST_CHECK(*SingleV3Checks(tx_v2_from_v2_and_v3, *ancestors_v2_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3))
- == expected_error_str_2);
+ auto result_v2_from_both{SingleV3Checks(tx_v2_from_v2_and_v3, *ancestors_v2_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3))};
+ BOOST_CHECK_EQUAL(result_v2_from_both->first, expected_error_str_2);
+ BOOST_CHECK_EQUAL(result_v2_from_both->second, nullptr);
Package package_v3_v2_v2{mempool_tx_v3, mempool_tx_v2, tx_v2_from_v2_and_v3};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v2_and_v3, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3), package_v3_v2_v2, empty_ancestors), expected_error_str_2);
@@ -147,7 +150,9 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
const auto expected_error_str{strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)",
tx_v3_from_v2->GetHash().ToString(), tx_v3_from_v2->GetWitnessHash().ToString(),
mempool_tx_v2->GetHash().ToString(), mempool_tx_v2->GetWitnessHash().ToString())};
- BOOST_CHECK(*SingleV3Checks(tx_v3_from_v2, *ancestors_v3_from_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2)) == expected_error_str);
+ auto result_v3_from_v2{SingleV3Checks(tx_v3_from_v2, *ancestors_v3_from_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2))};
+ BOOST_CHECK_EQUAL(result_v3_from_v2->first, expected_error_str);
+ BOOST_CHECK_EQUAL(result_v3_from_v2->second, nullptr);
Package package_v2_v3{mempool_tx_v2, tx_v3_from_v2};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), package_v2_v3, empty_ancestors), expected_error_str);
@@ -162,8 +167,9 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
const auto expected_error_str_2{strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)",
tx_v3_from_v2_and_v3->GetHash().ToString(), tx_v3_from_v2_and_v3->GetWitnessHash().ToString(),
mempool_tx_v2->GetHash().ToString(), mempool_tx_v2->GetWitnessHash().ToString())};
- BOOST_CHECK(*SingleV3Checks(tx_v3_from_v2_and_v3, *ancestors_v3_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3))
- == expected_error_str_2);
+ auto result_v3_from_both{SingleV3Checks(tx_v3_from_v2_and_v3, *ancestors_v3_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3))};
+ BOOST_CHECK_EQUAL(result_v3_from_both->first, expected_error_str_2);
+ BOOST_CHECK_EQUAL(result_v3_from_both->second, nullptr);
// tx_v3_from_v2_and_v3 also violates V3_ANCESTOR_LIMIT.
const auto expected_error_str_3{strprintf("tx %s (wtxid=%s) would have too many ancestors",
@@ -215,8 +221,9 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
BOOST_CHECK_EQUAL(ancestors->size(), 3);
const auto expected_error_str{strprintf("tx %s (wtxid=%s) would have too many ancestors",
tx_v3_multi_parent->GetHash().ToString(), tx_v3_multi_parent->GetWitnessHash().ToString())};
- BOOST_CHECK_EQUAL(*SingleV3Checks(tx_v3_multi_parent, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_parent)),
- expected_error_str);
+ auto result{SingleV3Checks(tx_v3_multi_parent, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_parent))};
+ BOOST_CHECK_EQUAL(result->first, expected_error_str);
+ BOOST_CHECK_EQUAL(result->second, nullptr);
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_multi_parent, GetVirtualTransactionSize(*tx_v3_multi_parent), package_multi_parents, empty_ancestors),
expected_error_str);
@@ -239,8 +246,9 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_multi_gen), m_limits)};
const auto expected_error_str{strprintf("tx %s (wtxid=%s) would have too many ancestors",
tx_v3_multi_gen->GetHash().ToString(), tx_v3_multi_gen->GetWitnessHash().ToString())};
- BOOST_CHECK_EQUAL(*SingleV3Checks(tx_v3_multi_gen, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_gen)),
- expected_error_str);
+ auto result{SingleV3Checks(tx_v3_multi_gen, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_gen))};
+ BOOST_CHECK_EQUAL(result->first, expected_error_str);
+ BOOST_CHECK_EQUAL(result->second, nullptr);
// Middle tx is what triggers a failure for the grandchild:
BOOST_CHECK_EQUAL(*PackageV3Checks(middle_tx, GetVirtualTransactionSize(*middle_tx), package_multi_gen, empty_ancestors), expected_error_str);
@@ -256,8 +264,9 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child_big), m_limits)};
const auto expected_error_str{strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes",
tx_v3_child_big->GetHash().ToString(), tx_v3_child_big->GetWitnessHash().ToString(), vsize, V3_CHILD_MAX_VSIZE)};
- BOOST_CHECK_EQUAL(*SingleV3Checks(tx_v3_child_big, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child_big)),
- expected_error_str);
+ auto result{SingleV3Checks(tx_v3_child_big, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child_big))};
+ BOOST_CHECK_EQUAL(result->first, expected_error_str);
+ BOOST_CHECK_EQUAL(result->second, nullptr);
Package package_child_big{mempool_tx_v3, tx_v3_child_big};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_child_big, GetVirtualTransactionSize(*tx_v3_child_big), package_child_big, empty_ancestors),
@@ -298,9 +307,10 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
const auto expected_error_str{strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes",
tx_many_sigops->GetHash().ToString(), tx_many_sigops->GetWitnessHash().ToString(),
total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, V3_CHILD_MAX_VSIZE)};
- BOOST_CHECK_EQUAL(*SingleV3Checks(tx_many_sigops, *ancestors, empty_conflicts_set,
- GetVirtualTransactionSize(*tx_many_sigops, /*nSigOpCost=*/total_sigops, /*bytes_per_sigop=*/ DEFAULT_BYTES_PER_SIGOP)),
- expected_error_str);
+ auto result{SingleV3Checks(tx_many_sigops, *ancestors, empty_conflicts_set,
+ GetVirtualTransactionSize(*tx_many_sigops, /*nSigOpCost=*/total_sigops, /*bytes_per_sigop=*/ DEFAULT_BYTES_PER_SIGOP))};
+ BOOST_CHECK_EQUAL(result->first, expected_error_str);
+ BOOST_CHECK_EQUAL(result->second, nullptr);
Package package_child_sigops{mempool_tx_v3, tx_many_sigops};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_many_sigops, total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, package_child_sigops, empty_ancestors),
@@ -319,22 +329,58 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
BOOST_CHECK(PackageV3Checks(tx_mempool_v3_child, GetVirtualTransactionSize(*tx_mempool_v3_child), package_v3_1p1c, empty_ancestors) == std::nullopt);
}
- // A v3 transaction cannot have more than 1 descendant.
- // Configuration where tx has multiple direct children.
+ // A v3 transaction cannot have more than 1 descendant. Sibling is returned when exactly 1 exists.
{
auto tx_v3_child2 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 1}}, /*version=*/3);
- auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child2), m_limits)};
+
+ // Configuration where parent already has 1 other child in mempool
+ auto ancestors_1sibling{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child2), m_limits)};
const auto expected_error_str{strprintf("tx %s (wtxid=%s) would exceed descendant count limit",
mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())};
- BOOST_CHECK_EQUAL(*SingleV3Checks(tx_v3_child2, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child2)),
- expected_error_str);
- // If replacing the child, make sure there is no double-counting.
- BOOST_CHECK(SingleV3Checks(tx_v3_child2, *ancestors, {tx_mempool_v3_child->GetHash()}, GetVirtualTransactionSize(*tx_v3_child2))
+ auto result_with_sibling_eviction{SingleV3Checks(tx_v3_child2, *ancestors_1sibling, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child2))};
+ BOOST_CHECK_EQUAL(result_with_sibling_eviction->first, expected_error_str);
+ // The other mempool child is returned to allow for sibling eviction.
+ BOOST_CHECK_EQUAL(result_with_sibling_eviction->second, tx_mempool_v3_child);
+
+ // If directly replacing the child, make sure there is no double-counting.
+ BOOST_CHECK(SingleV3Checks(tx_v3_child2, *ancestors_1sibling, {tx_mempool_v3_child->GetHash()}, GetVirtualTransactionSize(*tx_v3_child2))
== std::nullopt);
Package package_v3_1p2c{mempool_tx_v3, tx_mempool_v3_child, tx_v3_child2};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_child2, GetVirtualTransactionSize(*tx_v3_child2), package_v3_1p2c, empty_ancestors),
expected_error_str);
+
+ // Configuration where parent already has 2 other children in mempool (no sibling eviction allowed). This may happen as the result of a reorg.
+ pool.addUnchecked(entry.FromTx(tx_v3_child2));
+ auto tx_v3_child3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 24}}, /*version=*/3);
+ auto entry_mempool_parent = pool.GetIter(mempool_tx_v3->GetHash().ToUint256()).value();
+ BOOST_CHECK_EQUAL(entry_mempool_parent->GetCountWithDescendants(), 3);
+ auto ancestors_2siblings{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child3), m_limits)};
+
+ auto result_2children{SingleV3Checks(tx_v3_child3, *ancestors_2siblings, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child3))};
+ BOOST_CHECK_EQUAL(result_2children->first, expected_error_str);
+ // The other mempool child is not returned because sibling eviction is not allowed.
+ BOOST_CHECK_EQUAL(result_2children->second, nullptr);
+ }
+
+ // Sibling eviction: parent already has 1 other child, which also has its own child (no sibling eviction allowed). This may happen as the result of a reorg.
+ {
+ auto tx_mempool_grandparent = make_tx(random_outpoints(1), /*version=*/3);
+ auto tx_mempool_sibling = make_tx({COutPoint{tx_mempool_grandparent->GetHash(), 0}}, /*version=*/3);
+ auto tx_mempool_nibling = make_tx({COutPoint{tx_mempool_sibling->GetHash(), 0}}, /*version=*/3);
+ auto tx_to_submit = make_tx({COutPoint{tx_mempool_grandparent->GetHash(), 1}}, /*version=*/3);
+
+ pool.addUnchecked(entry.FromTx(tx_mempool_grandparent));
+ pool.addUnchecked(entry.FromTx(tx_mempool_sibling));
+ pool.addUnchecked(entry.FromTx(tx_mempool_nibling));
+
+ auto ancestors_3gen{pool.CalculateMemPoolAncestors(entry.FromTx(tx_to_submit), m_limits)};
+ const auto expected_error_str{strprintf("tx %s (wtxid=%s) would exceed descendant count limit",
+ tx_mempool_grandparent->GetHash().ToString(), tx_mempool_grandparent->GetWitnessHash().ToString())};
+ auto result_3gen{SingleV3Checks(tx_to_submit, *ancestors_3gen, empty_conflicts_set, GetVirtualTransactionSize(*tx_to_submit))};
+ BOOST_CHECK_EQUAL(result_3gen->first, expected_error_str);
+ // The other mempool child is not returned because sibling eviction is not allowed.
+ BOOST_CHECK_EQUAL(result_3gen->second, nullptr);
}
// Configuration where tx has multiple generations of descendants is not tested because that is
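These assertions unpack the new return shape of SingleV3Checks: an optional pair whose first member is the error string and whose second is the sibling that could be evicted, or nullptr when eviction is not an option. A generic sketch of that shape; the types and the CheckDescendantLimit helper here are illustrative, not the real API:

    #include <cstdio>
    #include <memory>
    #include <optional>
    #include <string>
    #include <utility>

    struct Tx { std::string id; };
    using TxRef = std::shared_ptr<const Tx>;

    // Error text plus, optionally, the single existing child that could be evicted.
    using CheckResult = std::optional<std::pair<std::string, TxRef>>;

    CheckResult CheckDescendantLimit(int existing_children, const TxRef& only_sibling)
    {
        if (existing_children == 0) return std::nullopt;  // within the limit
        if (existing_children == 1) {
            // Exactly one sibling: report the violation and hand back an eviction candidate.
            return std::make_pair(std::string{"would exceed descendant count limit"}, only_sibling);
        }
        // Two or more siblings: violation, but no single candidate to evict.
        return std::make_pair(std::string{"would exceed descendant count limit"}, TxRef{});
    }

    int main()
    {
        const auto sibling = std::make_shared<const Tx>(Tx{"sibling"});
        if (const auto res = CheckDescendantLimit(1, sibling)) {
            std::printf("%s; evict=%s\n", res->first.c_str(),
                        res->second ? res->second->id.c_str() : "none");
        }
    }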
diff --git a/src/test/util/chainstate.h b/src/test/util/chainstate.h
index e2a88eacdd..ff95e64b7e 100644
--- a/src/test/util/chainstate.h
+++ b/src/test/util/chainstate.h
@@ -91,13 +91,16 @@ CreateAndActivateUTXOSnapshot(
// these blocks instead
CBlockIndex *pindex = orig_tip;
while (pindex && pindex != chain.m_chain.Tip()) {
- pindex->nStatus &= ~BLOCK_HAVE_DATA;
- pindex->nStatus &= ~BLOCK_HAVE_UNDO;
- // We have to set the ASSUMED_VALID flag, because otherwise it
- // would not be possible to have a block index entry without HAVE_DATA
- // and with nTx > 0 (since we aren't setting the pruned flag);
- // see CheckBlockIndex().
- pindex->nStatus |= BLOCK_ASSUMED_VALID;
+ // Remove all data and validity flags by just setting
+ // BLOCK_VALID_TREE. Also reset transaction counts and sequence
+ // ids that are set when blocks are received, to make test setup
+ // more realistic and satisfy consistency checks in
+ // CheckBlockIndex().
+ assert(pindex->IsValid(BlockStatus::BLOCK_VALID_TREE));
+ pindex->nStatus = BlockStatus::BLOCK_VALID_TREE;
+ pindex->nTx = 0;
+ pindex->nChainTx = 0;
+ pindex->nSequenceId = 0;
pindex = pindex->pprev;
}
}
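The replacement assigns a single base validity level instead of clearing flags one at a time, which is what lets the data and higher-validity bits disappear in one statement. A toy illustration of that idea with made-up flag values; the real BlockStatus encoding uses a validity mask and different constants:

    #include <cstdint>
    #include <cstdio>

    // Made-up flags loosely modeled on BlockStatus: a validity level in the low bits,
    // data-availability bits above it.
    enum : uint32_t {
        VALID_TREE    = 0x02, // headers connected
        VALID_SCRIPTS = 0x05, // fully validated
        HAVE_DATA     = 0x08, // block payload stored
        HAVE_UNDO     = 0x10, // undo data stored
    };

    int main()
    {
        uint32_t status = VALID_SCRIPTS | HAVE_DATA | HAVE_UNDO;
        // One assignment drops the data bits and the higher validity level together,
        // rather than masking each flag off individually.
        status = VALID_TREE;
        std::printf("status=0x%02x have_data=%d\n",
                    static_cast<unsigned>(status),
                    static_cast<int>((status & HAVE_DATA) != 0));
    }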
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 4bbab1cdcd..4bf66a55eb 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -276,9 +276,6 @@ struct SnapshotTestSetup : TestChain100Setup {
BOOST_CHECK_EQUAL(
*node::ReadSnapshotBaseBlockhash(found),
*chainman.SnapshotBlockhash());
-
- // Ensure that the genesis block was not marked assumed-valid.
- BOOST_CHECK(!chainman.ActiveChain().Genesis()->IsAssumedValid());
}
const auto& au_data = ::Params().AssumeutxoForHeight(snapshot_height);
@@ -410,7 +407,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, SnapshotTestSetup)
//! - First, verify that setBlockIndexCandidates is as expected when using a single,
//! fully-validating chainstate.
//!
-//! - Then mark a region of the chain BLOCK_ASSUMED_VALID and introduce a second chainstate
+//! - Then mark a region of the chain as missing data and introduce a second chainstate
//! that will tolerate assumed-valid blocks. Run LoadBlockIndex() and ensure that the first
//! chainstate only contains fully validated blocks and the other chainstate contains all blocks,
//! except those marked assume-valid, because those entries don't HAVE_DATA.
@@ -421,7 +418,6 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup)
Chainstate& cs1 = chainman.ActiveChainstate();
int num_indexes{0};
- int num_assumed_valid{0};
// Blocks in range [assumed_valid_start_idx, last_assumed_valid_idx) will be
// marked as assumed-valid and not having data.
const int expected_assumed_valid{20};
@@ -456,35 +452,30 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup)
reload_all_block_indexes();
BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.size(), 1);
- // Mark some region of the chain assumed-valid, and remove the HAVE_DATA flag.
+ // Reset some region of the chain's nStatus, removing the HAVE_DATA flag.
for (int i = 0; i <= cs1.m_chain.Height(); ++i) {
LOCK(::cs_main);
auto index = cs1.m_chain[i];
- // Blocks with heights in range [91, 110] are marked ASSUMED_VALID
+ // Blocks with heights in range [91, 110] are marked as missing data.
if (i < last_assumed_valid_idx && i >= assumed_valid_start_idx) {
- index->nStatus = BlockStatus::BLOCK_VALID_TREE | BlockStatus::BLOCK_ASSUMED_VALID;
+ index->nStatus = BlockStatus::BLOCK_VALID_TREE;
+ index->nTx = 0;
+ index->nChainTx = 0;
}
++num_indexes;
- if (index->IsAssumedValid()) ++num_assumed_valid;
// Note the last fully-validated block as the expected validated tip.
if (i == (assumed_valid_start_idx - 1)) {
validated_tip = index;
- BOOST_CHECK(!index->IsAssumedValid());
}
// Note the last assumed valid block as the snapshot base
if (i == last_assumed_valid_idx - 1) {
assumed_base = index;
- BOOST_CHECK(index->IsAssumedValid());
- } else if (i == last_assumed_valid_idx) {
- BOOST_CHECK(!index->IsAssumedValid());
}
}
- BOOST_CHECK_EQUAL(expected_assumed_valid, num_assumed_valid);
-
// Note: cs2's tip is not set when ActivateExistingSnapshot is called.
Chainstate& cs2 = WITH_LOCK(::cs_main,
return chainman.ActivateExistingSnapshot(*assumed_base->phashBlock));
diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp
index 4de8833a3f..bce5602462 100644
--- a/src/util/fs_helpers.cpp
+++ b/src/util/fs_helpers.cpp
@@ -69,7 +69,7 @@ LockResult LockDirectory(const fs::path& directory, const fs::path& lockfile_nam
}
auto lock = std::make_unique<fsbridge::FileLock>(pathLockFile);
if (!lock->TryLock()) {
- error("Error while attempting to lock directory %s: %s", fs::PathToString(directory), lock->GetReason());
+ LogError("Error while attempting to lock directory %s: %s\n", fs::PathToString(directory), lock->GetReason());
return LockResult::ErrorLock;
}
if (!probe_only) {
@@ -249,20 +249,9 @@ fs::path GetSpecialFolderPath(int nFolder, bool fCreate)
bool RenameOver(fs::path src, fs::path dest)
{
-#ifdef __MINGW64__
- // This is a workaround for a bug in libstdc++ which
- // implements fs::rename with _wrename function.
- // This bug has been fixed in upstream:
- // - GCC 10.3: 8dd1c1085587c9f8a21bb5e588dfe1e8cdbba79e
- // - GCC 11.1: 1dfd95f0a0ca1d9e6cbc00e6cbfd1fa20a98f312
- // For more details see the commits mentioned above.
- return MoveFileExW(src.wstring().c_str(), dest.wstring().c_str(),
- MOVEFILE_REPLACE_EXISTING) != 0;
-#else
std::error_code error;
fs::rename(src, dest, error);
return !error;
-#endif
}
/**
diff --git a/src/util/strencodings.cpp b/src/util/strencodings.cpp
index a54f408496..7b5ded2975 100644
--- a/src/util/strencodings.cpp
+++ b/src/util/strencodings.cpp
@@ -81,6 +81,8 @@ template <typename Byte>
std::optional<std::vector<Byte>> TryParseHex(std::string_view str)
{
std::vector<Byte> vch;
+ vch.reserve(str.size() / 2); // two hex characters form a single byte
+
auto it = str.begin();
while (it != str.end()) {
if (IsSpace(*it)) {
@@ -444,6 +446,7 @@ bool ParseFixedPoint(std::string_view val, int decimals, int64_t *amount_out)
std::string ToLower(std::string_view str)
{
std::string r;
+ r.reserve(str.size());
for (auto ch : str) r += ToLower(ch);
return r;
}
@@ -451,6 +454,7 @@ std::string ToLower(std::string_view str)
std::string ToUpper(std::string_view str)
{
std::string r;
+ r.reserve(str.size());
for (auto ch : str) r += ToUpper(ch);
return r;
}
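All three hunks in this file are reserve() additions: the output size is known up front (half the input length for hex, one output character per input character for case conversion), so pre-allocating avoids repeated reallocation while appending. A minimal sketch of the case-conversion variant, using std::tolower as a stand-in for the project's locale-independent helper:

    #include <cctype>
    #include <cstdio>
    #include <string>
    #include <string_view>

    std::string ToLowerCopy(std::string_view str)
    {
        std::string r;
        r.reserve(str.size());            // output length is known: one char per input char
        for (unsigned char ch : str) r += static_cast<char>(std::tolower(ch));
        return r;
    }

    int main()
    {
        std::printf("%s\n", ToLowerCopy("Hello, WORLD").c_str()); // prints "hello, world"
    }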
diff --git a/src/validation.cpp b/src/validation.cpp
index c15e660499..0feda3f8a5 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -472,6 +472,11 @@ public:
* policies such as mempool min fee and min relay fee.
*/
const bool m_package_feerates;
+ /** Used for local submission of transactions to catch "absurd" fees
+ * due to fee miscalculation by wallets. std::nullopt implies unset, allowing any feerate.
+ * Any individual transaction failing this check causes immediate failure.
+ */
+ const std::optional<CFeeRate> m_client_maxfeerate;
/** Parameters for single transaction mempool validation. */
static ATMPArgs SingleAccept(const CChainParams& chainparams, int64_t accept_time,
@@ -485,6 +490,7 @@ public:
/* m_allow_replacement */ true,
/* m_package_submission */ false,
/* m_package_feerates */ false,
+ /* m_client_maxfeerate */ {}, // checked by caller
};
}
@@ -499,12 +505,13 @@ public:
/* m_allow_replacement */ false,
/* m_package_submission */ false, // not submitting to mempool
/* m_package_feerates */ false,
+ /* m_client_maxfeerate */ {}, // checked by caller
};
}
/** Parameters for child-with-unconfirmed-parents package validation. */
static ATMPArgs PackageChildWithParents(const CChainParams& chainparams, int64_t accept_time,
- std::vector<COutPoint>& coins_to_uncache) {
+ std::vector<COutPoint>& coins_to_uncache, std::optional<CFeeRate>& client_maxfeerate) {
return ATMPArgs{/* m_chainparams */ chainparams,
/* m_accept_time */ accept_time,
/* m_bypass_limits */ false,
@@ -513,6 +520,7 @@ public:
/* m_allow_replacement */ false,
/* m_package_submission */ true,
/* m_package_feerates */ true,
+ /* m_client_maxfeerate */ client_maxfeerate,
};
}
@@ -526,6 +534,7 @@ public:
/* m_allow_replacement */ true,
/* m_package_submission */ true, // do not LimitMempoolSize in Finalize()
/* m_package_feerates */ false, // only 1 transaction
+ /* m_client_maxfeerate */ package_args.m_client_maxfeerate,
};
}
@@ -539,7 +548,8 @@ public:
bool test_accept,
bool allow_replacement,
bool package_submission,
- bool package_feerates)
+ bool package_feerates,
+ std::optional<CFeeRate> client_maxfeerate)
: m_chainparams{chainparams},
m_accept_time{accept_time},
m_bypass_limits{bypass_limits},
@@ -547,7 +557,8 @@ public:
m_test_accept{test_accept},
m_allow_replacement{allow_replacement},
m_package_submission{package_submission},
- m_package_feerates{package_feerates}
+ m_package_feerates{package_feerates},
+ m_client_maxfeerate{client_maxfeerate}
{
}
};
@@ -589,12 +600,14 @@ private:
// of checking a given transaction.
struct Workspace {
explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
- /** Txids of mempool transactions that this transaction directly conflicts with. */
+ /** Txids of mempool transactions that this transaction directly conflicts with or may
+ * replace via sibling eviction. */
std::set<Txid> m_conflicts;
- /** Iterators to mempool entries that this transaction directly conflicts with. */
+ /** Iterators to mempool entries that this transaction directly conflicts with or may
+ * replace via sibling eviction. */
CTxMemPool::setEntries m_iters_conflicting;
/** Iterators to all mempool entries that would be replaced by this transaction, including
- * those it directly conflicts with and their descendants. */
+ * m_conflicts and their descendants. */
CTxMemPool::setEntries m_all_conflicting;
/** All mempool ancestors of this transaction. */
CTxMemPool::setEntries m_ancestors;
@@ -602,9 +615,12 @@ private:
* inserted into the mempool until Finalize(). */
std::unique_ptr<CTxMemPoolEntry> m_entry;
/** Pointers to the transactions that have been removed from the mempool and replaced by
- * this transaction, used to return to the MemPoolAccept caller. Only populated if
+ * this transaction (everything in m_all_conflicting), used to return to the MemPoolAccept caller. Only populated if
* validation is successful and the original transactions are removed. */
std::list<CTransactionRef> m_replaced_transactions;
+ /** Whether RBF-related data structures (m_conflicts, m_iters_conflicting, m_all_conflicting,
+ * m_replaced_transactions) include a sibling in addition to txns with conflicting inputs. */
+ bool m_sibling_eviction{false};
/** Virtual size of the transaction as used by the mempool, calculated using serialized size
* of the transaction and sigops. */
@@ -694,7 +710,8 @@ private:
Chainstate& m_active_chainstate;
- /** Whether the transaction(s) would replace any mempool transactions. If so, RBF rules apply. */
+ /** Whether the transaction(s) would replace any mempool transactions and/or evict any siblings.
+ * If so, RBF rules apply. */
bool m_rbf{false};
};
@@ -958,8 +975,27 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
}
ws.m_ancestors = *ancestors;
- if (const auto err_string{SingleV3Checks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) {
- return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "v3-rule-violation", *err_string);
+ // Even though just checking direct mempool parents for inheritance would be sufficient, we
+ // check using the full ancestor set here because it's more convenient to use what we have
+ // already calculated.
+ if (const auto err{SingleV3Checks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) {
+ // Disabled within package validation.
+ if (err->second != nullptr && args.m_allow_replacement) {
+ // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be
+ // included in RBF checks.
+ ws.m_conflicts.insert(err->second->GetHash());
+ // Adding the sibling to m_iters_conflicting here means that it doesn't count towards
+ // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from
+ // the descendant count is done separately in SingleV3Checks for v3 transactions.
+ ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value());
+ ws.m_sibling_eviction = true;
+ // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks.
+ // Note that we are not checking whether it opts in to replaceability via BIP125 or v3
+ // (which is normally done in PreChecks). However, the only way a v3 transaction can
+ // have a non-v3 and non-BIP125 descendant is due to a reorg.
+ } else {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "v3-rule-violation", err->first);
+ }
}
// A transaction that spends outputs that would be replaced by it is invalid. Now
@@ -999,18 +1035,21 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
// Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not
// TX_RECONSIDERABLE, because it cannot be bypassed using package validation.
// This must be changed if package RBF is enabled.
- return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
+ strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
// Calculate all conflicting entries and enforce Rule #5.
if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, ws.m_all_conflicting)}) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
- "too many potential replacements", *err_string);
+ strprintf("too many potential replacements%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
// Enforce Rule #2.
if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, ws.m_iters_conflicting)}) {
+ // Sibling eviction is only done for v3 transactions, which cannot have multiple ancestors.
+ Assume(!ws.m_sibling_eviction);
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
- "replacement-adds-unconfirmed", *err_string);
+ strprintf("replacement-adds-unconfirmed%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
// Check if it's economically rational to mine this transaction rather than the ones it
// replaces and pays for its own relay fees. Enforce Rules #3 and #4.
@@ -1023,7 +1062,8 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
// Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not
// TX_RECONSIDERABLE, because it cannot be bypassed using package validation.
// This must be changed if package RBF is enabled.
- return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
+ strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
return true;
}
@@ -1256,6 +1296,12 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef
return MempoolAcceptResult::Failure(ws.m_state);
}
+ // Individual modified feerate exceeded caller-defined max; abort
+ if (args.m_client_maxfeerate && CFeeRate(ws.m_modified_fees, ws.m_vsize) > args.m_client_maxfeerate.value()) {
+ ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "max feerate exceeded", "");
+ return MempoolAcceptResult::Failure(ws.m_state);
+ }
+
if (m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);
// Perform the inexpensive checks first and avoid hashing and signature verification unless
@@ -1316,6 +1362,16 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
return PackageMempoolAcceptResult(package_state, std::move(results));
}
+
+ // Individual modified feerate exceeded caller-defined max; abort
+ // N.B. this doesn't take into account CPFPs. Chunk-aware validation may be more robust.
+ if (args.m_client_maxfeerate && CFeeRate(ws.m_modified_fees, ws.m_vsize) > args.m_client_maxfeerate.value()) {
+ package_state.Invalid(PackageValidationResult::PCKG_TX, "max feerate exceeded");
+ // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
+ results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
+ return PackageMempoolAcceptResult(package_state, std::move(results));
+ }
+
// Make the coins created by this transaction available for subsequent transactions in the
// package to spend. Since we already checked conflicts in the package and we don't allow
// replacements, we don't need to track the coins spent. Note that this logic will need to be
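The cap comparison builds a feerate from the modified fee and the virtual size and fails the transaction when it exceeds the caller's limit. A worked example of the arithmetic, expressed in satoshis per 1000 virtual bytes (the unit CFeeRate tracks internally); the fee, size, and cap values are illustrative:

    #include <cstdint>
    #include <cstdio>

    // Feerate in satoshis per 1000 virtual bytes.
    int64_t SatPerKvb(int64_t fee_sat, int64_t vsize_vb)
    {
        return fee_sat * 1000 / vsize_vb;
    }

    int main()
    {
        const int64_t modified_fee = 50'000; // sats paid (after any prioritisation)
        const int64_t vsize = 250;           // virtual size in vbytes
        const int64_t client_cap = 100'000;  // hypothetical cap: 100k sat/kvB
        const int64_t rate = SatPerKvb(modified_fee, vsize); // 200'000 sat/kvB
        std::printf("rate=%lld cap=%lld rejected=%d\n",
                    static_cast<long long>(rate), static_cast<long long>(client_cap),
                    rate > client_cap);
    }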
@@ -1660,7 +1716,7 @@ MempoolAcceptResult AcceptToMemoryPool(Chainstate& active_chainstate, const CTra
}
PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxMemPool& pool,
- const Package& package, bool test_accept)
+ const Package& package, bool test_accept, std::optional<CFeeRate> client_maxfeerate)
{
AssertLockHeld(cs_main);
assert(!package.empty());
@@ -1674,7 +1730,7 @@ PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxM
auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache);
return MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args);
} else {
- auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache);
+ auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache, client_maxfeerate);
return MemPoolAccept(pool, active_chainstate).AcceptPackage(package, args);
}
}();
@@ -1995,10 +2051,10 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
return true;
}
-bool FatalError(Notifications& notifications, BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
+bool FatalError(Notifications& notifications, BlockValidationState& state, const bilingual_str& message)
{
- notifications.fatalError(strMessage, userMessage);
- return state.Error(strMessage);
+ notifications.fatalError(message);
+ return state.Error(message.original);
}
/**
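FatalError now takes a single bilingual message, and the surrounding hunks replace return error(...) with an explicit LogError(...) plus return false, because a logging-only call no longer doubles as the false return value. A small sketch of the two idioms side by side; the helpers here are illustrative, not the project's logging API:

    #include <cstdarg>
    #include <cstdio>

    // Old-style helper: logs and yields false, so callers could write "return Error(...)".
    static bool Error(const char* fmt, ...)
    {
        std::va_list args;
        va_start(args, fmt);
        std::vfprintf(stderr, fmt, args);
        va_end(args);
        return false;
    }

    // New-style helper: logging only; callers return false on a separate line.
    static void LogErr(const char* fmt, ...)
    {
        std::va_list args;
        va_start(args, fmt);
        std::vfprintf(stderr, fmt, args);
        va_end(args);
    }

    static bool OldStyle() { return Error("old: failure reading undo data\n"); }

    static bool NewStyle()
    {
        LogErr("new: failure reading undo data\n");
        return false;
    }

    int main()
    {
        std::printf("old=%d new=%d\n", OldStyle(), NewStyle());
    }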
@@ -2045,12 +2101,12 @@ DisconnectResult Chainstate::DisconnectBlock(const CBlock& block, const CBlockIn
CBlockUndo blockUndo;
if (!m_blockman.UndoReadFromDisk(blockUndo, *pindex)) {
- error("DisconnectBlock(): failure reading undo data");
+ LogError("DisconnectBlock(): failure reading undo data\n");
return DISCONNECT_FAILED;
}
if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
- error("DisconnectBlock(): block and undo data inconsistent");
+ LogError("DisconnectBlock(): block and undo data inconsistent\n");
return DISCONNECT_FAILED;
}
@@ -2089,7 +2145,7 @@ DisconnectResult Chainstate::DisconnectBlock(const CBlock& block, const CBlockIn
if (i > 0) { // not coinbases
CTxUndo &txundo = blockUndo.vtxundo[i-1];
if (txundo.vprevout.size() != tx.vin.size()) {
- error("DisconnectBlock(): transaction and undo data inconsistent");
+ LogError("DisconnectBlock(): transaction and undo data inconsistent\n");
return DISCONNECT_FAILED;
}
for (unsigned int j = tx.vin.size(); j > 0;) {
@@ -2220,9 +2276,10 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state,
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having hardware
// problems.
- return FatalError(m_chainman.GetNotifications(), state, "Corrupt block found indicating potential hardware failure; shutting down");
+ return FatalError(m_chainman.GetNotifications(), state, _("Corrupt block found indicating potential hardware failure."));
}
- return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
+ LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString());
+ return false;
}
// verify that the view's current state corresponds to the previous block
@@ -2408,7 +2465,8 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state,
// Any transaction validation failure in ConnectBlock is a block consensus failure
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(), tx_state.GetDebugMessage());
- return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
+ LogError("%s: Consensus::CheckTxInputs: %s, %s\n", __func__, tx.GetHash().ToString(), state.ToString());
+ return false;
}
nFees += txfee;
if (!MoneyRange(nFees)) {
@@ -2449,8 +2507,9 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state,
// Any transaction validation failure in ConnectBlock is a block consensus failure
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(), tx_state.GetDebugMessage());
- return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
+ LogError("ConnectBlock(): CheckInputScripts on %s failed with %s\n",
tx.GetHash().ToString(), state.ToString());
+ return false;
}
control.Add(std::move(vChecks));
}
@@ -2643,7 +2702,7 @@ bool Chainstate::FlushStateToDisk(
if (fDoFullFlush || fPeriodicWrite) {
// Ensure we can write block index
if (!CheckDiskSpace(m_blockman.m_opts.blocks_dir)) {
- return FatalError(m_chainman.GetNotifications(), state, "Disk space is too low!", _("Disk space is too low!"));
+ return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!"));
}
{
LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);
@@ -2661,7 +2720,7 @@ bool Chainstate::FlushStateToDisk(
LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
if (!m_blockman.WriteBlockIndexDB()) {
- return FatalError(m_chainman.GetNotifications(), state, "Failed to write to block index database");
+ return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to block index database."));
}
}
// Finally remove any pruned files
@@ -2683,11 +2742,11 @@ bool Chainstate::FlushStateToDisk(
// an overestimation, as most will delete an existing entry or
// overwrite one. Still, use a conservative safety factor of 2.
if (!CheckDiskSpace(m_chainman.m_options.datadir, 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
- return FatalError(m_chainman.GetNotifications(), state, "Disk space is too low!", _("Disk space is too low!"));
+ return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!"));
}
// Flush the chainstate (which may refer to block index entries).
if (!CoinsTip().Flush())
- return FatalError(m_chainman.GetNotifications(), state, "Failed to write to coin database");
+ return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to coin database."));
m_last_flush = nNow;
full_flush_completed = true;
TRACE5(utxocache, flush,
@@ -2703,7 +2762,7 @@ bool Chainstate::FlushStateToDisk(
m_chainman.m_options.signals->ChainStateFlushed(this->GetRole(), m_chain.GetLocator());
}
} catch (const std::runtime_error& e) {
- return FatalError(m_chainman.GetNotifications(), state, std::string("System error while flushing: ") + e.what());
+ return FatalError(m_chainman.GetNotifications(), state, strprintf(_("System error while flushing: %s"), e.what()));
}
return true;
}
@@ -2823,15 +2882,18 @@ bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
CBlock& block = *pblock;
if (!m_blockman.ReadBlockFromDisk(block, *pindexDelete)) {
- return error("DisconnectTip(): Failed to read block");
+ LogError("DisconnectTip(): Failed to read block\n");
+ return false;
}
// Apply the block atomically to the chain state.
const auto time_start{SteadyClock::now()};
{
CCoinsViewCache view(&CoinsTip());
assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
- if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
- return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
+ if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) {
+ LogError("DisconnectTip(): DisconnectBlock %s failed\n", pindexDelete->GetBlockHash().ToString());
+ return false;
+ }
bool flushed = view.Flush();
assert(flushed);
}
@@ -2936,7 +2998,7 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew,
if (!pblock) {
std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
if (!m_blockman.ReadBlockFromDisk(*pblockNew, *pindexNew)) {
- return FatalError(m_chainman.GetNotifications(), state, "Failed to read block");
+ return FatalError(m_chainman.GetNotifications(), state, _("Failed to read block."));
}
pthisBlock = pblockNew;
} else {
@@ -2960,7 +3022,8 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew,
if (!rv) {
if (state.IsInvalid())
InvalidBlockFound(pindexNew, state);
- return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
+ LogError("%s: ConnectBlock %s failed, %s\n", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
+ return false;
}
time_3 = SteadyClock::now();
time_connect_total += time_3 - time_2;
@@ -3122,7 +3185,7 @@ bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex*
// If we're unable to disconnect a block during normal operation,
// then that is a failure of our local system -- we should abort
// rather than stay on a less work chain.
- FatalError(m_chainman.GetNotifications(), state, "Failed to disconnect block; see debug.log for details");
+ FatalError(m_chainman.GetNotifications(), state, _("Failed to disconnect block."));
return false;
}
fBlocksDisconnected = true;
@@ -3624,7 +3687,18 @@ void ChainstateManager::ReceivedBlockTransactions(const CBlock& block, CBlockInd
{
AssertLockHeld(cs_main);
pindexNew->nTx = block.vtx.size();
- pindexNew->nChainTx = 0;
+ // Typically nChainTx will be 0 at this point, but it can be nonzero if this
+ // is a pruned block which is being downloaded again, or if this is an
+ // assumeutxo snapshot block which has a hardcoded nChainTx value from the
+ // snapshot metadata. If the pindex is not the snapshot block and the
+ // nChainTx value is not zero, assert that the value is actually correct.
+ auto prev_tx_sum = [](CBlockIndex& block) { return block.nTx + (block.pprev ? block.pprev->nChainTx : 0); };
+ if (!Assume(pindexNew->nChainTx == 0 || pindexNew->nChainTx == prev_tx_sum(*pindexNew) ||
+ pindexNew == GetSnapshotBaseBlock())) {
+ LogWarning("Internal bug detected: block %d has unexpected nChainTx %i that should be %i (%s %s). Please report this issue here: %s\n",
+ pindexNew->nHeight, pindexNew->nChainTx, prev_tx_sum(*pindexNew), PACKAGE_NAME, FormatFullVersion(), PACKAGE_BUGREPORT);
+ pindexNew->nChainTx = 0;
+ }
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
@@ -3644,7 +3718,15 @@ void ChainstateManager::ReceivedBlockTransactions(const CBlock& block, CBlockInd
while (!queue.empty()) {
CBlockIndex *pindex = queue.front();
queue.pop_front();
- pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
+ // Before setting nChainTx, assert that it is 0 or already set to
+ // the correct value. This assert will fail after receiving the
+ // assumeutxo snapshot block if assumeutxo snapshot metadata has an
+ // incorrect hardcoded AssumeutxoData::nChainTx value.
+ if (!Assume(pindex->nChainTx == 0 || pindex->nChainTx == prev_tx_sum(*pindex))) {
+ LogWarning("Internal bug detected: block %d has unexpected nChainTx %i that should be %i (%s %s). Please report this issue here: %s\n",
+ pindex->nHeight, pindex->nChainTx, prev_tx_sum(*pindex), PACKAGE_NAME, FormatFullVersion(), PACKAGE_BUGREPORT);
+ }
+ pindex->nChainTx = prev_tx_sum(*pindex);
pindex->nSequenceId = nBlockSequenceId++;
for (Chainstate *c : GetAll()) {
c->TryAddBlockIndexCandidate(pindex);
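The Assume guards compare any pre-existing nChainTx against the value it must have: the block's own transaction count plus the cumulative count of its predecessor. A small sketch of that recurrence on a toy chain:

    #include <cstdio>
    #include <vector>

    struct Index {
        unsigned nTx{0};        // transactions in this block
        unsigned nChainTx{0};   // cumulative transactions up to and including this block
        Index* pprev{nullptr};  // previous block, or nullptr for genesis
    };

    // Mirrors the prev_tx_sum lambda: the expected cumulative count for a block.
    unsigned PrevTxSum(const Index& block)
    {
        return block.nTx + (block.pprev ? block.pprev->nChainTx : 0);
    }

    int main()
    {
        std::vector<Index> chain(3);
        chain[0].nTx = 1;                       // genesis: coinbase only
        chain[1].nTx = 5; chain[1].pprev = &chain[0];
        chain[2].nTx = 3; chain[2].pprev = &chain[1];
        for (auto& b : chain) {
            b.nChainTx = PrevTxSum(b);          // 1, 6, 9
            std::printf("nChainTx=%u\n", b.nChainTx);
        }
    }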
@@ -4152,9 +4234,10 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>&
if (NotifyHeaderTip(*this)) {
if (IsInitialBlockDownload() && ppindex && *ppindex) {
const CBlockIndex& last_accepted{**ppindex};
- const int64_t blocks_left{(GetTime() - last_accepted.GetBlockTime()) / GetConsensus().nPowTargetSpacing};
+ int64_t blocks_left{(NodeClock::now() - last_accepted.Time()) / GetConsensus().PowTargetSpacing()};
+ blocks_left = std::max<int64_t>(0, blocks_left);
const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)};
- LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress);
+ LogInfo("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress);
}
}
return true;
@@ -4178,9 +4261,10 @@ void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t
bool initial_download = IsInitialBlockDownload();
GetNotifications().headerTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true);
if (initial_download) {
- const int64_t blocks_left{(GetTime() - timestamp) / GetConsensus().nPowTargetSpacing};
+ int64_t blocks_left{(NodeClock::now() - NodeSeconds{std::chrono::seconds{timestamp}}) / GetConsensus().PowTargetSpacing()};
+ blocks_left = std::max<int64_t>(0, blocks_left);
const double progress{100.0 * height / (height + blocks_left)};
- LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress);
+ LogInfo("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress);
}
}
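Both progress lines use the same estimate: divide the age of the newest header by the target block spacing to guess how many headers remain, clamp at zero (clock skew can make the tip appear to lie in the future), and express the current height as a share of the projected total. A compact sketch of that calculation; the height and spacing values are just examples:

    #include <algorithm>
    #include <chrono>
    #include <cstdio>

    // Rough header-sync progress: estimate remaining block intervals from the tip's age.
    double HeaderSyncProgress(int height, std::chrono::seconds tip_age, std::chrono::seconds spacing)
    {
        long long blocks_left = tip_age / spacing;   // integer division of two durations
        blocks_left = std::max(0LL, blocks_left);    // never negative, even with a future timestamp
        return 100.0 * height / (height + blocks_left);
    }

    int main()
    {
        using namespace std::chrono_literals;
        // A tip 1000 block intervals old at height 800000 reads as ~99.88% synced.
        std::printf("%.2f%%\n", HeaderSyncProgress(800000, 1000 * 600s, 600s));
    }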
@@ -4243,7 +4327,8 @@ bool ChainstateManager::AcceptBlock(const std::shared_ptr<const CBlock>& pblock,
pindex->nStatus |= BLOCK_FAILED_VALID;
m_blockman.m_dirty_blockindex.insert(pindex);
}
- return error("%s: %s", __func__, state.ToString());
+ LogError("%s: %s\n", __func__, state.ToString());
+ return false;
}
// Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
@@ -4262,7 +4347,7 @@ bool ChainstateManager::AcceptBlock(const std::shared_ptr<const CBlock>& pblock,
}
ReceivedBlockTransactions(block, pindex, blockPos);
} catch (const std::runtime_error& e) {
- return FatalError(GetNotifications(), state, std::string("System error: ") + e.what());
+ return FatalError(GetNotifications(), state, strprintf(_("System error while saving block to disk: %s"), e.what()));
}
// TODO: FlushStateToDisk() handles flushing of both block and chainstate
@@ -4306,7 +4391,8 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& blo
if (m_options.signals) {
m_options.signals->BlockChecked(*block, state);
}
- return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
+ LogError("%s: AcceptBlock FAILED (%s)\n", __func__, state.ToString());
+ return false;
}
}
@@ -4314,13 +4400,15 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& blo
BlockValidationState state; // Only used to report errors, not invalidity - ignore it
if (!ActiveChainstate().ActivateBestChain(state, block)) {
- return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
+ LogError("%s: ActivateBestChain failed (%s)\n", __func__, state.ToString());
+ return false;
}
Chainstate* bg_chain{WITH_LOCK(cs_main, return BackgroundSyncInProgress() ? m_ibd_chainstate.get() : nullptr)};
BlockValidationState bg_state;
if (bg_chain && !bg_chain->ActivateBestChain(bg_state, block)) {
- return error("%s: [background] ActivateBestChain failed (%s)", __func__, bg_state.ToString());
+ LogError("%s: [background] ActivateBestChain failed (%s)\n", __func__, bg_state.ToString());
+ return false;
}
return true;
@@ -4358,12 +4446,18 @@ bool TestBlockValidity(BlockValidationState& state,
indexDummy.phashBlock = &block_hash;
// NOTE: CheckBlockHeader is called by CheckBlock
- if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev))
- return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
- if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
- return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
- if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev))
- return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
+ if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev)) {
+ LogError("%s: Consensus::ContextualCheckBlockHeader: %s\n", __func__, state.ToString());
+ return false;
+ }
+ if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) {
+ LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString());
+ return false;
+ }
+ if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev)) {
+ LogError("%s: Consensus::ContextualCheckBlock: %s\n", __func__, state.ToString());
+ return false;
+ }
if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) {
return false;
}
@@ -4567,7 +4661,8 @@ bool Chainstate::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& in
// TODO: merge with ConnectBlock
CBlock block;
if (!m_blockman.ReadBlockFromDisk(block, *pindex)) {
- return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogError("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return false;
}
for (const CTransactionRef& tx : block.vtx) {
@@ -4591,7 +4686,10 @@ bool Chainstate::ReplayBlocks()
std::vector<uint256> hashHeads = db.GetHeadBlocks();
if (hashHeads.empty()) return true; // We're already in a consistent state.
- if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
+ if (hashHeads.size() != 2) {
+ LogError("ReplayBlocks(): unknown inconsistent state\n");
+ return false;
+ }
m_chainman.GetNotifications().progress(_("Replaying blocks…"), 0, false);
LogPrintf("Replaying blocks\n");
@@ -4601,13 +4699,15 @@ bool Chainstate::ReplayBlocks()
const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
- return error("ReplayBlocks(): reorganization to unknown block requested");
+ LogError("ReplayBlocks(): reorganization to unknown block requested\n");
+ return false;
}
pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);
if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
- return error("ReplayBlocks(): reorganization from unknown block requested");
+ LogError("ReplayBlocks(): reorganization from unknown block requested\n");
+ return false;
}
pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
pindexFork = LastCommonAncestor(pindexOld, pindexNew);
@@ -4619,12 +4719,14 @@ bool Chainstate::ReplayBlocks()
if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
CBlock block;
if (!m_blockman.ReadBlockFromDisk(block, *pindexOld)) {
- return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ LogError("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ return false;
}
LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
if (res == DISCONNECT_FAILED) {
- return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ LogError("RollbackBlock(): DisconnectBlock failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ return false;
}
// If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
// overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
@@ -4743,12 +4845,14 @@ bool Chainstate::LoadGenesisBlock()
const CBlock& block = params.GenesisBlock();
FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, nullptr)};
if (blockPos.IsNull()) {
- return error("%s: writing genesis block to disk failed", __func__);
+ LogError("%s: writing genesis block to disk failed\n", __func__);
+ return false;
}
CBlockIndex* pindex = m_blockman.AddToBlockIndex(block, m_chainman.m_best_header);
m_chainman.ReceivedBlockTransactions(block, pindex, blockPos);
} catch (const std::runtime_error& e) {
- return error("%s: failed to write genesis block: %s", __func__, e.what());
+ LogError("%s: failed to write genesis block: %s\n", __func__, e.what());
+ return false;
}
return true;
@@ -4927,7 +5031,7 @@ void ChainstateManager::LoadExternalBlockFile(
}
}
} catch (const std::runtime_error& e) {
- GetNotifications().fatalError(std::string("System error: ") + e.what());
+ GetNotifications().fatalError(strprintf(_("System error while loading external block file: %s"), e.what()));
}
LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
}
@@ -4967,16 +5071,31 @@ void ChainstateManager::CheckBlockIndex()
size_t nNodes = 0;
int nHeight = 0;
CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
- CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
- CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
+ CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA, counted from the assumeutxo snapshot if one is used.
+ CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0, counted from the assumeutxo snapshot if one is used.
CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
- CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
- CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
- CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
- CBlockIndex* pindexFirstAssumeValid = nullptr; // Oldest ancestor of pindex which has BLOCK_ASSUMED_VALID
+ CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not), counted from the assumeutxo snapshot if one is used.
+ CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not), counted from the assumeutxo snapshot if one is used.
+ CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not), counted from the assumeutxo snapshot if one is used.
+
+ // After checking an assumeutxo snapshot block, reset pindexFirst pointers
+ // to earlier blocks that have not been downloaded or validated yet, so
+ // checks for later blocks can assume the earlier blocks were validated and
+ // be stricter, testing for more requirements.
+ const CBlockIndex* snap_base{GetSnapshotBaseBlock()};
+ CBlockIndex *snap_first_missing{}, *snap_first_notx{}, *snap_first_notv{}, *snap_first_nocv{}, *snap_first_nosv{};
+ auto snap_update_firsts = [&] {
+ if (pindex == snap_base) {
+ std::swap(snap_first_missing, pindexFirstMissing);
+ std::swap(snap_first_notx, pindexFirstNeverProcessed);
+ std::swap(snap_first_notv, pindexFirstNotTransactionsValid);
+ std::swap(snap_first_nocv, pindexFirstNotChainValid);
+ std::swap(snap_first_nosv, pindexFirstNotScriptsValid);
+ }
+ };
+
while (pindex != nullptr) {
nNodes++;
- if (pindexFirstAssumeValid == nullptr && pindex->nStatus & BLOCK_ASSUMED_VALID) pindexFirstAssumeValid = pindex;
if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
pindexFirstMissing = pindex;
@@ -4984,10 +5103,7 @@ void ChainstateManager::CheckBlockIndex()
if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
- if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
- // Skip validity flag checks for BLOCK_ASSUMED_VALID index entries, since these
- // *_VALID_MASK flags will not be present for index entries we are temporarily assuming
- // valid.
+ if (pindex->pprev != nullptr) {
if (pindexFirstNotTransactionsValid == nullptr &&
(pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) {
pindexFirstNotTransactionsValid = pindex;
@@ -5017,36 +5133,26 @@ void ChainstateManager::CheckBlockIndex()
if (!pindex->HaveNumChainTxs()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
// HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
- // Unless these indexes are assumed valid and pending block download on a
- // background chainstate.
- if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) {
+ if (!m_blockman.m_have_pruned) {
// If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
- if (pindexFirstAssumeValid == nullptr) {
- // If we've got some assume valid blocks, then we might have
- // missing blocks (not HAVE_DATA) but still treat them as
- // having been processed (with a fake nTx value). Otherwise, we
- // can assert that these are the same.
- assert(pindexFirstMissing == pindexFirstNeverProcessed);
- }
+ assert(pindexFirstMissing == pindexFirstNeverProcessed);
} else {
// If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
}
if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
- if (pindex->IsAssumedValid()) {
- // Assumed-valid blocks should have some nTx value.
- assert(pindex->nTx > 0);
+ if (snap_base && snap_base->GetAncestor(pindex->nHeight) == pindex) {
// Assumed-valid blocks should connect to the main chain.
assert((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE);
- } else {
- // Otherwise there should only be an nTx value if we have
- // actually seen a block's transactions.
- assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
}
+ // There should only be an nTx value if we have
+ // actually seen a block's transactions.
+ assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
// All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveNumChainTxs().
- assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveNumChainTxs());
- assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveNumChainTxs());
+ // HaveNumChainTxs will also be true for the assumeutxo snapshot block, whose nChainTx is set from snapshot metadata.
+ assert((pindexFirstNeverProcessed == nullptr || pindex == snap_base) == pindex->HaveNumChainTxs());
+ assert((pindexFirstNotTransactionsValid == nullptr || pindex == snap_base) == pindex->HaveNumChainTxs());
assert(pindex->nHeight == nHeight); // nHeight must be consistent.
assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
@@ -5059,30 +5165,64 @@ void ChainstateManager::CheckBlockIndex()
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
}
// Make sure nChainTx sum is correctly computed.
- unsigned int prev_chain_tx = pindex->pprev ? pindex->pprev->nChainTx : 0;
- assert((pindex->nChainTx == pindex->nTx + prev_chain_tx)
- // Transaction may be completely unset - happens if only the header was accepted but the block hasn't been processed.
- || (pindex->nChainTx == 0 && pindex->nTx == 0)
- // nChainTx may be unset, but nTx set (if a block has been accepted, but one of its predecessors hasn't been processed yet)
- || (pindex->nChainTx == 0 && prev_chain_tx == 0 && pindex->pprev)
- // Transaction counts prior to snapshot are unknown.
- || pindex->IsAssumedValid());
+ if (!pindex->pprev) {
+ // If no previous block, nTx and nChainTx must be the same.
+ assert(pindex->nChainTx == pindex->nTx);
+ } else if (pindex->pprev->nChainTx > 0 && pindex->nTx > 0) {
+ // If previous nChainTx is set and number of transactions in block is known, sum must be set.
+ assert(pindex->nChainTx == pindex->nTx + pindex->pprev->nChainTx);
+ } else {
+ // Otherwise nChainTx should only be set if this is a snapshot
+ // block, and must be set if it is.
+ assert((pindex->nChainTx != 0) == (pindex == snap_base));
+ }
+
// Chainstate-specific checks on setBlockIndexCandidates
for (auto c : GetAll()) {
if (c->m_chain.Tip() == nullptr) continue;
- if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
+ // Two main factors determine whether pindex is a candidate in
+ // setBlockIndexCandidates:
+ //
+ // - If pindex has less work than the chain tip, it should not be a
+ // candidate, and this will be asserted below. Otherwise it is a
+ // potential candidate.
+ //
+ // - If pindex or one of its parent blocks back to the genesis block
+ // or an assumeutxo snapshot never downloaded transactions
+ // (pindexFirstNeverProcessed is non-null), it should not be a
+ // candidate, and this will be asserted below. The only exception
+ // is if pindex itself is an assumeutxo snapshot block. Then it is
+ // also a potential candidate.
+ if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && (pindexFirstNeverProcessed == nullptr || pindex == snap_base)) {
+ // If pindex was detected as invalid (pindexFirstInvalid is
+ // non-null), it is not required to be in
+ // setBlockIndexCandidates.
if (pindexFirstInvalid == nullptr) {
- const bool is_active = c == &ActiveChainstate();
- // If this block sorts at least as good as the current tip and
- // is valid and we have all data for its parents, it must be in
- // setBlockIndexCandidates. m_chain.Tip() must also be there
- // even if some data has been pruned.
+ // If pindex and all its parents back to the genesis block
+ // or an assumeutxo snapshot block downloaded transactions,
+ // and the transactions were not pruned (pindexFirstMissing
+ // is null), it is a potential candidate. The check
+ // excludes pruned blocks, because if any blocks were
+ // pruned between pindex and the current chain tip, pindex will
+ // only temporarily be added to setBlockIndexCandidates,
+ // before being moved to m_blocks_unlinked. This check
+ // could be improved to verify that if all blocks between
+ // the chain tip and pindex have data, pindex must be a
+ // candidate.
+ //
+ // If pindex is the chain tip, it also is a potential
+ // candidate.
//
- if ((pindexFirstMissing == nullptr || pindex == c->m_chain.Tip())) {
- // The active chainstate should always have this block
- // as a candidate, but a background chainstate should
- // only have it if it is an ancestor of the snapshot base.
- if (is_active || GetSnapshotBaseBlock()->GetAncestor(pindex->nHeight) == pindex) {
+ // If the chainstate was loaded from a snapshot and pindex
+ // is the base of the snapshot, pindex is also a potential
+ // candidate.
+ if (pindexFirstMissing == nullptr || pindex == c->m_chain.Tip() || pindex == c->SnapshotBase()) {
+ // If this chainstate is the active chainstate, pindex
+ // must be in setBlockIndexCandidates. Otherwise, this
+ // chainstate is a background validation chainstate, and
+ // pindex only needs to be added if it is an ancestor of
+ // the snapshot that is being validated.
+ if (c == &ActiveChainstate() || snap_base->GetAncestor(pindex->nHeight) == pindex) {
assert(c->setBlockIndexCandidates.count(pindex));
}
}
@@ -5113,7 +5253,7 @@ void ChainstateManager::CheckBlockIndex()
if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
// We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
- assert(m_blockman.m_have_pruned || pindexFirstAssumeValid != nullptr); // We must have pruned, or else we're using a snapshot (causing us to have faked the received data for some parent(s)).
+ assert(m_blockman.m_have_pruned);
// This block may have entered m_blocks_unlinked if:
// - it has a descendant that at some point had more work than the
// tip, and
@@ -5126,7 +5266,7 @@ void ChainstateManager::CheckBlockIndex()
const bool is_active = c == &ActiveChainstate();
if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && c->setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == nullptr) {
- if (is_active || GetSnapshotBaseBlock()->GetAncestor(pindex->nHeight) == pindex) {
+ if (is_active || snap_base->GetAncestor(pindex->nHeight) == pindex) {
assert(foundInUnlinked);
}
}
@@ -5137,6 +5277,7 @@ void ChainstateManager::CheckBlockIndex()
// End: actual consistency checks.
// Try descending into the first subnode.
+ snap_update_firsts();
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
if (range.first != range.second) {
// A subnode was found.
@@ -5148,6 +5289,7 @@ void ChainstateManager::CheckBlockIndex()
// Move upwards until we reach a node of which we have not yet visited the last child.
while (pindex) {
// We are going to either move to a parent or a sibling of pindex.
+ snap_update_firsts();
// If pindex was the first with a certain property, unset the corresponding variable.
if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
@@ -5156,7 +5298,6 @@ void ChainstateManager::CheckBlockIndex()
if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
- if (pindex == pindexFirstAssumeValid) pindexFirstAssumeValid = nullptr;
// Find our parent.
CBlockIndex* pindexPar = pindex->pprev;
// Find which child we just visited.
@@ -5230,6 +5371,12 @@ double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pin
if (pindex == nullptr)
return 0.0;
+ if (!Assume(pindex->nChainTx > 0)) {
+ LogWarning("Internal bug detected: block %d has unset nChainTx (%s %s). Please report this issue here: %s\n",
+ pindex->nHeight, PACKAGE_NAME, FormatFullVersion(), PACKAGE_BUGREPORT);
+ return 0.0;
+ }
+
int64_t nNow = time(nullptr);
double fTxTotal;
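
For context, an editor's sketch (not part of the patch) of why the new Assume() guard matters: the remainder of this function, which this hunk does not show, divides the cumulative transaction count at pindex by a projected network-wide total, so an unset nChainTx would silently yield a meaningless ratio instead of the warning now logged above. Assuming ChainTxData carries nTime, nTxCount and dTxRate as it does elsewhere in the codebase, the estimate has roughly this shape:

    // Rough shape of the estimate (editor's sketch; details elided from this hunk):
    // fTxTotal ~= data.nTxCount + (nNow - data.nTime) * data.dTxRate;  // projected total transaction count
    // progress ~= pindex->nChainTx / fTxTotal;                         // only meaningful when nChainTx > 0
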
@@ -5394,8 +5541,8 @@ bool ChainstateManager::ActivateSnapshot(
snapshot_chainstate.reset();
bool removed = DeleteCoinsDBFromDisk(*snapshot_datadir, /*is_snapshot=*/true);
if (!removed) {
- GetNotifications().fatalError(strprintf("Failed to remove snapshot chainstate dir (%s). "
- "Manually remove it before restarting.\n", fs::PathToString(*snapshot_datadir)));
+ GetNotifications().fatalError(strprintf(_("Failed to remove snapshot chainstate dir (%s). "
+ "Manually remove it before restarting.\n"), fs::PathToString(*snapshot_datadir)));
}
}
return false;
@@ -5636,30 +5783,14 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
// Fake various pieces of CBlockIndex state:
CBlockIndex* index = nullptr;
- // Don't make any modifications to the genesis block.
- // This is especially important because we don't want to erroneously
- // apply BLOCK_ASSUMED_VALID to genesis, which would happen if we didn't skip
- // it here (since it apparently isn't BLOCK_VALID_SCRIPTS).
+ // Don't make any modifications to the genesis block since it shouldn't be
+ // necessary, and since the genesis block doesn't have normal flags like
+ // BLOCK_VALID_SCRIPTS set.
constexpr int AFTER_GENESIS_START{1};
for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height(); ++i) {
index = snapshot_chainstate.m_chain[i];
- // Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex
- // entries (among other things)
- if (!index->nTx) {
- index->nTx = 1;
- }
- // Fake nChainTx so that GuessVerificationProgress reports accurately
- index->nChainTx = index->pprev->nChainTx + index->nTx;
-
- // Mark unvalidated block index entries beneath the snapshot base block as assumed-valid.
- if (!index->IsValid(BLOCK_VALID_SCRIPTS)) {
- // This flag will be removed once the block is fully validated by a
- // background chainstate.
- index->nStatus |= BLOCK_ASSUMED_VALID;
- }
-
// Fake BLOCK_OPT_WITNESS so that Chainstate::NeedsRedownload()
// won't ask to rewind the entire assumed-valid chain on startup.
if (DeploymentActiveAt(*index, *this, Consensus::DEPLOYMENT_SEGWIT)) {
@@ -5675,6 +5806,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
}
assert(index);
+ assert(index == snapshot_start_block);
index->nChainTx = au_data.nChainTx;
snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);
@@ -5749,7 +5881,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation()
user_error = strprintf(Untranslated("%s\n%s"), user_error, util::ErrorString(rename_result));
}
- GetNotifications().fatalError(user_error.original, user_error);
+ GetNotifications().fatalError(user_error);
};
if (index_new.GetBlockHash() != snapshot_blockhash) {
@@ -6090,9 +6222,9 @@ bool ChainstateManager::ValidatedSnapshotCleanup()
const fs::filesystem_error& err) {
LogPrintf("Error renaming path (%s) -> (%s): %s\n",
fs::PathToString(p_old), fs::PathToString(p_new), err.what());
- GetNotifications().fatalError(strprintf(
+ GetNotifications().fatalError(strprintf(_(
"Rename of '%s' -> '%s' failed. "
- "Cannot clean up the background chainstate leveldb directory.",
+ "Cannot clean up the background chainstate leveldb directory."),
fs::PathToString(p_old), fs::PathToString(p_new)));
};
diff --git a/src/validation.h b/src/validation.h
index 71aac46f81..0f00a48b9c 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -93,7 +93,7 @@ extern const std::vector<std::string> CHECKLEVEL_DOC;
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
-bool FatalError(kernel::Notifications& notifications, BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = {});
+bool FatalError(kernel::Notifications& notifications, BlockValidationState& state, const bilingual_str& message);
/** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). */
double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex* pindex);
@@ -274,13 +274,15 @@ MempoolAcceptResult AcceptToMemoryPool(Chainstate& active_chainstate, const CTra
/**
* Validate (and maybe submit) a package to the mempool. See doc/policy/packages.md for full details
* on package validation rules.
-* @param[in] test_accept When true, run validation checks but don't submit to mempool.
+* @param[in] test_accept When true, run validation checks but don't submit to mempool.
+* @param[in] max_sane_feerate If exceeded by an individual transaction, the rest of the (sub)package evaluation is aborted.
+* Only for sanity checks against local submission of transactions.
* @returns a PackageMempoolAcceptResult which includes a MempoolAcceptResult for each transaction.
* If a transaction fails, validation will exit early and some results may be missing. It is also
* possible for the package to be partially submitted.
*/
PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxMemPool& pool,
- const Package& txns, bool test_accept)
+ const Package& txns, bool test_accept, std::optional<CFeeRate> max_sane_feerate)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/* Mempool validation helper functions */
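
For illustration only, an editor's sketch of a call under the new signature (not taken from this patch); the 1 BTC/kvB cap value, the surrounding objects and the locking context are assumptions:

    // Hypothetical calling site; active_chainstate, mempool and txns are assumed to exist.
    AssertLockHeld(cs_main);
    const std::optional<CFeeRate> max_sane_feerate{CFeeRate{1 * COIN, 1000}}; // arbitrary sanity cap
    const auto result{ProcessNewPackage(active_chainstate, mempool, txns,
                                        /*test_accept=*/true, max_sane_feerate)};

Passing std::nullopt presumably leaves the previous behaviour unchanged, i.e. no per-transaction feerate cap is applied.
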
@@ -583,9 +585,10 @@ public:
const CBlockIndex* SnapshotBase() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/**
- * The set of all CBlockIndex entries with either BLOCK_VALID_TRANSACTIONS (for
- * itself and all ancestors) *or* BLOCK_ASSUMED_VALID (if using background
- * chainstates) and as good as our current tip or better. Entries may be failed,
+ * The set of all CBlockIndex entries that have as much work as our current
+ * tip or more, and the transaction data needed to validate them (with
+ * BLOCK_VALID_TRANSACTIONS for each block and its parents back to the
+ * genesis block or an assumeutxo snapshot block). Entries may be failed,
* though, and pruning nodes may be missing the data for the block.
*/
std::set<CBlockIndex*, node::CBlockIndexWorkComparator> setBlockIndexCandidates;
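
Restating the membership rule above as code, an editor's sketch rather than anything in the patch; tie-breaking inside the comparator and the snapshot-base exception described in CheckBlockIndex are glossed over:

    // Hypothetical predicate mirroring the doc comment on setBlockIndexCandidates.
    bool IsCandidateLike(const CBlockIndex* pindex, const CBlockIndex* tip)
    {
        const bool enough_work{!node::CBlockIndexWorkComparator()(pindex, tip)};
        // Per the comment in CheckBlockIndex, HaveNumChainTxs() is equivalent to
        // BLOCK_VALID_TRANSACTIONS for pindex and all of its ancestors.
        const bool txs_validated{pindex->HaveNumChainTxs()};
        return enough_work && txs_validated;
    }
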
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index 63c2737706..d10db046f5 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -41,7 +41,7 @@ std::list<const CZMQAbstractNotifier*> CZMQNotificationInterface::GetActiveNotif
return result;
}
-std::unique_ptr<CZMQNotificationInterface> CZMQNotificationInterface::Create(std::function<bool(CBlock&, const CBlockIndex&)> get_block_by_index)
+std::unique_ptr<CZMQNotificationInterface> CZMQNotificationInterface::Create(std::function<bool(std::vector<uint8_t>&, const CBlockIndex&)> get_block_by_index)
{
std::map<std::string, CZMQNotifierFactory> factories;
factories["pubhashblock"] = CZMQAbstractNotifier::Create<CZMQPublishHashBlockNotifier>;
diff --git a/src/zmq/zmqnotificationinterface.h b/src/zmq/zmqnotificationinterface.h
index 45d0982bd3..c879fdd0dd 100644
--- a/src/zmq/zmqnotificationinterface.h
+++ b/src/zmq/zmqnotificationinterface.h
@@ -12,6 +12,7 @@
#include <functional>
#include <list>
#include <memory>
+#include <vector>
class CBlock;
class CBlockIndex;
@@ -25,7 +26,7 @@ public:
std::list<const CZMQAbstractNotifier*> GetActiveNotifiers() const;
- static std::unique_ptr<CZMQNotificationInterface> Create(std::function<bool(CBlock&, const CBlockIndex&)> get_block_by_index);
+ static std::unique_ptr<CZMQNotificationInterface> Create(std::function<bool(std::vector<uint8_t>&, const CBlockIndex&)> get_block_by_index);
protected:
bool Initialize();
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index 0f20706364..608870c489 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -243,16 +243,13 @@ bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
{
LogPrint(BCLog::ZMQ, "Publish rawblock %s to %s\n", pindex->GetBlockHash().GetHex(), this->address);
- DataStream ss;
- CBlock block;
+ std::vector<uint8_t> block{};
if (!m_get_block_by_index(block, *pindex)) {
zmqError("Can't read block from disk");
return false;
}
- ss << TX_WITH_WITNESS(block);
-
- return SendZmqMessage(MSG_RAWBLOCK, &(*ss.begin()), ss.size());
+ return SendZmqMessage(MSG_RAWBLOCK, block.data(), block.size());
}
bool CZMQPublishRawTransactionNotifier::NotifyTransaction(const CTransaction &transaction)
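
For illustration only, an editor's sketch (not part of the patch) of a callback matching the new std::vector<uint8_t> signature; it serializes the block the same way the removed lines did, so the notifier itself no longer needs to know about CBlock. The blockman/ReadBlockFromDisk wiring is an assumption about the calling site, which this diff does not show:

    // Hypothetical shape of the m_get_block_by_index callback (standard headers
    // <algorithm>, <cstddef>, <iterator> omitted); blockman is assumed in scope.
    auto get_block_by_index = [&](std::vector<uint8_t>& raw_block, const CBlockIndex& index) {
        CBlock block;
        if (!blockman.ReadBlockFromDisk(block, index)) return false;
        DataStream ss;
        ss << TX_WITH_WITNESS(block); // same serialization the notifier performed before this change
        raw_block.clear();
        raw_block.reserve(ss.size());
        std::transform(ss.begin(), ss.end(), std::back_inserter(raw_block),
                       [](std::byte b) { return std::to_integer<uint8_t>(b); });
        return true;
    };
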
diff --git a/src/zmq/zmqpublishnotifier.h b/src/zmq/zmqpublishnotifier.h
index a5cd433761..cc941a899c 100644
--- a/src/zmq/zmqpublishnotifier.h
+++ b/src/zmq/zmqpublishnotifier.h
@@ -10,8 +10,8 @@
#include <cstddef>
#include <cstdint>
#include <functional>
+#include <vector>
-class CBlock;
class CBlockIndex;
class CTransaction;
@@ -49,10 +49,10 @@ public:
class CZMQPublishRawBlockNotifier : public CZMQAbstractPublishNotifier
{
private:
- const std::function<bool(CBlock&, const CBlockIndex&)> m_get_block_by_index;
+ const std::function<bool(std::vector<uint8_t>&, const CBlockIndex&)> m_get_block_by_index;
public:
- CZMQPublishRawBlockNotifier(std::function<bool(CBlock&, const CBlockIndex&)> get_block_by_index)
+ CZMQPublishRawBlockNotifier(std::function<bool(std::vector<uint8_t>&, const CBlockIndex&)> get_block_by_index)
: m_get_block_by_index{std::move(get_block_by_index)} {}
bool NotifyBlock(const CBlockIndex *pindex) override;
};