diff options
Diffstat (limited to 'src')
138 files changed, 3861 insertions, 1580 deletions
diff --git a/src/Makefile.am b/src/Makefile.am index e542a067a4..8905c0ad1c 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -186,6 +186,7 @@ BITCOIN_CORE_H = \ kernel/coinstats.h \ kernel/context.h \ kernel/cs_main.h \ + kernel/disconnected_transactions.h \ kernel/mempool_entry.h \ kernel/mempool_limits.h \ kernel/mempool_options.h \ diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index 934e9a1fae..28b779a5a8 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -28,6 +28,7 @@ bench_bench_bitcoin_SOURCES = \ bench/data.cpp \ bench/data.h \ bench/descriptors.cpp \ + bench/disconnected_transactions.cpp \ bench/duplicate_inputs.cpp \ bench/ellswift.cpp \ bench/examples.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 5dc20d4fab..d66f5bf53a 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -300,6 +300,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/netbase_dns_lookup.cpp \ test/fuzz/node_eviction.cpp \ test/fuzz/p2p_transport_serialization.cpp \ + test/fuzz/package_eval.cpp \ test/fuzz/parse_hd_keypath.cpp \ test/fuzz/parse_numbers.cpp \ test/fuzz/parse_script.cpp \ diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 50f576624c..8b85b77e2b 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -216,14 +216,14 @@ util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgro void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors) { LOG_TIME_SECONDS(strprintf("Flush %d outbound block-relay-only peer addresses to anchors.dat", anchors.size())); - SerializeFileDB("anchors", anchors_db_path, WithParams(CAddress::V2_DISK, anchors)); + SerializeFileDB("anchors", anchors_db_path, CAddress::V2_DISK(anchors)); } std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path) { std::vector<CAddress> anchors; try { - DeserializeFileDB(anchors_db_path, WithParams(CAddress::V2_DISK, anchors)); + DeserializeFileDB(anchors_db_path, 
CAddress::V2_DISK(anchors)); LogPrintf("Loaded %i addresses from %s\n", anchors.size(), fs::quoted(fs::PathToString(anchors_db_path.filename()))); } catch (const std::exception&) { anchors.clear(); diff --git a/src/addresstype.cpp b/src/addresstype.cpp index 2454cfb5d9..f199d1b479 100644 --- a/src/addresstype.cpp +++ b/src/addresstype.cpp @@ -54,11 +54,12 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) switch (whichType) { case TxoutType::PUBKEY: { CPubKey pubKey(vSolutions[0]); - if (!pubKey.IsValid()) - return false; - - addressRet = PKHash(pubKey); - return true; + if (!pubKey.IsValid()) { + addressRet = CNoDestination(scriptPubKey); + } else { + addressRet = PubKeyDestination(pubKey); + } + return false; } case TxoutType::PUBKEYHASH: { addressRet = PKHash(uint160(vSolutions[0])); @@ -87,16 +88,13 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) return true; } case TxoutType::WITNESS_UNKNOWN: { - WitnessUnknown unk; - unk.version = vSolutions[0][0]; - std::copy(vSolutions[1].begin(), vSolutions[1].end(), unk.program); - unk.length = vSolutions[1].size(); - addressRet = unk; + addressRet = WitnessUnknown{vSolutions[0][0], vSolutions[1]}; return true; } case TxoutType::MULTISIG: case TxoutType::NULL_DATA: case TxoutType::NONSTANDARD: + addressRet = CNoDestination(scriptPubKey); return false; } // no default case, so the compiler can warn about missing cases assert(false); @@ -108,7 +106,12 @@ class CScriptVisitor public: CScript operator()(const CNoDestination& dest) const { - return CScript(); + return dest.GetScript(); + } + + CScript operator()(const PubKeyDestination& dest) const + { + return CScript() << ToByteVector(dest.GetPubKey()) << OP_CHECKSIG; } CScript operator()(const PKHash& keyID) const @@ -138,9 +141,22 @@ public: CScript operator()(const WitnessUnknown& id) const { - return CScript() << CScript::EncodeOP_N(id.version) << std::vector<unsigned char>(id.program, id.program + 
id.length); + return CScript() << CScript::EncodeOP_N(id.GetWitnessVersion()) << id.GetWitnessProgram(); } }; + +class ValidDestinationVisitor +{ +public: + bool operator()(const CNoDestination& dest) const { return false; } + bool operator()(const PubKeyDestination& dest) const { return false; } + bool operator()(const PKHash& dest) const { return true; } + bool operator()(const ScriptHash& dest) const { return true; } + bool operator()(const WitnessV0KeyHash& dest) const { return true; } + bool operator()(const WitnessV0ScriptHash& dest) const { return true; } + bool operator()(const WitnessV1Taproot& dest) const { return true; } + bool operator()(const WitnessUnknown& dest) const { return true; } +}; } // namespace CScript GetScriptForDestination(const CTxDestination& dest) @@ -149,5 +165,5 @@ CScript GetScriptForDestination(const CTxDestination& dest) } bool IsValidDestination(const CTxDestination& dest) { - return dest.index() != 0; + return std::visit(ValidDestinationVisitor(), dest); } diff --git a/src/addresstype.h b/src/addresstype.h index 6b651e9014..d3422c6813 100644 --- a/src/addresstype.h +++ b/src/addresstype.h @@ -14,9 +14,30 @@ #include <algorithm> class CNoDestination { +private: + CScript m_script; + +public: + CNoDestination() = default; + CNoDestination(const CScript& script) : m_script(script) {} + + const CScript& GetScript() const { return m_script; } + + friend bool operator==(const CNoDestination& a, const CNoDestination& b) { return a.GetScript() == b.GetScript(); } + friend bool operator<(const CNoDestination& a, const CNoDestination& b) { return a.GetScript() < b.GetScript(); } +}; + +struct PubKeyDestination { +private: + CPubKey m_pubkey; + public: - friend bool operator==(const CNoDestination &a, const CNoDestination &b) { return true; } - friend bool operator<(const CNoDestination &a, const CNoDestination &b) { return true; } + PubKeyDestination(const CPubKey& pubkey) : m_pubkey(pubkey) {} + + const CPubKey& GetPubKey() const 
LIFETIMEBOUND { return m_pubkey; } + + friend bool operator==(const PubKeyDestination& a, const PubKeyDestination& b) { return a.GetPubKey() == b.GetPubKey(); } + friend bool operator<(const PubKeyDestination& a, const PubKeyDestination& b) { return a.GetPubKey() < b.GetPubKey(); } }; struct PKHash : public BaseHash<uint160> @@ -69,45 +90,55 @@ struct WitnessV1Taproot : public XOnlyPubKey //! CTxDestination subtype to encode any future Witness version struct WitnessUnknown { - unsigned int version; - unsigned int length; - unsigned char program[40]; +private: + unsigned int m_version; + std::vector<unsigned char> m_program; + +public: + WitnessUnknown(unsigned int version, const std::vector<unsigned char>& program) : m_version(version), m_program(program) {} + WitnessUnknown(int version, const std::vector<unsigned char>& program) : m_version(static_cast<unsigned int>(version)), m_program(program) {} + + unsigned int GetWitnessVersion() const { return m_version; } + const std::vector<unsigned char>& GetWitnessProgram() const LIFETIMEBOUND { return m_program; } friend bool operator==(const WitnessUnknown& w1, const WitnessUnknown& w2) { - if (w1.version != w2.version) return false; - if (w1.length != w2.length) return false; - return std::equal(w1.program, w1.program + w1.length, w2.program); + if (w1.GetWitnessVersion() != w2.GetWitnessVersion()) return false; + return w1.GetWitnessProgram() == w2.GetWitnessProgram(); } friend bool operator<(const WitnessUnknown& w1, const WitnessUnknown& w2) { - if (w1.version < w2.version) return true; - if (w1.version > w2.version) return false; - if (w1.length < w2.length) return true; - if (w1.length > w2.length) return false; - return std::lexicographical_compare(w1.program, w1.program + w1.length, w2.program, w2.program + w2.length); + if (w1.GetWitnessVersion() < w2.GetWitnessVersion()) return true; + if (w1.GetWitnessVersion() > w2.GetWitnessVersion()) return false; + return w1.GetWitnessProgram() < w2.GetWitnessProgram(); 
} }; /** - * A txout script template with a specific destination. It is either: - * * CNoDestination: no destination set - * * PKHash: TxoutType::PUBKEYHASH destination (P2PKH) - * * ScriptHash: TxoutType::SCRIPTHASH destination (P2SH) - * * WitnessV0ScriptHash: TxoutType::WITNESS_V0_SCRIPTHASH destination (P2WSH) - * * WitnessV0KeyHash: TxoutType::WITNESS_V0_KEYHASH destination (P2WPKH) - * * WitnessV1Taproot: TxoutType::WITNESS_V1_TAPROOT destination (P2TR) - * * WitnessUnknown: TxoutType::WITNESS_UNKNOWN destination (P2W???) + * A txout script categorized into standard templates. + * * CNoDestination: Optionally a script, no corresponding address. + * * PubKeyDestination: TxoutType::PUBKEY (P2PK), no corresponding address + * * PKHash: TxoutType::PUBKEYHASH destination (P2PKH address) + * * ScriptHash: TxoutType::SCRIPTHASH destination (P2SH address) + * * WitnessV0ScriptHash: TxoutType::WITNESS_V0_SCRIPTHASH destination (P2WSH address) + * * WitnessV0KeyHash: TxoutType::WITNESS_V0_KEYHASH destination (P2WPKH address) + * * WitnessV1Taproot: TxoutType::WITNESS_V1_TAPROOT destination (P2TR address) + * * WitnessUnknown: TxoutType::WITNESS_UNKNOWN destination (P2W??? address) * A CTxDestination is the internal data type encoded in a bitcoin address */ -using CTxDestination = std::variant<CNoDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash, WitnessV1Taproot, WitnessUnknown>; +using CTxDestination = std::variant<CNoDestination, PubKeyDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash, WitnessV1Taproot, WitnessUnknown>; -/** Check whether a CTxDestination is a CNoDestination. */ +/** Check whether a CTxDestination corresponds to one with an address. */ bool IsValidDestination(const CTxDestination& dest); /** - * Parse a standard scriptPubKey for the destination address. Assigns result to - * the addressRet parameter and returns true if successful. Currently only works for P2PK, - * P2PKH, P2SH, P2WPKH, and P2WSH scripts. 
+ * Parse a scriptPubKey for the destination. + * + * For standard scripts that have addresses (and P2PK as an exception), a corresponding CTxDestination + * is assigned to addressRet. + * For all other scripts. addressRet is assigned as a CNoDestination containing the scriptPubKey. + * + * Returns true for standard destinations with addresses - P2PKH, P2SH, P2WPKH, P2WSH, P2TR and P2W??? scripts. + * Returns false for non-standard destinations and those without addresses - P2PK, bare multisig, null data, and nonstandard scripts. */ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet); diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp index 0e110a653a..249b76ee85 100644 --- a/src/bench/coin_selection.cpp +++ b/src/bench/coin_selection.cpp @@ -79,7 +79,7 @@ static void CoinSelection(benchmark::Bench& bench) }; auto group = wallet::GroupOutputs(wallet, available_coins, coin_selection_params, {{filter_standard}})[filter_standard]; bench.run([&] { - auto result = AttemptSelection(1003 * COIN, group, coin_selection_params, /*allow_mixed_output_types=*/true); + auto result = AttemptSelection(wallet.chain(), 1003 * COIN, group, coin_selection_params, /*allow_mixed_output_types=*/true); assert(result); assert(result->GetSelectedValue() == 1003 * COIN); assert(result->GetInputSet().size() == 2); diff --git a/src/bench/disconnected_transactions.cpp b/src/bench/disconnected_transactions.cpp new file mode 100644 index 0000000000..0a7344b248 --- /dev/null +++ b/src/bench/disconnected_transactions.cpp @@ -0,0 +1,130 @@ +// Copyright (c) 2023 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <bench/bench.h> +#include <kernel/disconnected_transactions.h> +#include <primitives/block.h> +#include <test/util/random.h> +#include <test/util/setup_common.h> + +constexpr size_t BLOCK_VTX_COUNT{4000}; +constexpr size_t BLOCK_VTX_COUNT_10PERCENT{400}; + +using BlockTxns = decltype(CBlock::vtx); + +/** Reorg where 1 block is disconnected and 2 blocks are connected. */ +struct ReorgTxns { + /** Disconnected block. */ + BlockTxns disconnected_txns; + /** First connected block. */ + BlockTxns connected_txns_1; + /** Second connected block, new chain tip. Has no overlap with disconnected_txns. */ + BlockTxns connected_txns_2; + /** Transactions shared between disconnected_txns and connected_txns_1. */ + size_t num_shared; +}; + +static BlockTxns CreateRandomTransactions(size_t num_txns) +{ + // Ensure every transaction has a different txid by having each one spend the previous one. + static uint256 prevout_hash{uint256::ZERO}; + + BlockTxns txns; + txns.reserve(num_txns); + // Simplest spk for every tx + CScript spk = CScript() << OP_TRUE; + for (uint32_t i = 0; i < num_txns; ++i) { + CMutableTransaction tx; + tx.vin.emplace_back(CTxIn{COutPoint{prevout_hash, 0}}); + tx.vout.emplace_back(CTxOut{CENT, spk}); + auto ptx{MakeTransactionRef(tx)}; + txns.emplace_back(ptx); + prevout_hash = ptx->GetHash(); + } + return txns; +} + +/** Creates blocks for a Reorg, each with BLOCK_VTX_COUNT transactions. Between the disconnected + * block and the first connected block, there will be num_not_shared transactions that are + * different, and all other transactions the exact same. The second connected block has all unique + * transactions. This is to simulate a reorg in which all but num_not_shared transactions are + * confirmed in the new chain. 
*/ +static ReorgTxns CreateBlocks(size_t num_not_shared) +{ + auto num_shared{BLOCK_VTX_COUNT - num_not_shared}; + const auto shared_txns{CreateRandomTransactions(/*num_txns=*/num_shared)}; + + // Create different sets of transactions... + auto disconnected_block_txns{CreateRandomTransactions(/*num_txns=*/num_not_shared)}; + std::copy(shared_txns.begin(), shared_txns.end(), std::back_inserter(disconnected_block_txns)); + + auto connected_block_txns{CreateRandomTransactions(/*num_txns=*/num_not_shared)}; + std::copy(shared_txns.begin(), shared_txns.end(), std::back_inserter(connected_block_txns)); + + assert(disconnected_block_txns.size() == BLOCK_VTX_COUNT); + assert(connected_block_txns.size() == BLOCK_VTX_COUNT); + + return ReorgTxns{/*disconnected_txns=*/disconnected_block_txns, + /*connected_txns_1=*/connected_block_txns, + /*connected_txns_2=*/CreateRandomTransactions(BLOCK_VTX_COUNT), + /*num_shared=*/num_shared}; +} + +static void Reorg(const ReorgTxns& reorg) +{ + DisconnectedBlockTransactions disconnectpool{MAX_DISCONNECTED_TX_POOL_SIZE * 1000}; + // Disconnect block + const auto evicted = disconnectpool.AddTransactionsFromBlock(reorg.disconnected_txns); + assert(evicted.empty()); + + // Connect first block + disconnectpool.removeForBlock(reorg.connected_txns_1); + // Connect new tip + disconnectpool.removeForBlock(reorg.connected_txns_2); + + // Sanity Check + assert(disconnectpool.size() == BLOCK_VTX_COUNT - reorg.num_shared); + + disconnectpool.clear(); +} + +/** Add transactions from DisconnectedBlockTransactions, remove all but one (the disconnected + * block's coinbase transaction) of them, and then pop from the front until empty. This is a reorg + * in which all of the non-coinbase transactions in the disconnected chain also exist in the new + * chain. 
*/ +static void AddAndRemoveDisconnectedBlockTransactionsAll(benchmark::Bench& bench) +{ + const auto chains{CreateBlocks(/*num_not_shared=*/1)}; + assert(chains.num_shared == BLOCK_VTX_COUNT - 1); + + bench.minEpochIterations(10).run([&]() { + Reorg(chains); + }); +} + +/** Add transactions from DisconnectedBlockTransactions, remove 90% of them, and then pop from the front until empty. */ +static void AddAndRemoveDisconnectedBlockTransactions90(benchmark::Bench& bench) +{ + const auto chains{CreateBlocks(/*num_not_shared=*/BLOCK_VTX_COUNT_10PERCENT)}; + assert(chains.num_shared == BLOCK_VTX_COUNT - BLOCK_VTX_COUNT_10PERCENT); + + bench.minEpochIterations(10).run([&]() { + Reorg(chains); + }); +} + +/** Add transactions from DisconnectedBlockTransactions, remove 10% of them, and then pop from the front until empty. */ +static void AddAndRemoveDisconnectedBlockTransactions10(benchmark::Bench& bench) +{ + const auto chains{CreateBlocks(/*num_not_shared=*/BLOCK_VTX_COUNT - BLOCK_VTX_COUNT_10PERCENT)}; + assert(chains.num_shared == BLOCK_VTX_COUNT_10PERCENT); + + bench.minEpochIterations(10).run([&]() { + Reorg(chains); + }); +} + +BENCHMARK(AddAndRemoveDisconnectedBlockTransactionsAll, benchmark::PriorityLevel::HIGH); +BENCHMARK(AddAndRemoveDisconnectedBlockTransactions90, benchmark::PriorityLevel::HIGH); +BENCHMARK(AddAndRemoveDisconnectedBlockTransactions10, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/load_external.cpp b/src/bench/load_external.cpp index 252cbb163b..3b100d97b0 100644 --- a/src/bench/load_external.cpp +++ b/src/bench/load_external.cpp @@ -5,6 +5,7 @@ #include <bench/bench.h> #include <bench/data.h> #include <chainparams.h> +#include <clientversion.h> #include <test/util/setup_common.h> #include <util/chaintype.h> #include <validation.h> @@ -54,7 +55,7 @@ static void LoadExternalBlockFile(benchmark::Bench& bench) bench.run([&] { // "rb" is "binary, O_RDONLY", positioned to the start of the file. 
// The file will be closed by LoadExternalBlockFile(). - FILE* file{fsbridge::fopen(blkfile, "rb")}; + CAutoFile file{fsbridge::fopen(blkfile, "rb"), CLIENT_VERSION}; testing_setup->m_node.chainman->LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent); }); fs::remove(blkfile); diff --git a/src/bench/streams_findbyte.cpp b/src/bench/streams_findbyte.cpp index 22b8f1b356..4aaccb2af8 100644 --- a/src/bench/streams_findbyte.cpp +++ b/src/bench/streams_findbyte.cpp @@ -4,19 +4,23 @@ #include <bench/bench.h> -#include <util/fs.h> #include <streams.h> +#include <util/fs.h> + +#include <cstddef> +#include <cstdint> +#include <cstdio> static void FindByte(benchmark::Bench& bench) { // Setup - FILE* file = fsbridge::fopen("streams_tmp", "w+b"); + CAutoFile file{fsbridge::fopen("streams_tmp", "w+b"), 0}; const size_t file_size = 200; uint8_t data[file_size] = {0}; data[file_size-1] = 1; - fwrite(&data, sizeof(uint8_t), file_size, file); - rewind(file); - BufferedFile bf{file, /*nBufSize=*/file_size + 1, /*nRewindIn=*/file_size, 0}; + file << data; + std::rewind(file.Get()); + BufferedFile bf{file, /*nBufSize=*/file_size + 1, /*nRewindIn=*/file_size}; bench.run([&] { bf.SetPos(0); @@ -24,7 +28,7 @@ static void FindByte(benchmark::Bench& bench) }); // Cleanup - bf.fclose(); + file.fclose(); fs::remove("streams_tmp"); } diff --git a/src/bench/wallet_create_tx.cpp b/src/bench/wallet_create_tx.cpp index 5e5bc76fd2..160534b63c 100644 --- a/src/bench/wallet_create_tx.cpp +++ b/src/bench/wallet_create_tx.cpp @@ -70,7 +70,7 @@ void generateFakeBlock(const CChainParams& params, // notify wallet const auto& pindex = WITH_LOCK(::cs_main, return context.chainman->ActiveChain().Tip()); - wallet.blockConnected(kernel::MakeBlockInfo(pindex, &block)); + wallet.blockConnected(ChainstateRole::NORMAL, kernel::MakeBlockInfo(pindex, &block)); } struct PreSelectInputs { diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp index 985a81f522..dd3824fb1c 100644 --- a/src/blockfilter.cpp +++ 
b/src/blockfilter.cpp @@ -16,9 +16,6 @@ #include <util/golombrice.h> #include <util/string.h> -/// SerType used to serialize parameters in GCS filter encoding. -static constexpr int GCS_SER_TYPE = SER_NETWORK; - /// Protocol version used to serialize parameters in GCS filter encoding. static constexpr int GCS_SER_VERSION = 0; @@ -52,7 +49,7 @@ GCSFilter::GCSFilter(const Params& params) GCSFilter::GCSFilter(const Params& params, std::vector<unsigned char> encoded_filter, bool skip_decode_check) : m_params(params), m_encoded(std::move(encoded_filter)) { - SpanReader stream{GCS_SER_TYPE, GCS_SER_VERSION, m_encoded}; + SpanReader stream{GCS_SER_VERSION, m_encoded}; uint64_t N = ReadCompactSize(stream); m_N = static_cast<uint32_t>(N); @@ -84,7 +81,7 @@ GCSFilter::GCSFilter(const Params& params, const ElementSet& elements) } m_F = static_cast<uint64_t>(m_N) * static_cast<uint64_t>(m_params.m_M); - CVectorWriter stream(GCS_SER_TYPE, GCS_SER_VERSION, m_encoded, 0); + CVectorWriter stream(GCS_SER_VERSION, m_encoded, 0); WriteCompactSize(stream, m_N); @@ -106,7 +103,7 @@ GCSFilter::GCSFilter(const Params& params, const ElementSet& elements) bool GCSFilter::MatchInternal(const uint64_t* element_hashes, size_t size) const { - SpanReader stream{GCS_SER_TYPE, GCS_SER_VERSION, m_encoded}; + SpanReader stream{GCS_SER_VERSION, m_encoded}; // Seek forward by size of N uint64_t N = ReadCompactSize(stream); diff --git a/src/chain.h b/src/chain.h index 7806720ce9..78b06719f4 100644 --- a/src/chain.h +++ b/src/chain.h @@ -276,6 +276,12 @@ public: * * Does not imply the transactions are consensus-valid (ConnectTip might fail) * Does not imply the transactions are still stored on disk. (IsBlockPruned might return true) + * + * Note that this will be true for the snapshot base block, if one is loaded (and + * all subsequent assumed-valid blocks) since its nChainTx value will have been set + * manually based on the related AssumeutxoData entry. 
+ * + * TODO: potentially change the name of this based on the fact above. */ bool HaveTxsDownloaded() const { return nChainTx != 0; } diff --git a/src/hash.h b/src/hash.h index f2b627ff4f..d355b703ff 100644 --- a/src/hash.h +++ b/src/hash.h @@ -149,13 +149,11 @@ public: class CHashWriter : public HashWriter { private: - const int nType; const int nVersion; public: - CHashWriter(int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) {} + CHashWriter(int nVersionIn) : nVersion{nVersionIn} {} - int GetType() const { return nType; } int GetVersion() const { return nVersion; } template<typename T> @@ -223,15 +221,6 @@ public: } }; -/** Compute the 256-bit hash of an object's serialization. */ -template<typename T> -uint256 SerializeHash(const T& obj, int nType=SER_GETHASH, int nVersion=PROTOCOL_VERSION) -{ - CHashWriter ss(nType, nVersion); - ss << obj; - return ss.GetHash(); -} - /** Single-SHA256 a 32-byte input (represented as uint256). */ [[nodiscard]] uint256 SHA256Uint256(const uint256& input); diff --git a/src/headerssync.cpp b/src/headerssync.cpp index a3adfb4f70..f891063cd2 100644 --- a/src/headerssync.cpp +++ b/src/headerssync.cpp @@ -7,6 +7,7 @@ #include <pow.h> #include <timedata.h> #include <util/check.h> +#include <util/vector.h> // The two constants below are computed using the simulation script on // https://gist.github.com/sipa/016ae445c132cdf65a2791534dfb7ae1 @@ -51,9 +52,9 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus void HeadersSyncState::Finalize() { Assume(m_download_state != State::FINAL); - m_header_commitments = {}; + ClearShrink(m_header_commitments); m_last_header_received.SetNull(); - m_redownloaded_headers = {}; + ClearShrink(m_redownloaded_headers); m_redownload_buffer_last_hash.SetNull(); m_redownload_buffer_first_prev_hash.SetNull(); m_process_all_remaining_headers = false; diff --git a/src/index/base.cpp b/src/index/base.cpp index f18205a76f..8474d01c41 100644 --- a/src/index/base.cpp 
+++ b/src/index/base.cpp @@ -79,9 +79,15 @@ BaseIndex::~BaseIndex() bool BaseIndex::Init() { + AssertLockNotHeld(cs_main); + + // May need reset if index is being restarted. + m_interrupt.reset(); + // m_chainstate member gives indexing code access to node internals. It is // removed in followup https://github.com/bitcoin/bitcoin/pull/24230 - m_chainstate = &m_chain->context()->chainman->ActiveChainstate(); + m_chainstate = WITH_LOCK(::cs_main, + return &m_chain->context()->chainman->GetChainstateForIndexing()); // Register to validation interface before setting the 'm_synced' flag, so that // callbacks are not missed once m_synced is true. RegisterValidationInterface(this); @@ -92,7 +98,8 @@ bool BaseIndex::Init() } LOCK(cs_main); - CChain& active_chain = m_chainstate->m_chain; + CChain& index_chain = m_chainstate->m_chain; + if (locator.IsNull()) { SetBestBlockIndex(nullptr); } else { @@ -114,7 +121,7 @@ bool BaseIndex::Init() // Note: this will latch to true immediately if the user starts up with an empty // datadir and an index enabled. If this is the case, indexation will happen solely // via `BlockConnected` signals until, possibly, the next restart. - m_synced = start_block == active_chain.Tip(); + m_synced = start_block == index_chain.Tip(); m_init = true; return true; } @@ -143,6 +150,8 @@ void BaseIndex::ThreadSync() std::chrono::steady_clock::time_point last_locator_write_time{0s}; while (true) { if (m_interrupt) { + LogPrintf("%s: m_interrupt set; exiting ThreadSync\n", GetName()); + SetBestBlockIndex(pindex); // No need to handle errors in Commit. If it fails, the error will be already be // logged. 
The best way to recover is to continue, as index cannot be corrupted by @@ -250,8 +259,19 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti return true; } -void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) +void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) { + // Ignore events from the assumed-valid chain; we will process its blocks + // (sequentially) after it is fully verified by the background chainstate. This + // is to avoid any out-of-order indexing. + // + // TODO at some point we could parameterize whether a particular index can be + // built out of order, but for now just do the conservative simple thing. + if (role == ChainstateRole::ASSUMEDVALID) { + return; + } + + // Ignore BlockConnected signals until we have fully indexed the chain. if (!m_synced) { return; } @@ -296,8 +316,14 @@ void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const } } -void BaseIndex::ChainStateFlushed(const CBlockLocator& locator) +void BaseIndex::ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) { + // Ignore events from the assumed-valid chain; we will process its blocks + // (sequentially) after it is fully verified by the background chainstate. + if (role == ChainstateRole::ASSUMEDVALID) { + return; + } + if (!m_synced) { return; } diff --git a/src/index/base.h b/src/index/base.h index 9b2a41dc92..154061fb19 100644 --- a/src/index/base.h +++ b/src/index/base.h @@ -15,6 +15,7 @@ class CBlock; class CBlockIndex; class Chainstate; +class ChainstateManager; namespace interfaces { class Chain; } // namespace interfaces @@ -30,6 +31,11 @@ struct IndexSummary { * Base class for indices of blockchain data. This implements * CValidationInterface and ensures blocks are indexed sequentially according * to their position in the active chain. 
+ * + * In the presence of multiple chainstates (i.e. if a UTXO snapshot is loaded), + * only the background "IBD" chainstate will be indexed to avoid building the + * index out of order. When the background chainstate completes validation, the + * index will be reinitialized and indexing will continue. */ class BaseIndex : public CValidationInterface { @@ -102,9 +108,9 @@ protected: Chainstate* m_chainstate{nullptr}; const std::string m_name; - void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) override; + void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) override; - void ChainStateFlushed(const CBlockLocator& locator) override; + void ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) override; /// Initialize internal state from the database and block index. [[nodiscard]] virtual bool CustomInit(const std::optional<interfaces::BlockKey>& block) { return true; } @@ -122,9 +128,6 @@ protected: virtual DB& GetDB() const = 0; - /// Get the name of the index for display in logs. - const std::string& GetName() const LIFETIMEBOUND { return m_name; } - /// Update the internal best block index as well as the prune lock. void SetBestBlockIndex(const CBlockIndex* block); @@ -133,6 +136,9 @@ public: /// Destructor interrupts sync thread if running and blocks until it exits. virtual ~BaseIndex(); + /// Get the name of the index for display in logs. + const std::string& GetName() const LIFETIMEBOUND { return m_name; } + /// Blocks the current thread until the index is caught up to the current /// state of the block chain. 
This only blocks if the index has gotten in /// sync once and only needs to process blocks in the ValidationInterface diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp index 6b38e19d81..e16dd0f8bd 100644 --- a/src/index/txindex.cpp +++ b/src/index/txindex.cpp @@ -79,7 +79,7 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe return false; } - CAutoFile file{m_chainstate->m_blockman.OpenBlockFile(postx, true), CLIENT_VERSION}; + CAutoFile file{m_chainstate->m_blockman.OpenBlockFile(postx, true)}; if (file.IsNull()) { return error("%s: OpenBlockFile failed", __func__); } diff --git a/src/init.cpp b/src/init.cpp index 6dd3d5970b..a0b4425898 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -498,6 +498,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-i2psam=<ip:port>", "I2P SAM proxy to reach I2P peers and accept I2P connections (default: none)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-i2pacceptincoming", strprintf("Whether to accept inbound I2P connections (default: %i). Ignored if -i2psam is not set. Listening for inbound I2P connections is done through the SAM proxy, not by binding to a local address and port.", DEFAULT_I2P_ACCEPT_INCOMING), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-onlynet=<net>", "Make automatic outbound connections only to network <net> (" + Join(GetNetworkNames(), ", ") + "). Inbound and manual connections are not affected by this option. 
It can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-v2transport", strprintf("Support v2 transport (default: %u)", DEFAULT_V2_TRANSPORT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-txreconciliation", strprintf("Enable transaction reconciliations per BIP 330 (default: %d)", DEFAULT_TXRECONCILIATION_ENABLE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); @@ -893,6 +894,11 @@ bool AppInitParameterInteraction(const ArgsManager& args) } } + // Signal NODE_P2P_V2 if BIP324 v2 transport is enabled. + if (args.GetBoolArg("-v2transport", DEFAULT_V2_TRANSPORT)) { + nLocalServices = ServiceFlags(nLocalServices | NODE_P2P_V2); + } + // Signal NODE_COMPACT_FILTERS if peerblockfilters and basic filters index are both enabled. if (args.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS)) { if (g_enabled_filter_types.count(BlockFilterType::BASIC) != 1) { @@ -1478,6 +1484,25 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node.chainman = std::make_unique<ChainstateManager>(node.kernel->interrupt, chainman_opts, blockman_opts); ChainstateManager& chainman = *node.chainman; + // This is defined and set here instead of inline in validation.h to avoid a hard + // dependency between validation and index/base, since the latter is not in + // libbitcoinkernel. 
+ chainman.restart_indexes = [&node]() { + LogPrintf("[snapshot] restarting indexes\n"); + + // Drain the validation interface queue to ensure that the old indexes + // don't have any pending work. + SyncWithValidationInterfaceQueue(); + + for (auto* index : node.indexes) { + index->Interrupt(); + index->Stop(); + if (!(index->Init() && index->StartBackgroundSync())) { + LogPrintf("[snapshot] WARNING failed to restart index %s on snapshot chain\n", index->GetName()); + } + } + }; + node::ChainstateLoadOptions options; options.mempool = Assert(node.mempool.get()); options.reindex = node::fReindex; @@ -1906,18 +1931,19 @@ bool StartIndexBackgroundSync(NodeContext& node) // indexes_start_block='nullptr' means "start from height 0". std::optional<const CBlockIndex*> indexes_start_block; std::string older_index_name; - ChainstateManager& chainman = *Assert(node.chainman); + const Chainstate& chainstate = WITH_LOCK(::cs_main, return chainman.GetChainstateForIndexing()); + const CChain& index_chain = chainstate.m_chain; + for (auto index : node.indexes) { const IndexSummary& summary = index->GetSummary(); if (summary.synced) continue; // Get the last common block between the index best block and the active chain LOCK(::cs_main); - const CChain& active_chain = chainman.ActiveChain(); const CBlockIndex* pindex = chainman.m_blockman.LookupBlockIndex(summary.best_block_hash); - if (!active_chain.Contains(pindex)) { - pindex = active_chain.FindFork(pindex); + if (!index_chain.Contains(pindex)) { + pindex = index_chain.FindFork(pindex); } if (!indexes_start_block || !pindex || pindex->nHeight < indexes_start_block.value()->nHeight) { @@ -1932,7 +1958,7 @@ bool StartIndexBackgroundSync(NodeContext& node) LOCK(::cs_main); const CBlockIndex* start_block = *indexes_start_block; if (!start_block) start_block = chainman.ActiveChain().Genesis(); - if (!chainman.m_blockman.CheckBlockDataAvailability(*chainman.ActiveChain().Tip(), *Assert(start_block))) { + if 
(!chainman.m_blockman.CheckBlockDataAvailability(*index_chain.Tip(), *Assert(start_block))) { return InitError(strprintf(Untranslated("%s best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"), older_index_name)); } } diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h index dd664165d3..dea868f844 100644 --- a/src/interfaces/chain.h +++ b/src/interfaces/chain.h @@ -27,6 +27,7 @@ class Coin; class uint256; enum class MemPoolRemovalReason; enum class RBFTransactionState; +enum class ChainstateRole; struct bilingual_str; struct CBlockLocator; struct FeeCalculation; @@ -216,6 +217,43 @@ public: //! Calculate mempool ancestor and descendant counts for the given transaction. virtual void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants, size_t* ancestorsize = nullptr, CAmount* ancestorfees = nullptr) = 0; + //! For each outpoint, calculate the fee-bumping cost to spend this outpoint at the specified + // feerate, including bumping its ancestors. For example, if the target feerate is 10sat/vbyte + // and this outpoint refers to a mempool transaction at 3sat/vbyte, the bump fee includes the + // cost to bump the mempool transaction to 10sat/vbyte (i.e. 7 * mempooltx.vsize). If that + // transaction also has, say, an unconfirmed parent with a feerate of 1sat/vbyte, the bump fee + // includes the cost to bump the parent (i.e. 9 * parentmempooltx.vsize). + // + // If the outpoint comes from an unconfirmed transaction that is already above the target + // feerate or bumped by its descendant(s) already, it does not need to be bumped. Its bump fee + // is 0. Likewise, if any of the transaction's ancestors are already bumped by a transaction + // in our mempool, they are not included in the transaction's bump fee. + // + // Also supported is bump-fee calculation in the case of replacements. 
If an outpoint + // conflicts with another transaction in the mempool, it is assumed that the goal is to replace + // that transaction. As such, the calculation will exclude the to-be-replaced transaction, but + // will include the fee-bumping cost. If bump fees of descendants of the to-be-replaced + // transaction are requested, the value will be 0. Fee-related RBF rules are not included as + // they are logically distinct. + // + // Any outpoints that are otherwise unavailable from the mempool (e.g. UTXOs from confirmed + // transactions or transactions not yet broadcast by the wallet) are given a bump fee of 0. + // + // If multiple outpoints come from the same transaction (which would be very rare because + // it means that one transaction has multiple change outputs or paid the same wallet using multiple + // outputs in the same transaction) or have shared ancestry, the bump fees are calculated + // independently, i.e. as if only one of them is spent. This may result in double-fee-bumping. This + // caveat can be rectified per use of the sister-function CalculateCombinedBumpFee(…). + virtual std::map<COutPoint, CAmount> CalculateIndividualBumpFees(const std::vector<COutPoint>& outpoints, const CFeeRate& target_feerate) = 0; + + //! Calculate the combined bump fee for an input set per the same strategy + // as in CalculateIndividualBumpFees(…). + // Unlike CalculateIndividualBumpFees(…), this does not return individual + // bump fees per outpoint, but a single bump fee for the shared ancestry. + // The combined bump fee may be used to correct overestimation due to + // shared ancestry by multiple UTXOs after coin selection. + virtual std::optional<CAmount> CalculateCombinedBumpFee(const std::vector<COutPoint>& outpoints, const CFeeRate& target_feerate) = 0; + //! Get the node's package limits. //! Currently only returns the ancestor and descendant count limits, but could be enhanced to //! return more policy settings. 
@@ -273,10 +311,10 @@ public: virtual ~Notifications() {} virtual void transactionAddedToMempool(const CTransactionRef& tx) {} virtual void transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason) {} - virtual void blockConnected(const BlockInfo& block) {} + virtual void blockConnected(ChainstateRole role, const BlockInfo& block) {} virtual void blockDisconnected(const BlockInfo& block) {} virtual void updatedBlockTip() {} - virtual void chainStateFlushed(const CBlockLocator& locator) {} + virtual void chainStateFlushed(ChainstateRole role, const CBlockLocator& locator) {} }; //! Register handler for notifications. diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h index 9987681367..4b896c11a3 100644 --- a/src/interfaces/wallet.h +++ b/src/interfaces/wallet.h @@ -51,6 +51,7 @@ struct WalletBalances; struct WalletTx; struct WalletTxOut; struct WalletTxStatus; +struct WalletMigrationResult; using WalletOrderForm = std::vector<std::pair<std::string, std::string>>; using WalletValueMap = std::map<std::string, std::string>; @@ -333,6 +334,9 @@ public: //! Restore backup wallet virtual util::Result<std::unique_ptr<Wallet>> restoreWallet(const fs::path& backup_file, const std::string& wallet_name, std::vector<bilingual_str>& warnings) = 0; + //! Migrate a wallet + virtual util::Result<WalletMigrationResult> migrateWallet(const std::string& name, const SecureString& passphrase) = 0; + //! Return available wallets in wallet directory. virtual std::vector<std::string> listWalletDir() = 0; @@ -389,6 +393,7 @@ struct WalletTx CTransactionRef tx; std::vector<wallet::isminetype> txin_is_mine; std::vector<wallet::isminetype> txout_is_mine; + std::vector<bool> txout_is_change; std::vector<CTxDestination> txout_address; std::vector<wallet::isminetype> txout_address_is_mine; CAmount credit; @@ -424,6 +429,15 @@ struct WalletTxOut bool is_spent = false; }; +//! 
Migrated wallet info +struct WalletMigrationResult +{ + std::unique_ptr<Wallet> wallet; + std::optional<std::string> watchonly_wallet_name; + std::optional<std::string> solvables_wallet_name; + fs::path backup_path; +}; + //! Return implementation of Wallet interface. This function is defined in //! dummywallet.cpp and throws if the wallet component is not compiled. std::unique_ptr<Wallet> MakeWallet(wallet::WalletContext& context, const std::shared_ptr<wallet::CWallet>& wallet); diff --git a/src/kernel/chain.cpp b/src/kernel/chain.cpp index 1c877866d0..318c956b38 100644 --- a/src/kernel/chain.cpp +++ b/src/kernel/chain.cpp @@ -4,6 +4,7 @@ #include <chain.h> #include <interfaces/chain.h> +#include <kernel/chain.h> #include <sync.h> #include <uint256.h> @@ -25,3 +26,13 @@ interfaces::BlockInfo MakeBlockInfo(const CBlockIndex* index, const CBlock* data return info; } } // namespace kernel + +std::ostream& operator<<(std::ostream& os, const ChainstateRole& role) { + switch(role) { + case ChainstateRole::NORMAL: os << "normal"; break; + case ChainstateRole::ASSUMEDVALID: os << "assumedvalid"; break; + case ChainstateRole::BACKGROUND: os << "background"; break; + default: os.setstate(std::ios_base::failbit); + } + return os; +} diff --git a/src/kernel/chain.h b/src/kernel/chain.h index f0750f8266..feba24a557 100644 --- a/src/kernel/chain.h +++ b/src/kernel/chain.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_KERNEL_CHAIN_H #define BITCOIN_KERNEL_CHAIN_H +#include<iostream> + class CBlock; class CBlockIndex; namespace interfaces { @@ -14,6 +16,24 @@ struct BlockInfo; namespace kernel { //! Return data from block index. interfaces::BlockInfo MakeBlockInfo(const CBlockIndex* block_index, const CBlock* data = nullptr); + } // namespace kernel +//! This enum describes the various roles a specific Chainstate instance can take. +//! Other parts of the system sometimes need to vary in behavior depending on the +//! existence of a background validation chainstate, e.g. when building indexes. 
+enum class ChainstateRole { + // Single chainstate in use, "normal" IBD mode. + NORMAL, + + // Doing IBD-style validation in the background. Implies use of an assumed-valid + // chainstate. + BACKGROUND, + + // Active assumed-valid chainstate. Implies use of a background IBD chainstate. + ASSUMEDVALID, +}; + +std::ostream& operator<<(std::ostream& os, const ChainstateRole& role); + #endif // BITCOIN_KERNEL_CHAIN_H diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 6cee379faf..5e893a3f58 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -152,7 +152,6 @@ public: vFixedSeeds = std::vector<uint8_t>(std::begin(chainparams_seed_main), std::end(chainparams_seed_main)); fDefaultConsistencyChecks = false; - m_is_test_chain = false; m_is_mockable_chain = false; checkpointData = { @@ -173,8 +172,8 @@ public: } }; - m_assumeutxo_data = MapAssumeutxo{ - // TODO to be specified in a future patch. + m_assumeutxo_data = { + // TODO to be specified in a future patch. }; chainTxData = ChainTxData{ @@ -259,7 +258,6 @@ public: vFixedSeeds = std::vector<uint8_t>(std::begin(chainparams_seed_test), std::end(chainparams_seed_test)); fDefaultConsistencyChecks = false; - m_is_test_chain = true; m_is_mockable_chain = false; checkpointData = { @@ -268,8 +266,13 @@ public: } }; - m_assumeutxo_data = MapAssumeutxo{ - // TODO to be specified in a future patch. 
+ m_assumeutxo_data = { + { + .height = 2'500'000, + .hash_serialized = AssumeutxoHash{uint256S("0x2a8fdefef3bf75fa00540ccaaaba4b5281bea94229327bdb0f7416ef1e7a645c")}, + .nChainTx = 66484552, + .blockhash = uint256S("0x0000000000000093bcb68c03a9a168ae252572d348a2eaeba2cdf9231d73206f") + } }; chainTxData = ChainTxData{ @@ -372,6 +375,15 @@ public: vFixedSeeds.clear(); + m_assumeutxo_data = { + { + .height = 160'000, + .hash_serialized = AssumeutxoHash{uint256S("0x5225141cb62dee63ab3be95f9b03d60801f264010b1816d4bd00618b2736e7be")}, + .nChainTx = 2289496, + .blockhash = uint256S("0x0000003ca3c99aff040f2563c2ad8f8ec88bd0fd6b8f0895cfaf1ef90353a62c") + } + }; + base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111); base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196); base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,239); @@ -381,7 +393,6 @@ public: bech32_hrp = "tb"; fDefaultConsistencyChecks = false; - m_is_test_chain = true; m_is_mockable_chain = false; } }; @@ -472,7 +483,6 @@ public: vSeeds.emplace_back("dummySeed.invalid."); fDefaultConsistencyChecks = true; - m_is_test_chain = true; m_is_mockable_chain = true; checkpointData = { @@ -481,14 +491,19 @@ public: } }; - m_assumeutxo_data = MapAssumeutxo{ + m_assumeutxo_data = { { - 110, - {AssumeutxoHash{uint256S("0x1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618")}, 110}, + .height = 110, + .hash_serialized = AssumeutxoHash{uint256S("0x1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618")}, + .nChainTx = 110, + .blockhash = uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c") }, { - 200, - {AssumeutxoHash{uint256S("0x51c8d11d8b5c1de51543c579736e786aa2736206d1e11e627568029ce092cf62")}, 200}, + // For use by test/functional/feature_assumeutxo.py + .height = 299, + .hash_serialized = AssumeutxoHash{uint256S("0xef45ccdca5898b6c2145e4581d2b88c56564dd389e4bd75a1aaf6961d3edd3c0")}, + .nChainTx = 300, + .blockhash = 
uint256S("0x7e0517ef3ea6ecbed9117858e42eedc8eb39e8698a38dcbd1b3962a283233f4c") }, }; diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index 63837bb23e..7a5539bc71 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -12,6 +12,7 @@ #include <uint256.h> #include <util/chaintype.h> #include <util/hash_type.h> +#include <util/vector.h> #include <cstdint> #include <iterator> @@ -44,17 +45,21 @@ struct AssumeutxoHash : public BaseHash<uint256> { * as valid. */ struct AssumeutxoData { + int height; + //! The expected hash of the deserialized UTXO set. - const AssumeutxoHash hash_serialized; + AssumeutxoHash hash_serialized; //! Used to populate the nChainTx value, which is used during BlockManager::LoadBlockIndex(). //! //! We need to hardcode the value here because this is computed cumulatively using block data, //! which we do not necessarily have at the time of snapshot load. - const unsigned int nChainTx; -}; + unsigned int nChainTx; -using MapAssumeutxo = std::map<int, const AssumeutxoData>; + //! The hash of the base block for this snapshot. Used to refer to assumeutxo data + //! prior to having a loaded blockindex. + uint256 blockhash; +}; /** * Holds various statistics on transactions within a chain. Used to estimate @@ -93,7 +98,7 @@ public: /** Default value for -checkmempool and -checkblockindex argument */ bool DefaultConsistencyChecks() const { return fDefaultConsistencyChecks; } /** If this chain is exclusively used for testing */ - bool IsTestChain() const { return m_is_test_chain; } + bool IsTestChain() const { return m_chain_type != ChainType::MAIN; } /** If this chain allows time to be mocked */ bool IsMockableChain() const { return m_is_mockable_chain; } uint64_t PruneAfterHeight() const { return nPruneAfterHeight; } @@ -114,9 +119,14 @@ public: const std::vector<uint8_t>& FixedSeeds() const { return vFixedSeeds; } const CCheckpointData& Checkpoints() const { return checkpointData; } - //! 
Get allowed assumeutxo configuration. - //! @see ChainstateManager - const MapAssumeutxo& Assumeutxo() const { return m_assumeutxo_data; } + std::optional<AssumeutxoData> AssumeutxoForHeight(int height) const + { + return FindFirst(m_assumeutxo_data, [&](const auto& d) { return d.height == height; }); + } + std::optional<AssumeutxoData> AssumeutxoForBlockhash(const uint256& blockhash) const + { + return FindFirst(m_assumeutxo_data, [&](const auto& d) { return d.blockhash == blockhash; }); + } const ChainTxData& TxData() const { return chainTxData; } @@ -167,10 +177,9 @@ protected: CBlock genesis; std::vector<uint8_t> vFixedSeeds; bool fDefaultConsistencyChecks; - bool m_is_test_chain; bool m_is_mockable_chain; CCheckpointData checkpointData; - MapAssumeutxo m_assumeutxo_data; + std::vector<AssumeutxoData> m_assumeutxo_data; ChainTxData chainTxData; }; diff --git a/src/kernel/disconnected_transactions.h b/src/kernel/disconnected_transactions.h new file mode 100644 index 0000000000..7db39ba5ca --- /dev/null +++ b/src/kernel/disconnected_transactions.h @@ -0,0 +1,137 @@ +// Copyright (c) 2023 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_KERNEL_DISCONNECTED_TRANSACTIONS_H +#define BITCOIN_KERNEL_DISCONNECTED_TRANSACTIONS_H + +#include <core_memusage.h> +#include <memusage.h> +#include <primitives/transaction.h> +#include <util/hasher.h> + +#include <list> +#include <unordered_map> +#include <vector> + +/** Maximum kilobytes for transactions to store for processing during reorg */ +static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20'000; +/** + * DisconnectedBlockTransactions + + * During the reorg, it's desirable to re-add previously confirmed transactions + * to the mempool, so that anything not re-confirmed in the new chain is + * available to be mined. 
However, it's more efficient to wait until the reorg + * is complete and process all still-unconfirmed transactions at that time, + * since we expect most confirmed transactions to (typically) still be + * confirmed in the new chain, and re-accepting to the memory pool is expensive + * (and therefore better to not do in the middle of reorg-processing). + * Instead, store the disconnected transactions (in order!) as we go, remove any + * that are included in blocks in the new chain, and then process the remaining + * still-unconfirmed transactions at the end. + * + * Order of queuedTx: + * The front of the list should be the most recently-confirmed transactions (transactions at the + * end of vtx of blocks closer to the tip). If memory usage grows too large, we trim from the front + * of the list. After trimming, transactions can be re-added to the mempool from the back of the + * list to the front without running into missing inputs. + */ +class DisconnectedBlockTransactions { +private: + /** Cached dynamic memory usage for the CTransactions (memory for the shared pointers is + * included in the container calculations). */ + uint64_t cachedInnerUsage = 0; + const size_t m_max_mem_usage; + std::list<CTransactionRef> queuedTx; + using TxList = decltype(queuedTx); + std::unordered_map<uint256, TxList::iterator, SaltedTxidHasher> iters_by_txid; + + /** Trim the earliest-added entries until we are within memory bounds. 
*/ + std::vector<CTransactionRef> LimitMemoryUsage() + { + std::vector<CTransactionRef> evicted; + + while (!queuedTx.empty() && DynamicMemoryUsage() > m_max_mem_usage) { + evicted.emplace_back(queuedTx.front()); + cachedInnerUsage -= RecursiveDynamicUsage(*queuedTx.front()); + iters_by_txid.erase(queuedTx.front()->GetHash()); + queuedTx.pop_front(); + } + return evicted; + } + +public: + DisconnectedBlockTransactions(size_t max_mem_usage) : m_max_mem_usage{max_mem_usage} {} + + // It's almost certainly a logic bug if we don't clear out queuedTx before + // destruction, as we add to it while disconnecting blocks, and then we + // need to re-process remaining transactions to ensure mempool consistency. + // For now, assert() that we've emptied out this object on destruction. + // This assert() can always be removed if the reorg-processing code were + // to be refactored such that this assumption is no longer true (for + // instance if there was some other way we cleaned up the mempool after a + // reorg, besides draining this object). + ~DisconnectedBlockTransactions() { + assert(queuedTx.empty()); + assert(iters_by_txid.empty()); + assert(cachedInnerUsage == 0); + } + + size_t DynamicMemoryUsage() const { + return cachedInnerUsage + memusage::DynamicUsage(iters_by_txid) + memusage::DynamicUsage(queuedTx); + } + + /** Add transactions from the block, iterating through vtx in reverse order. Callers should call + * this function for blocks in descending order by block height. + * We assume that callers never pass multiple transactions with the same txid, otherwise things + * can go very wrong in removeForBlock due to queuedTx containing an item without a + * corresponding entry in iters_by_txid. + * @returns vector of transactions that were evicted for size-limiting. 
+ */ + [[nodiscard]] std::vector<CTransactionRef> AddTransactionsFromBlock(const std::vector<CTransactionRef>& vtx) + { + iters_by_txid.reserve(iters_by_txid.size() + vtx.size()); + for (auto block_it = vtx.rbegin(); block_it != vtx.rend(); ++block_it) { + auto it = queuedTx.insert(queuedTx.end(), *block_it); + iters_by_txid.emplace((*block_it)->GetHash(), it); + cachedInnerUsage += RecursiveDynamicUsage(**block_it); + } + return LimitMemoryUsage(); + } + + /** Remove any entries that are in this block. */ + void removeForBlock(const std::vector<CTransactionRef>& vtx) + { + // Short-circuit in the common case of a block being added to the tip + if (queuedTx.empty()) { + return; + } + for (const auto& tx : vtx) { + auto iter = iters_by_txid.find(tx->GetHash()); + if (iter != iters_by_txid.end()) { + auto list_iter = iter->second; + iters_by_txid.erase(iter); + cachedInnerUsage -= RecursiveDynamicUsage(**list_iter); + queuedTx.erase(list_iter); + } + } + } + + size_t size() const { return queuedTx.size(); } + + void clear() + { + cachedInnerUsage = 0; + iters_by_txid.clear(); + queuedTx.clear(); + } + + /** Clear all data structures and return the list of transactions. 
*/ + std::list<CTransactionRef> take() + { + std::list<CTransactionRef> ret = std::move(queuedTx); + clear(); + return ret; + } +}; +#endif // BITCOIN_KERNEL_DISCONNECTED_TRANSACTIONS_H diff --git a/src/key.cpp b/src/key.cpp index efaea5b1b3..0f283ca3e3 100644 --- a/src/key.cpp +++ b/src/key.cpp @@ -159,21 +159,21 @@ bool CKey::Check(const unsigned char *vch) { } void CKey::MakeNewKey(bool fCompressedIn) { + MakeKeyData(); do { - GetStrongRandBytes(keydata); - } while (!Check(keydata.data())); - fValid = true; + GetStrongRandBytes(*keydata); + } while (!Check(keydata->data())); fCompressed = fCompressedIn; } bool CKey::Negate() { - assert(fValid); - return secp256k1_ec_seckey_negate(secp256k1_context_sign, keydata.data()); + assert(keydata); + return secp256k1_ec_seckey_negate(secp256k1_context_sign, keydata->data()); } CPrivKey CKey::GetPrivKey() const { - assert(fValid); + assert(keydata); CPrivKey seckey; int ret; size_t seckeylen; @@ -186,7 +186,7 @@ CPrivKey CKey::GetPrivKey() const { } CPubKey CKey::GetPubKey() const { - assert(fValid); + assert(keydata); secp256k1_pubkey pubkey; size_t clen = CPubKey::SIZE; CPubKey result; @@ -212,7 +212,7 @@ bool SigHasLowR(const secp256k1_ecdsa_signature* sig) } bool CKey::Sign(const uint256 &hash, std::vector<unsigned char>& vchSig, bool grind, uint32_t test_case) const { - if (!fValid) + if (!keydata) return false; vchSig.resize(CPubKey::SIGNATURE_SIZE); size_t nSigLen = CPubKey::SIGNATURE_SIZE; @@ -253,7 +253,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const { } bool CKey::SignCompact(const uint256 &hash, std::vector<unsigned char>& vchSig) const { - if (!fValid) + if (!keydata) return false; vchSig.resize(CPubKey::COMPACT_SIGNATURE_SIZE); int rec = -1; @@ -301,10 +301,12 @@ bool CKey::SignSchnorr(const uint256& hash, Span<unsigned char> sig, const uint2 } bool CKey::Load(const CPrivKey &seckey, const CPubKey &vchPubKey, bool fSkipCheck=false) { - if (!ec_seckey_import_der(secp256k1_context_sign, (unsigned 
char*)begin(), seckey.data(), seckey.size())) + MakeKeyData(); + if (!ec_seckey_import_der(secp256k1_context_sign, (unsigned char*)begin(), seckey.data(), seckey.size())) { + ClearKeyData(); return false; + } fCompressed = vchPubKey.IsCompressed(); - fValid = true; if (fSkipCheck) return true; @@ -325,22 +327,21 @@ bool CKey::Derive(CKey& keyChild, ChainCode &ccChild, unsigned int nChild, const BIP32Hash(cc, nChild, 0, begin(), vout.data()); } memcpy(ccChild.begin(), vout.data()+32, 32); - memcpy((unsigned char*)keyChild.begin(), begin(), 32); + keyChild.Set(begin(), begin() + 32, true); bool ret = secp256k1_ec_seckey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), vout.data()); - keyChild.fCompressed = true; - keyChild.fValid = ret; + if (!ret) keyChild.ClearKeyData(); return ret; } EllSwiftPubKey CKey::EllSwiftCreate(Span<const std::byte> ent32) const { - assert(fValid); + assert(keydata); assert(ent32.size() == 32); std::array<std::byte, EllSwiftPubKey::size()> encoded_pubkey; auto success = secp256k1_ellswift_create(secp256k1_context_sign, UCharCast(encoded_pubkey.data()), - keydata.data(), + keydata->data(), UCharCast(ent32.data())); // Should always succeed for valid keys (asserted above). @@ -350,7 +351,7 @@ EllSwiftPubKey CKey::EllSwiftCreate(Span<const std::byte> ent32) const ECDHSecret CKey::ComputeBIP324ECDHSecret(const EllSwiftPubKey& their_ellswift, const EllSwiftPubKey& our_ellswift, bool initiating) const { - assert(fValid); + assert(keydata); ECDHSecret output; // BIP324 uses the initiator as party A, and the responder as party B. Remap the inputs @@ -359,7 +360,7 @@ ECDHSecret CKey::ComputeBIP324ECDHSecret(const EllSwiftPubKey& their_ellswift, c UCharCast(output.data()), UCharCast(initiating ? our_ellswift.data() : their_ellswift.data()), UCharCast(initiating ? their_ellswift.data() : our_ellswift.data()), - keydata.data(), + keydata->data(), initiating ? 
0 : 1, secp256k1_ellswift_xdh_hash_function_bip324, nullptr); @@ -46,57 +46,77 @@ public: "COMPRESSED_SIZE is larger than SIZE"); private: - //! Whether this private key is valid. We check for correctness when modifying the key - //! data, so fValid should always correspond to the actual state. - bool fValid{false}; + /** Internal data container for private key material. */ + using KeyType = std::array<unsigned char, 32>; //! Whether the public key corresponding to this private key is (to be) compressed. bool fCompressed{false}; - //! The actual byte data - std::vector<unsigned char, secure_allocator<unsigned char> > keydata; + //! The actual byte data. nullptr for invalid keys. + secure_unique_ptr<KeyType> keydata; //! Check whether the 32-byte array pointed to by vch is valid keydata. bool static Check(const unsigned char* vch); + void MakeKeyData() + { + if (!keydata) keydata = make_secure_unique<KeyType>(); + } + + void ClearKeyData() + { + keydata.reset(); + } + public: - //! Construct an invalid private key. - CKey() + CKey() noexcept = default; + CKey(CKey&&) noexcept = default; + CKey& operator=(CKey&&) noexcept = default; + + CKey& operator=(const CKey& other) { - // Important: vch must be 32 bytes in length to not break serialization - keydata.resize(32); + if (other.keydata) { + MakeKeyData(); + *keydata = *other.keydata; + } else { + ClearKeyData(); + } + fCompressed = other.fCompressed; + return *this; } + CKey(const CKey& other) { *this = other; } + friend bool operator==(const CKey& a, const CKey& b) { return a.fCompressed == b.fCompressed && a.size() == b.size() && - memcmp(a.keydata.data(), b.keydata.data(), a.size()) == 0; + memcmp(a.data(), b.data(), a.size()) == 0; } //! Initialize using begin and end iterators to byte data. 
template <typename T> void Set(const T pbegin, const T pend, bool fCompressedIn) { - if (size_t(pend - pbegin) != keydata.size()) { - fValid = false; + if (size_t(pend - pbegin) != std::tuple_size_v<KeyType>) { + ClearKeyData(); } else if (Check(&pbegin[0])) { - memcpy(keydata.data(), (unsigned char*)&pbegin[0], keydata.size()); - fValid = true; + MakeKeyData(); + memcpy(keydata->data(), (unsigned char*)&pbegin[0], keydata->size()); fCompressed = fCompressedIn; } else { - fValid = false; + ClearKeyData(); } } //! Simple read-only vector-like interface. - unsigned int size() const { return (fValid ? keydata.size() : 0); } - const std::byte* data() const { return reinterpret_cast<const std::byte*>(keydata.data()); } - const unsigned char* begin() const { return keydata.data(); } - const unsigned char* end() const { return keydata.data() + size(); } + unsigned int size() const { return keydata ? keydata->size() : 0; } + const std::byte* data() const { return keydata ? reinterpret_cast<const std::byte*>(keydata->data()) : nullptr; } + const unsigned char* begin() const { return keydata ? keydata->data() : nullptr; } + const unsigned char* end() const { return begin() + size(); } //! Check whether this private key is valid. - bool IsValid() const { return fValid; } + bool IsValid() const { return !!keydata; } //! Check whether the public key corresponding to this private key is (to be) compressed. 
bool IsCompressed() const { return fCompressed; } diff --git a/src/key_io.cpp b/src/key_io.cpp index 1a0b51a28b..5bcbb8a069 100644 --- a/src/key_io.cpp +++ b/src/key_io.cpp @@ -67,16 +67,18 @@ public: std::string operator()(const WitnessUnknown& id) const { - if (id.version < 1 || id.version > 16 || id.length < 2 || id.length > 40) { + const std::vector<unsigned char>& program = id.GetWitnessProgram(); + if (id.GetWitnessVersion() < 1 || id.GetWitnessVersion() > 16 || program.size() < 2 || program.size() > 40) { return {}; } - std::vector<unsigned char> data = {(unsigned char)id.version}; - data.reserve(1 + (id.length * 8 + 4) / 5); - ConvertBits<8, 5, true>([&](unsigned char c) { data.push_back(c); }, id.program, id.program + id.length); + std::vector<unsigned char> data = {(unsigned char)id.GetWitnessVersion()}; + data.reserve(1 + (program.size() * 8 + 4) / 5); + ConvertBits<8, 5, true>([&](unsigned char c) { data.push_back(c); }, program.begin(), program.end()); return bech32::Encode(bech32::Encoding::BECH32M, m_params.Bech32HRP(), data); } std::string operator()(const CNoDestination& no) const { return {}; } + std::string operator()(const PubKeyDestination& pk) const { return {}; } }; CTxDestination DecodeDestination(const std::string& str, const CChainParams& params, std::string& error_str, std::vector<int>* error_locations) @@ -189,11 +191,7 @@ CTxDestination DecodeDestination(const std::string& str, const CChainParams& par return CNoDestination(); } - WitnessUnknown unk; - unk.version = version; - std::copy(data.begin(), data.end(), unk.program); - unk.length = data.size(); - return unk; + return WitnessUnknown{version, data}; } else { error_str = strprintf("Invalid padding in Bech32 data section"); return CNoDestination(); diff --git a/src/memusage.h b/src/memusage.h index bb39066a7d..08be66172e 100644 --- a/src/memusage.h +++ b/src/memusage.h @@ -11,6 +11,7 @@ #include <cassert> #include <cstdlib> +#include <list> #include <map> #include <memory> #include 
<set> @@ -149,6 +150,21 @@ static inline size_t DynamicUsage(const std::shared_ptr<X>& p) } template<typename X> +struct list_node +{ +private: + void* ptr_next; + void* ptr_prev; + X x; +}; + +template<typename X> +static inline size_t DynamicUsage(const std::list<X>& l) +{ + return MallocUsage(sizeof(list_node<X>)) * l.size(); +} + +template<typename X> struct unordered_node : private X { private: diff --git a/src/net.cpp b/src/net.cpp index bfa2738e45..6b2ef5f43d 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -35,6 +35,7 @@ #include <util/threadinterrupt.h> #include <util/trace.h> #include <util/translation.h> +#include <util/vector.h> #ifdef WIN32 #include <string.h> @@ -157,39 +158,34 @@ uint16_t GetListenPort() return static_cast<uint16_t>(gArgs.GetIntArg("-port", Params().GetDefaultPort())); } -// find 'best' local address for a particular peer -bool GetLocal(CService& addr, const CNode& peer) +// Determine the "best" local address for a particular peer. +[[nodiscard]] static std::optional<CService> GetLocal(const CNode& peer) { - if (!fListen) - return false; + if (!fListen) return std::nullopt; + std::optional<CService> addr; int nBestScore = -1; int nBestReachability = -1; { LOCK(g_maplocalhost_mutex); - for (const auto& entry : mapLocalHost) - { + for (const auto& [local_addr, local_service_info] : mapLocalHost) { // For privacy reasons, don't advertise our privacy-network address // to other networks and don't advertise our other-network address // to privacy networks. 
- const Network our_net{entry.first.GetNetwork()}; - const Network peers_net{peer.ConnectedThroughNetwork()}; - if (our_net != peers_net && - (our_net == NET_ONION || our_net == NET_I2P || - peers_net == NET_ONION || peers_net == NET_I2P)) { + if (local_addr.GetNetwork() != peer.ConnectedThroughNetwork() + && (local_addr.IsPrivacyNet() || peer.IsConnectedThroughPrivacyNet())) { continue; } - int nScore = entry.second.nScore; - int nReachability = entry.first.GetReachabilityFrom(peer.addr); - if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) - { - addr = CService(entry.first, entry.second.nPort); + const int nScore{local_service_info.nScore}; + const int nReachability{local_addr.GetReachabilityFrom(peer.addr)}; + if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { + addr.emplace(CService{local_addr, local_service_info.nPort}); nBestReachability = nReachability; nBestScore = nScore; } } } - return nBestScore >= 0; + return addr; } //! Convert the serialized seeds into usable address objects. @@ -215,17 +211,13 @@ static std::vector<CAddress> ConvertSeeds(const std::vector<uint8_t> &vSeedsIn) return vSeedsOut; } -// get best local address for a particular peer as a CAddress -// Otherwise, return the unroutable 0.0.0.0 but filled in with +// Determine the "best" local address for a particular peer. +// If none, return the unroutable 0.0.0.0 but filled in with // the normal parameters, since the IP may be changed to a useful // one by discovery. CService GetLocalAddress(const CNode& peer) { - CService addr; - if (GetLocal(addr, peer)) { - return addr; - } - return CService{CNetAddr(), GetListenPort()}; + return GetLocal(peer).value_or(CService{CNetAddr(), GetListenPort()}); } static int GetnScore(const CService& addr) @@ -236,7 +228,7 @@ static int GetnScore(const CService& addr) } // Is our peer's addrLocal potentially useful as an external IP source? 
-bool IsPeerAddrLocalGood(CNode *pnode) +[[nodiscard]] static bool IsPeerAddrLocalGood(CNode *pnode) { CService addrLocal = pnode->GetAddrLocal(); return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() && @@ -288,7 +280,7 @@ std::optional<CService> GetLocalAddrForPeer(CNode& node) CService MaybeFlipIPv6toCJDNS(const CService& service) { CService ret{service}; - if (ret.m_net == NET_IPV6 && ret.m_addr[0] == 0xfc && IsReachable(NET_CJDNS)) { + if (ret.IsIPv6() && ret.HasCJDNSPrefix() && IsReachable(NET_CJDNS)) { ret.m_net = NET_CJDNS; } return ret; @@ -447,7 +439,7 @@ static CAddress GetBindAddress(const Sock& sock) return addr_bind; } -CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) +CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) { AssertLockNotHeld(m_unused_i2p_sessions_mutex); assert(conn_type != ConnectionType::INBOUND); @@ -465,7 +457,8 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo } } - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying connection %s lastseen=%.1fhrs\n", + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying %s connection %s lastseen=%.1fhrs\n", + use_v2transport ? "v2" : "v1", pszDest ? pszDest : addrConnect.ToStringAddrPort(), Ticks<HoursDouble>(pszDest ? 
0h : Now<NodeSeconds>() - addrConnect.nTime)); @@ -504,7 +497,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo const bool use_proxy{GetProxy(addrConnect.GetNetwork(), proxy)}; bool proxyConnectionFailed = false; - if (addrConnect.GetNetwork() == NET_I2P && use_proxy) { + if (addrConnect.IsI2P() && use_proxy) { i2p::Connection conn; if (m_i2p_sam_session) { @@ -588,6 +581,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo CNodeOptions{ .i2p_sam_session = std::move(i2p_transient_session), .recv_flood_size = nReceiveFloodSize, + .use_v2transport = use_v2transport, }); pnode->AddRef(); @@ -636,6 +630,11 @@ Network CNode::ConnectedThroughNetwork() const return m_inbound_onion ? NET_ONION : addr.GetNetClass(); } +bool CNode::IsConnectedThroughPrivacyNet() const +{ + return m_inbound_onion || addr.IsPrivacyNet(); +} + #undef X #define X(name) stats.name = name void CNode::CopyStats(CNodeStats& stats) @@ -668,6 +667,9 @@ void CNode::CopyStats(CNodeStats& stats) LOCK(cs_vRecv); X(mapRecvBytesPerMsgType); X(nRecvBytes); + Transport::Info info = m_transport->GetInfo(); + stats.m_transport_type = info.transport_type; + if (info.session_id) stats.m_session_id = HexStr(*info.session_id); } X(m_permission_flags); @@ -735,6 +737,11 @@ V1Transport::V1Transport(const NodeId node_id, int nTypeIn, int nVersionIn) noex Reset(); } +Transport::Info V1Transport::GetInfo() const noexcept +{ + return {.transport_type = TransportProtocolType::V1, .session_id = {}}; +} + int V1Transport::readHeader(Span<const uint8_t> msg_bytes) { AssertLockHeld(m_recv_mutex); @@ -858,7 +865,7 @@ bool V1Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept // serialize header m_header_to_send.clear(); - CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, m_header_to_send, 0, hdr}; + CVectorWriter{INIT_PROTO_VERSION, m_header_to_send, 0, hdr}; // update state m_message_to_send = std::move(msg); @@ -899,8 +906,7 @@ void 
V1Transport::MarkBytesSent(size_t bytes_sent) noexcept m_bytes_sent = 0; } else if (!m_sending_header && m_bytes_sent == m_message_to_send.data.size()) { // We're done sending a message's data. Wipe the data vector to reduce memory consumption. - m_message_to_send.data.clear(); - m_message_to_send.data.shrink_to_fit(); + ClearShrink(m_message_to_send.data); m_bytes_sent = 0; } } @@ -1006,8 +1012,7 @@ void V2Transport::StartSendingHandshake() noexcept m_send_buffer.resize(EllSwiftPubKey::size() + m_send_garbage.size()); std::copy(std::begin(m_cipher.GetOurPubKey()), std::end(m_cipher.GetOurPubKey()), MakeWritableByteSpan(m_send_buffer).begin()); std::copy(m_send_garbage.begin(), m_send_garbage.end(), m_send_buffer.begin() + EllSwiftPubKey::size()); - // We cannot wipe m_send_garbage as it will still be used to construct the garbage - // authentication packet. + // We cannot wipe m_send_garbage as it will still be used as AAD later in the handshake. } V2Transport::V2Transport(NodeId nodeid, bool initiating, int type_in, int version_in, const CKey& key, Span<const std::byte> ent32, std::vector<uint8_t> garbage) noexcept : @@ -1041,9 +1046,6 @@ void V2Transport::SetReceiveState(RecvState recv_state) noexcept Assume(recv_state == RecvState::GARB_GARBTERM); break; case RecvState::GARB_GARBTERM: - Assume(recv_state == RecvState::GARBAUTH); - break; - case RecvState::GARBAUTH: Assume(recv_state == RecvState::VERSION); break; case RecvState::VERSION: @@ -1123,8 +1125,8 @@ void V2Transport::ProcessReceivedMaybeV1Bytes() noexcept SetReceiveState(RecvState::V1); SetSendState(SendState::V1); // Reset v2 transport buffers to save memory. - m_recv_buffer = {}; - m_send_buffer = {}; + ClearShrink(m_recv_buffer); + ClearShrink(m_send_buffer); } else { // We have not received enough to distinguish v1 from v2 yet. Wait until more bytes come. 
} @@ -1175,25 +1177,15 @@ bool V2Transport::ProcessReceivedKeyBytes() noexcept m_cipher.GetSendGarbageTerminator().end(), MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN).begin()); - // Construct garbage authentication packet in the send buffer (using the garbage data which - // is still there). - m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::EXPANSION); - m_cipher.Encrypt( - /*contents=*/{}, - /*aad=*/MakeByteSpan(m_send_garbage), - /*ignore=*/false, - /*output=*/MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::EXPANSION)); - // We no longer need the garbage. - m_send_garbage.clear(); - m_send_garbage.shrink_to_fit(); - - // Construct version packet in the send buffer. + // Construct version packet in the send buffer, with the sent garbage data as AAD. m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::EXPANSION + VERSION_CONTENTS.size()); m_cipher.Encrypt( /*contents=*/VERSION_CONTENTS, - /*aad=*/{}, + /*aad=*/MakeByteSpan(m_send_garbage), /*ignore=*/false, /*output=*/MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::EXPANSION + VERSION_CONTENTS.size())); + // We no longer need the garbage. + ClearShrink(m_send_garbage); } else { // We still have to receive more key bytes. } @@ -1207,11 +1199,11 @@ bool V2Transport::ProcessReceivedGarbageBytes() noexcept Assume(m_recv_buffer.size() <= MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN); if (m_recv_buffer.size() >= BIP324Cipher::GARBAGE_TERMINATOR_LEN) { if (MakeByteSpan(m_recv_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN) == m_cipher.GetReceiveGarbageTerminator()) { - // Garbage terminator received. Switch to receiving garbage authentication packet. - m_recv_garbage = std::move(m_recv_buffer); - m_recv_garbage.resize(m_recv_garbage.size() - BIP324Cipher::GARBAGE_TERMINATOR_LEN); + // Garbage terminator received. Store garbage to authenticate it as AAD later. 
+ m_recv_aad = std::move(m_recv_buffer); + m_recv_aad.resize(m_recv_aad.size() - BIP324Cipher::GARBAGE_TERMINATOR_LEN); m_recv_buffer.clear(); - SetReceiveState(RecvState::GARBAUTH); + SetReceiveState(RecvState::VERSION); } else if (m_recv_buffer.size() == MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN) { // We've reached the maximum length for garbage + garbage terminator, and the // terminator still does not match. Abort. @@ -1230,8 +1222,7 @@ bool V2Transport::ProcessReceivedGarbageBytes() noexcept bool V2Transport::ProcessReceivedPacketBytes() noexcept { AssertLockHeld(m_recv_mutex); - Assume(m_recv_state == RecvState::GARBAUTH || m_recv_state == RecvState::VERSION || - m_recv_state == RecvState::APP); + Assume(m_recv_state == RecvState::VERSION || m_recv_state == RecvState::APP); // The maximum permitted contents length for a packet, consisting of: // - 0x00 byte: indicating long message type encoding @@ -1254,50 +1245,42 @@ bool V2Transport::ProcessReceivedPacketBytes() noexcept // as GetMaxBytesToProcess only allows up to LENGTH_LEN into the buffer before that point. m_recv_decode_buffer.resize(m_recv_len); bool ignore{false}; - Span<const std::byte> aad; - if (m_recv_state == RecvState::GARBAUTH) aad = MakeByteSpan(m_recv_garbage); bool ret = m_cipher.Decrypt( /*input=*/MakeByteSpan(m_recv_buffer).subspan(BIP324Cipher::LENGTH_LEN), - /*aad=*/aad, + /*aad=*/MakeByteSpan(m_recv_aad), /*ignore=*/ignore, /*contents=*/MakeWritableByteSpan(m_recv_decode_buffer)); if (!ret) { LogPrint(BCLog::NET, "V2 transport error: packet decryption failure (%u bytes), peer=%d\n", m_recv_len, m_nodeid); return false; } + // We have decrypted a valid packet with the AAD we expected, so clear the expected AAD. + ClearShrink(m_recv_aad); // Feed the last 4 bytes of the Poly1305 authentication tag (and its timing) into our RNG. 
RandAddEvent(ReadLE32(m_recv_buffer.data() + m_recv_buffer.size() - 4)); - // At this point we have a valid packet decrypted into m_recv_decode_buffer. Depending on - // the current state, decide what to do with it. - switch (m_recv_state) { - case RecvState::GARBAUTH: - // Ignore flag does not matter for garbage authentication. Any valid packet functions - // as authentication. Receive and process the version packet next. - SetReceiveState(RecvState::VERSION); - m_recv_garbage = {}; - break; - case RecvState::VERSION: - if (!ignore) { + // At this point we have a valid packet decrypted into m_recv_decode_buffer. If it's not a + // decoy, which we simply ignore, use the current state to decide what to do with it. + if (!ignore) { + switch (m_recv_state) { + case RecvState::VERSION: // Version message received; transition to application phase. The contents is // ignored, but can be used for future extensions. SetReceiveState(RecvState::APP); - } - break; - case RecvState::APP: - if (!ignore) { + break; + case RecvState::APP: // Application message decrypted correctly. It can be extracted using GetMessage(). SetReceiveState(RecvState::APP_READY); + break; + default: + // Any other state is invalid (this function should not have been called). + Assume(false); } - break; - default: - // Any other state is invalid (this function should not have been called). - Assume(false); } // Wipe the receive buffer where the next packet will be received into. - m_recv_buffer = {}; + ClearShrink(m_recv_buffer); // In all but APP_READY state, we can wipe the decoded contents. - if (m_recv_state != RecvState::APP_READY) m_recv_decode_buffer = {}; + if (m_recv_state != RecvState::APP_READY) ClearShrink(m_recv_decode_buffer); } else { // We either have less than 3 bytes, so we don't know the packet's length yet, or more // than 3 bytes but less than the packet's full ciphertext. Wait until those arrive. 
@@ -1328,7 +1311,6 @@ size_t V2Transport::GetMaxBytesToProcess() noexcept case RecvState::GARB_GARBTERM: // Process garbage bytes one by one (because terminator may appear anywhere). return 1; - case RecvState::GARBAUTH: case RecvState::VERSION: case RecvState::APP: // These three states all involve decoding a packet. Process the length descriptor first, @@ -1382,7 +1364,6 @@ bool V2Transport::ReceivedBytes(Span<const uint8_t>& msg_bytes) noexcept // bytes). m_recv_buffer.reserve(MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN); break; - case RecvState::GARBAUTH: case RecvState::VERSION: case RecvState::APP: { // During states where a packet is being received, as much as is expected but never @@ -1426,7 +1407,6 @@ bool V2Transport::ReceivedBytes(Span<const uint8_t>& msg_bytes) noexcept if (!ProcessReceivedGarbageBytes()) return false; break; - case RecvState::GARBAUTH: case RecvState::VERSION: case RecvState::APP: if (!ProcessReceivedPacketBytes()) return false; @@ -1511,7 +1491,7 @@ CNetMessage V2Transport::GetReceivedMessage(std::chrono::microseconds time, bool LogPrint(BCLog::NET, "V2 transport error: invalid message type (%u bytes contents), peer=%d\n", m_recv_decode_buffer.size(), m_nodeid); reject_message = true; } - m_recv_decode_buffer = {}; + ClearShrink(m_recv_decode_buffer); SetReceiveState(RecvState::APP); return msg; @@ -1545,7 +1525,7 @@ bool V2Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept m_cipher.Encrypt(MakeByteSpan(contents), {}, false, MakeWritableByteSpan(m_send_buffer)); m_send_type = msg.m_type; // Release memory - msg.data = {}; + ClearShrink(msg.data); return true; } @@ -1572,15 +1552,39 @@ void V2Transport::MarkBytesSent(size_t bytes_sent) noexcept LOCK(m_send_mutex); if (m_send_state == SendState::V1) return m_v1_fallback.MarkBytesSent(bytes_sent); + if (m_send_state == SendState::AWAITING_KEY && m_send_pos == 0 && bytes_sent > 0) { + LogPrint(BCLog::NET, "start sending v2 handshake to peer=%d\n", m_nodeid); + } + 
m_send_pos += bytes_sent; Assume(m_send_pos <= m_send_buffer.size()); + if (m_send_pos >= CMessageHeader::HEADER_SIZE) { + m_sent_v1_header_worth = true; + } // Wipe the buffer when everything is sent. if (m_send_pos == m_send_buffer.size()) { m_send_pos = 0; - m_send_buffer = {}; + ClearShrink(m_send_buffer); } } +bool V2Transport::ShouldReconnectV1() const noexcept +{ + AssertLockNotHeld(m_send_mutex); + AssertLockNotHeld(m_recv_mutex); + // Only outgoing connections need reconnection. + if (!m_initiating) return false; + + LOCK(m_recv_mutex); + // We only reconnect in the very first state and when the receive buffer is empty. Together + // these conditions imply nothing has been received so far. + if (m_recv_state != RecvState::KEY) return false; + if (!m_recv_buffer.empty()) return false; + // Check if we've sent enough for the other side to disconnect us (if it was V1). + LOCK(m_send_mutex); + return m_sent_v1_header_worth; +} + size_t V2Transport::GetSendMemoryUsage() const noexcept { AssertLockNotHeld(m_send_mutex); @@ -1590,6 +1594,27 @@ size_t V2Transport::GetSendMemoryUsage() const noexcept return sizeof(m_send_buffer) + memusage::DynamicUsage(m_send_buffer); } +Transport::Info V2Transport::GetInfo() const noexcept +{ + AssertLockNotHeld(m_recv_mutex); + LOCK(m_recv_mutex); + if (m_recv_state == RecvState::V1) return m_v1_fallback.GetInfo(); + + Transport::Info info; + + // Do not report v2 and session ID until the version packet has been received + // and verified (confirming that the other side very likely has the same keys as us). 
+ if (m_recv_state != RecvState::KEY_MAYBE_V1 && m_recv_state != RecvState::KEY && + m_recv_state != RecvState::GARB_GARBTERM && m_recv_state != RecvState::VERSION) { + info.transport_type = TransportProtocolType::V2; + info.session_id = uint256(MakeUCharSpan(m_cipher.GetSessionID())); + } else { + info.transport_type = TransportProtocolType::DETECTING; + } + + return info; +} + std::pair<size_t, bool> CConnman::SocketSendData(CNode& node) const { auto it = node.vSendMsg.begin(); @@ -1639,7 +1664,9 @@ std::pair<size_t, bool> CConnman::SocketSendData(CNode& node) const // Notify transport that bytes have been processed. node.m_transport->MarkBytesSent(nBytes); // Update statistics per message type. - node.AccountForSentBytes(msg_type, nBytes); + if (!msg_type.empty()) { // don't report v2 handshake bytes for now + node.AccountForSentBytes(msg_type, nBytes); + } nSentSize += nBytes; if ((size_t)nBytes != data.size()) { // could not send full message; stop sending more @@ -1822,6 +1849,10 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock, } const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); + // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is + // detected, so use it whenever we signal NODE_P2P_V2. 
+ const bool use_v2transport(nodeServices & NODE_P2P_V2); + CNode* pnode = new CNode(id, std::move(sock), addr, @@ -1835,6 +1866,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock, .permission_flags = permission_flags, .prefer_evict = discouraged, .recv_flood_size = nReceiveFloodSize, + .use_v2transport = use_v2transport, }); pnode->AddRef(); m_msgproc->InitializeNode(*pnode, nodeServices); @@ -1883,12 +1915,19 @@ bool CConnman::AddConnection(const std::string& address, ConnectionType conn_typ CSemaphoreGrant grant(*semOutbound, true); if (!grant) return false; - OpenNetworkConnection(CAddress(), false, &grant, address.c_str(), conn_type); + OpenNetworkConnection(CAddress(), false, std::move(grant), address.c_str(), conn_type, /*use_v2transport=*/false); return true; } void CConnman::DisconnectNodes() { + AssertLockNotHeld(m_nodes_mutex); + AssertLockNotHeld(m_reconnections_mutex); + + // Use a temporary variable to accumulate desired reconnections, so we don't need + // m_reconnections_mutex while holding m_nodes_mutex. + decltype(m_reconnections) reconnections_to_add; + { LOCK(m_nodes_mutex); @@ -1911,6 +1950,19 @@ void CConnman::DisconnectNodes() // remove from m_nodes m_nodes.erase(remove(m_nodes.begin(), m_nodes.end(), pnode), m_nodes.end()); + // Add to reconnection list if appropriate. We don't reconnect right here, because + // the creation of a connection is a blocking operation (up to several seconds), + // and we don't want to hold up the socket handler thread for that long. 
+ if (pnode->m_transport->ShouldReconnectV1()) { + reconnections_to_add.push_back({ + .addr_connect = pnode->addr, + .grant = std::move(pnode->grantOutbound), + .destination = pnode->m_dest, + .conn_type = pnode->m_conn_type, + .use_v2transport = false}); + LogPrint(BCLog::NET, "retrying with v1 transport protocol for peer=%d\n", pnode->GetId()); + } + // release outbound grant (if any) pnode->grantOutbound.Release(); @@ -1938,6 +1990,11 @@ void CConnman::DisconnectNodes() } } } + { + // Move entries from reconnections_to_add to m_reconnections. + LOCK(m_reconnections_mutex); + m_reconnections.splice(m_reconnections.end(), std::move(reconnections_to_add)); + } } void CConnman::NotifyNumConnectionsChanged() @@ -2315,9 +2372,9 @@ void CConnman::ProcessAddrFetch() m_addr_fetches.pop_front(); } CAddress addr; - CSemaphoreGrant grant(*semOutbound, true); + CSemaphoreGrant grant(*semOutbound, /*fTry=*/true); if (grant) { - OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH); + OpenNetworkConnection(addr, false, std::move(grant), strDest.c_str(), ConnectionType::ADDR_FETCH, /*use_v2transport=*/false); } } @@ -2410,6 +2467,7 @@ bool CConnman::MaybePickPreferredNetwork(std::optional<Network>& network) void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) { AssertLockNotHeld(m_unused_i2p_sessions_mutex); + AssertLockNotHeld(m_reconnections_mutex); FastRandomContext rng; // Connect to specific addresses if (!connect.empty()) @@ -2419,7 +2477,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) for (const std::string& strAddr : connect) { CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL); + OpenNetworkConnection(addr, false, {}, strAddr.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false); for (int i = 0; i < 10 && i < nLoop; i++) { if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) @@ -2453,6 
+2511,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; + PerformReconnections(); + CSemaphoreGrant grant(*semOutbound); if (interruptNet) return; @@ -2473,7 +2533,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // Perform cheap checks before locking a mutex. else if (!dnsseed && !use_seednodes) { LOCK(m_added_nodes_mutex); - if (m_added_nodes.empty()) { + if (m_added_node_params.empty()) { add_fixed_seeds_now = true; LogPrintf("Adding fixed seeds as -dnsseed=0 (or IPv4/IPv6 connections are disabled via -onlynet) and neither -addnode nor -seednode are provided\n"); } @@ -2722,7 +2782,9 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // Don't record addrman failure attempts when node is offline. This can be identified since all local // network connections (if any) belong in the same netgroup, and the size of `outbound_ipv46_peer_netgroups` would only be 1. const bool count_failures{((int)outbound_ipv46_peer_netgroups.size() + outbound_privacy_network_peers) >= std::min(nMaxConnections - 1, 2)}; - OpenNetworkConnection(addrConnect, count_failures, &grant, /*strDest=*/nullptr, conn_type); + // Use BIP324 transport when both us and them have NODE_V2_P2P set. 
+ const bool use_v2transport(addrConnect.nServices & GetLocalServices() & NODE_P2P_V2); + OpenNetworkConnection(addrConnect, count_failures, std::move(grant), /*strDest=*/nullptr, conn_type, use_v2transport); } } } @@ -2744,11 +2806,11 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const { std::vector<AddedNodeInfo> ret; - std::list<std::string> lAddresses(0); + std::list<AddedNodeParams> lAddresses(0); { LOCK(m_added_nodes_mutex); - ret.reserve(m_added_nodes.size()); - std::copy(m_added_nodes.cbegin(), m_added_nodes.cend(), std::back_inserter(lAddresses)); + ret.reserve(m_added_node_params.size()); + std::copy(m_added_node_params.cbegin(), m_added_node_params.cend(), std::back_inserter(lAddresses)); } @@ -2768,9 +2830,9 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const } } - for (const std::string& strAddNode : lAddresses) { - CService service(LookupNumeric(strAddNode, GetDefaultPort(strAddNode))); - AddedNodeInfo addedNode{strAddNode, CService(), false, false}; + for (const auto& addr : lAddresses) { + CService service(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node))); + AddedNodeInfo addedNode{addr, CService(), false, false}; if (service.IsValid()) { // strAddNode is an IP:port auto it = mapConnected.find(service); @@ -2781,7 +2843,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const } } else { // strAddNode is a name - auto it = mapConnectedByName.find(strAddNode); + auto it = mapConnectedByName.find(addr.m_added_node); if (it != mapConnectedByName.end()) { addedNode.resolvedAddress = it->second.second; addedNode.fConnected = true; @@ -2797,6 +2859,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const void CConnman::ThreadOpenAddedConnections() { AssertLockNotHeld(m_unused_i2p_sessions_mutex); + AssertLockNotHeld(m_reconnections_mutex); while (true) { CSemaphoreGrant grant(*semAddnode); @@ -2804,26 +2867,28 @@ void CConnman::ThreadOpenAddedConnections() bool tried = false; for (const 
AddedNodeInfo& info : vInfo) { if (!info.fConnected) { - if (!grant.TryAcquire()) { + if (!grant) { // If we've used up our semaphore and need a new one, let's not wait here since while we are waiting // the addednodeinfo state might change. break; } tried = true; CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL); - if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) - return; + OpenNetworkConnection(addr, false, std::move(grant), info.m_params.m_added_node.c_str(), ConnectionType::MANUAL, info.m_params.m_use_v2transport); + if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; + grant = CSemaphoreGrant(*semAddnode, /*fTry=*/true); } } // Retry every 60 seconds if a connection was attempted, otherwise two seconds if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) return; + // See if any reconnections are desired. + PerformReconnections(); } } // if successful, this moves the passed grant to the constructed node -void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type) +void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant&& grant_outbound, const char *pszDest, ConnectionType conn_type, bool use_v2transport) { AssertLockNotHeld(m_unused_i2p_sessions_mutex); assert(conn_type != ConnectionType::INBOUND); @@ -2845,12 +2910,11 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai } else if (FindNode(std::string(pszDest))) return; - CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type); + CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type, use_v2transport); if (!pnode) return; - if (grantOutbound) - grantOutbound->MoveTo(pnode->grantOutbound); + pnode->grantOutbound = std::move(grant_outbound); 
m_msgproc->InitializeNode(*pnode, nLocalServices); { @@ -3403,23 +3467,23 @@ std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addres return cache_entry.m_addrs_response_cache; } -bool CConnman::AddNode(const std::string& strNode) +bool CConnman::AddNode(const AddedNodeParams& add) { LOCK(m_added_nodes_mutex); - for (const std::string& it : m_added_nodes) { - if (strNode == it) return false; + for (const auto& it : m_added_node_params) { + if (add.m_added_node == it.m_added_node) return false; } - m_added_nodes.push_back(strNode); + m_added_node_params.push_back(add); return true; } bool CConnman::RemoveAddedNode(const std::string& strNode) { LOCK(m_added_nodes_mutex); - for(std::vector<std::string>::iterator it = m_added_nodes.begin(); it != m_added_nodes.end(); ++it) { - if (strNode == *it) { - m_added_nodes.erase(it); + for (auto it = m_added_node_params.begin(); it != m_added_node_params.end(); ++it) { + if (strNode == it->m_added_node) { + m_added_node_params.erase(it); return true; } } @@ -3607,6 +3671,15 @@ ServiceFlags CConnman::GetLocalServices() const return nLocalServices; } +static std::unique_ptr<Transport> MakeTransport(NodeId id, bool use_v2transport, bool inbound) noexcept +{ + if (use_v2transport) { + return std::make_unique<V2Transport>(id, /*initiating=*/!inbound, SER_NETWORK, INIT_PROTO_VERSION); + } else { + return std::make_unique<V1Transport>(id, SER_NETWORK, INIT_PROTO_VERSION); + } +} + CNode::CNode(NodeId idIn, std::shared_ptr<Sock> sock, const CAddress& addrIn, @@ -3617,13 +3690,14 @@ CNode::CNode(NodeId idIn, ConnectionType conn_type_in, bool inbound_onion, CNodeOptions&& node_opts) - : m_transport{std::make_unique<V1Transport>(idIn, SER_NETWORK, INIT_PROTO_VERSION)}, + : m_transport{MakeTransport(idIn, node_opts.use_v2transport, conn_type_in == ConnectionType::INBOUND)}, m_permission_flags{node_opts.permission_flags}, m_sock{sock}, m_connected{GetTime<std::chrono::seconds>()}, addr{addrIn}, addrBind{addrBindIn}, 
m_addr_name{addrNameIn.empty() ? addr.ToStringAddrPort() : addrNameIn}, + m_dest(addrNameIn), m_inbound_onion{inbound_onion}, m_prefer_evict{node_opts.prefer_evict}, nKeyedNetGroup{nKeyedNetGroupIn}, @@ -3754,10 +3828,38 @@ uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& address) const return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup).Finalize(); } -void CaptureMessageToFile(const CAddress& addr, - const std::string& msg_type, - Span<const unsigned char> data, - bool is_incoming) +void CConnman::PerformReconnections() +{ + AssertLockNotHeld(m_reconnections_mutex); + AssertLockNotHeld(m_unused_i2p_sessions_mutex); + while (true) { + // Move first element of m_reconnections to todo (avoiding an allocation inside the lock). + decltype(m_reconnections) todo; + { + LOCK(m_reconnections_mutex); + if (m_reconnections.empty()) break; + todo.splice(todo.end(), m_reconnections, m_reconnections.begin()); + } + + auto& item = *todo.begin(); + OpenNetworkConnection(item.addr_connect, + // We only reconnect if the first attempt to connect succeeded at + // connection time, but then failed after the CNode object was + // created. Since we already know connecting is possible, do not + // count failure to reconnect. + /*fCountFailure=*/false, + std::move(item.grant), + item.destination.empty() ? nullptr : item.destination.c_str(), + item.conn_type, + item.use_v2transport); + } +} + +// Dump binary message to file, with timestamp. +static void CaptureMessageToFile(const CAddress& addr, + const std::string& msg_type, + Span<const unsigned char> data, + bool is_incoming) { // Note: This function captures the message at the time of processing, // not at socket receive/send time. 
@@ -94,11 +94,17 @@ static constexpr bool DEFAULT_FIXEDSEEDS{true}; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; +static constexpr bool DEFAULT_V2_TRANSPORT{false}; + typedef int64_t NodeId; -struct AddedNodeInfo -{ - std::string strAddedNode; +struct AddedNodeParams { + std::string m_added_node; + bool m_use_v2transport; +}; + +struct AddedNodeInfo { + AddedNodeParams m_params; CService resolvedAddress; bool fConnected; bool fInbound; @@ -151,7 +157,6 @@ enum LOCAL_MAX }; -bool IsPeerAddrLocalGood(CNode *pnode); /** Returns a local address that we should advertise to this peer. */ std::optional<CService> GetLocalAddrForPeer(CNode& node); @@ -170,7 +175,6 @@ bool AddLocal(const CNetAddr& addr, int nScore = LOCAL_NONE); void RemoveLocal(const CService& addr); bool SeenLocal(const CService& addr); bool IsLocal(const CService& addr); -bool GetLocal(CService& addr, const CNode& peer); CService GetLocalAddress(const CNode& peer); CService MaybeFlipIPv6toCJDNS(const CService& service); @@ -228,6 +232,10 @@ public: Network m_network; uint32_t m_mapped_as; ConnectionType m_conn_type; + /** Transport protocol type. */ + TransportProtocolType m_transport_type; + /** BIP324 session id string in hex, if any. */ + std::string m_session_id; }; @@ -264,6 +272,15 @@ class Transport { public: virtual ~Transport() {} + struct Info + { + TransportProtocolType transport_type; + std::optional<uint256> session_id; + }; + + /** Retrieve information about this transport. */ + virtual Info GetInfo() const noexcept = 0; + // 1. Receiver side functions, for decoding bytes received on the wire into transport protocol // agnostic CNetMessage (message type & payload) objects. @@ -357,6 +374,11 @@ public: /** Return the memory usage of this transport attributable to buffered data to send. */ virtual size_t GetSendMemoryUsage() const noexcept = 0; + + // 3. Miscellaneous functions. 
+ + /** Whether upon disconnections, a reconnect with V1 is warranted. */ + virtual bool ShouldReconnectV1() const noexcept = 0; }; class V1Transport final : public Transport @@ -417,6 +439,8 @@ public: return WITH_LOCK(m_recv_mutex, return CompleteInternal()); } + Info GetInfo() const noexcept override; + bool ReceivedBytes(Span<const uint8_t>& msg_bytes) override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex) { AssertLockNotHeld(m_recv_mutex); @@ -436,6 +460,7 @@ public: BytesToSend GetBytesToSend(bool have_next_message) const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); void MarkBytesSent(size_t bytes_sent) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); size_t GetSendMemoryUsage() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); + bool ShouldReconnectV1() const noexcept override { return false; } }; class V2Transport final : public Transport @@ -462,10 +487,10 @@ private: * * start(responder) * | - * | start(initiator) /---------\ - * | | | | - * v v v | - * KEY_MAYBE_V1 -> KEY -> GARB_GARBTERM -> GARBAUTH -> VERSION -> APP -> APP_READY + * | start(initiator) /---------\ + * | | | | + * v v v | + * KEY_MAYBE_V1 -> KEY -> GARB_GARBTERM -> VERSION -> APP -> APP_READY * | * \-------> V1 */ @@ -487,24 +512,19 @@ private: /** Garbage and garbage terminator. * * Whenever a byte is received, the last 16 bytes are compared with the expected garbage - * terminator. When that happens, the state becomes GARBAUTH. If no matching terminator is + * terminator. When that happens, the state becomes VERSION. If no matching terminator is * received in 4111 bytes (4095 for the maximum garbage length, and 16 bytes for the * terminator), the connection aborts. */ GARB_GARBTERM, - /** Garbage authentication packet. - * - * A packet is received, and decrypted/verified with AAD set to the garbage received during - * the GARB_GARBTERM state. If that succeeds, the state becomes VERSION. If it fails the - * connection aborts. 
*/ - GARBAUTH, - /** Version packet. * - * A packet is received, and decrypted/verified. If that succeeds, the state becomes APP, - * and the decrypted contents is interpreted as version negotiation (currently, that means - * ignoring it, but it can be used for negotiating future extensions). If it fails, the - * connection aborts. */ + * A packet is received, and decrypted/verified. If that fails, the connection aborts. The + * first received packet in this state (whether it's a decoy or not) is expected to + * authenticate the garbage received during the GARB_GARBTERM state as associated + * authenticated data (AAD). The first non-decoy packet in this state is interpreted as + * version negotiation (currently, that means ignoring the contents, but it can be used for + * negotiating future extensions), and afterwards the state becomes APP. */ VERSION, /** Application packet. @@ -558,9 +578,9 @@ private: /** Normal sending state. * * In this state, the ciphers are initialized, so packets can be sent. When this state is - * entered, the garbage terminator, garbage authentication packet, and version - * packet are appended to the send buffer (in addition to the key and garbage which may - * still be there). In this state a message can be provided if the send buffer is empty. */ + * entered, the garbage terminator and version packet are appended to the send buffer (in + * addition to the key and garbage which may still be there). In this state a message can be + * provided if the send buffer is empty. */ READY, /** This transport is using v1 fallback. @@ -580,13 +600,13 @@ private: /** Lock for receiver-side fields. */ mutable Mutex m_recv_mutex ACQUIRED_BEFORE(m_send_mutex); - /** In {GARBAUTH, VERSION, APP}, the decrypted packet length, if m_recv_buffer.size() >= + /** In {VERSION, APP}, the decrypted packet length, if m_recv_buffer.size() >= * BIP324Cipher::LENGTH_LEN. Unspecified otherwise. 
*/ uint32_t m_recv_len GUARDED_BY(m_recv_mutex) {0}; /** Receive buffer; meaning is determined by m_recv_state. */ std::vector<uint8_t> m_recv_buffer GUARDED_BY(m_recv_mutex); - /** During GARBAUTH, the garbage received during GARB_GARBTERM. */ - std::vector<uint8_t> m_recv_garbage GUARDED_BY(m_recv_mutex); + /** AAD expected in next received packet (currently used only for garbage). */ + std::vector<uint8_t> m_recv_aad GUARDED_BY(m_recv_mutex); /** Buffer to put decrypted contents in, for converting to CNetMessage. */ std::vector<uint8_t> m_recv_decode_buffer GUARDED_BY(m_recv_mutex); /** Deserialization type. */ @@ -609,6 +629,8 @@ private: std::string m_send_type GUARDED_BY(m_send_mutex); /** Current sender state. */ SendState m_send_state GUARDED_BY(m_send_mutex); + /** Whether we've sent at least 24 bytes (which would trigger disconnect for V1 peers). */ + bool m_sent_v1_header_worth GUARDED_BY(m_send_mutex) {false}; /** Change the receive state. */ void SetReceiveState(RecvState recv_state) noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex); @@ -626,7 +648,7 @@ private: bool ProcessReceivedKeyBytes() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex, !m_send_mutex); /** Process bytes in m_recv_buffer, while in GARB_GARBTERM state. */ bool ProcessReceivedGarbageBytes() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex); - /** Process bytes in m_recv_buffer, while in GARBAUTH/VERSION/APP state. */ + /** Process bytes in m_recv_buffer, while in VERSION/APP state. */ bool ProcessReceivedPacketBytes() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex); public: @@ -654,6 +676,10 @@ public: BytesToSend GetBytesToSend(bool have_next_message) const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); void MarkBytesSent(size_t bytes_sent) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); size_t GetSendMemoryUsage() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); + + // Miscellaneous functions. 
+ bool ShouldReconnectV1() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex, !m_send_mutex); + Info GetInfo() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex); }; struct CNodeOptions @@ -662,6 +688,7 @@ struct CNodeOptions std::unique_ptr<i2p::sam::Session> i2p_sam_session = nullptr; bool prefer_evict = false; size_t recv_flood_size{DEFAULT_MAXRECEIVEBUFFER * 1000}; + bool use_v2transport = false; }; /** Information about a peer */ @@ -706,6 +733,8 @@ public: // Bind address of our side of the connection const CAddress addrBind; const std::string m_addr_name; + /** The pszDest argument provided to ConnectNode(). Only used for reconnections. */ + const std::string m_dest; //! Whether this peer is an inbound onion, i.e. connected via our Tor onion service. const bool m_inbound_onion; std::atomic<int> nVersion{0}; @@ -834,6 +863,9 @@ public: */ Network ConnectedThroughNetwork() const; + /** Whether this peer connected through a privacy network. */ + [[nodiscard]] bool IsConnectedThroughPrivacyNet() const; + // We selected peer as (compact blocks) high-bandwidth peer (BIP152) std::atomic<bool> m_bip152_highbandwidth_to{false}; // Peer selected us as (compact blocks) high-bandwidth peer (BIP152) @@ -1076,7 +1108,11 @@ public: vWhitelistedRange = connOptions.vWhitelistedRange; { LOCK(m_added_nodes_mutex); - m_added_nodes = connOptions.m_added_nodes; + + for (const std::string& added_node : connOptions.m_added_nodes) { + // -addnode cli arg does not currently have a way to signal BIP324 support + m_added_node_params.push_back({added_node, false}); + } } m_onion_binds = connOptions.onion_binds; } @@ -1100,7 +1136,7 @@ public: bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); - void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound, const char* strDest, ConnectionType conn_type) 
EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); + void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant&& grant_outbound, const char* strDest, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); bool CheckIncomingNonce(uint64_t nonce); // alias for thread safety annotations only, not defined @@ -1163,7 +1199,7 @@ public: // Count the number of block-relay-only peers we have over our limit. int GetExtraBlockRelayCount() const; - bool AddNode(const std::string& node) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); + bool AddNode(const AddedNodeParams& add) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); bool RemoveAddedNode(const std::string& node) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); std::vector<AddedNodeInfo> GetAddedNodeInfo() const EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); @@ -1246,10 +1282,10 @@ private: bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const Options& options); - void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_unused_i2p_sessions_mutex); + void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex); void AddAddrFetch(const std::string& strDest) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex); void ProcessAddrFetch() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_unused_i2p_sessions_mutex); - void ThreadOpenConnections(std::vector<std::string> connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex); + void ThreadOpenConnections(std::vector<std::string> connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex); void ThreadMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc); void ThreadI2PAcceptIncoming(); void 
AcceptConnection(const ListenSocket& hListenSocket); @@ -1267,7 +1303,7 @@ private: const CAddress& addr_bind, const CAddress& addr); - void DisconnectNodes(); + void DisconnectNodes() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_nodes_mutex); void NotifyNumConnectionsChanged(); /** Return true if the peer is inactive and should be disconnected. */ bool InactivityCheck(const CNode& node) const; @@ -1299,7 +1335,7 @@ private: */ void SocketHandlerListening(const Sock::EventsPerSock& events_per_sock); - void ThreadSocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc); + void ThreadSocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc, !m_nodes_mutex, !m_reconnections_mutex); void ThreadDNSAddressSeed() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_nodes_mutex); uint64_t CalculateKeyedNetGroup(const CAddress& ad) const; @@ -1316,7 +1352,7 @@ private: bool AlreadyConnectedToAddress(const CAddress& addr); bool AttemptToEvictConnection(); - CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); + CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const; void DeleteNode(CNode* pnode); @@ -1388,7 +1424,10 @@ private: const NetGroupManager& m_netgroupman; std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex); Mutex m_addr_fetches_mutex; - std::vector<std::string> m_added_nodes GUARDED_BY(m_added_nodes_mutex); + + // connection string and whether to use v2 p2p + std::vector<AddedNodeParams> m_added_node_params GUARDED_BY(m_added_nodes_mutex); + mutable Mutex m_added_nodes_mutex; std::vector<CNode*> m_nodes GUARDED_BY(m_nodes_mutex); std::list<CNode*> 
m_nodes_disconnected; @@ -1528,6 +1567,29 @@ private: std::queue<std::unique_ptr<i2p::sam::Session>> m_unused_i2p_sessions GUARDED_BY(m_unused_i2p_sessions_mutex); /** + * Mutex protecting m_reconnections. + */ + Mutex m_reconnections_mutex; + + /** Struct for entries in m_reconnections. */ + struct ReconnectionInfo + { + CAddress addr_connect; + CSemaphoreGrant grant; + std::string destination; + ConnectionType conn_type; + bool use_v2transport; + }; + + /** + * List of reconnections we have to make. + */ + std::list<ReconnectionInfo> m_reconnections GUARDED_BY(m_reconnections_mutex); + + /** Attempt reconnections, if m_reconnections non-empty. */ + void PerformReconnections() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_unused_i2p_sessions_mutex); + + /** * Cap on the size of `m_unused_i2p_sessions`, to ensure it does not * unexpectedly use too much memory. */ @@ -1575,12 +1637,6 @@ private: friend struct ConnmanTestMsg; }; -/** Dump binary message to file, with timestamp */ -void CaptureMessageToFile(const CAddress& addr, - const std::string& msg_type, - Span<const unsigned char> data, - bool is_incoming); - /** Defaults to `CaptureMessageToFile()`, but can be overridden by unit tests. */ extern std::function<void(const CAddress& addr, const std::string& msg_type, diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 6b415b3a1e..06086d6804 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -18,6 +18,7 @@ #include <index/blockfilterindex.h> #include <kernel/mempool_entry.h> #include <logging.h> +#include <kernel/chain.h> #include <merkleblock.h> #include <netbase.h> #include <netmessagemaker.h> @@ -483,7 +484,7 @@ public: CTxMemPool& pool, Options opts); /** Overridden from CValidationInterface. 
*/ - void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override + void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); @@ -892,6 +893,38 @@ private: */ void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Request blocks for the background chainstate, if one is in use. */ + void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** + * \brief Find next blocks to download from a peer after a starting block. + * + * \param vBlocks Vector of blocks to download which will be appended to. + * \param peer Peer which blocks will be downloaded from. + * \param state Pointer to the state of the peer. + * \param pindexWalk Pointer to the starting block to add to vBlocks. + * \param count Maximum number of blocks to allow in vBlocks. No more + * blocks will be added if it reaches this size. + * \param nWindowEnd Maximum height of blocks to allow in vBlocks. No + * blocks will be added above this height. + * \param activeChain Optional pointer to a chain to compare against. If + * provided, any next blocks which are already contained + * in this chain will not be appended to vBlocks, but + * instead will be used to update the + * state->pindexLastCommonBlock pointer. + * \param nodeStaller Optional pointer to a NodeId variable that will receive + * the ID of another peer that might be causing this peer + * to stall. 
This is set to the ID of the peer which + * first requested the first in-flight block in the + * download window. It is only set if vBlocks is empty at + * the end of this function call and if increasing + * nWindowEnd by 1 would cause it to be non-empty (which + * indicates the download might be stalled because every + * block in the window is in flight and no other peer is + * trying to download the next block). + */ + void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /* Multimap used to preserve insertion order */ typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap; BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main); @@ -1312,6 +1345,7 @@ void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash } } +// Logic for calculating which blocks to download from a given peer, given our current tip. void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) { if (count == 0) @@ -1341,12 +1375,47 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return; - std::vector<const CBlockIndex*> vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to // download that next block if the window were 1 larger. 
int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; + + FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller); +} + +void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block) +{ + Assert(from_tip); + Assert(target_block); + + if (vBlocks.size() >= count) { + return; + } + + vBlocks.reserve(count); + CNodeState *state = Assert(State(peer.m_id)); + + if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) { + // This peer can't provide us the complete series of blocks leading up to the + // assumeutxo snapshot base. + // + // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we + // will eventually crash when we try to reorg to it. Let other logic + // deal with whether we disconnect this peer. + // + // TODO at some point in the future, we might choose to request what blocks + // this peer does have from the historical chain, despite it not having a + // complete history beneath the snapshot base. + return; + } + + FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight)); +} + +void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller) +{ + std::vector<const CBlockIndex*> vToFetch; int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); NodeId waitingfor = -1; while (pindexWalk->nHeight < nMaxHeight) { @@ -1374,8 +1443,8 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co // We wouldn't download this block or its descendants from this peer. 
return; } - if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) { - if (pindex->HaveTxsDownloaded()) + if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) { + if (activeChain && pindex->HaveTxsDownloaded()) state->pindexLastCommonBlock = pindex; } else if (!IsBlockRequested(pindex->GetBlockHash())) { // The block is not already downloaded, and not yet in flight. @@ -1383,7 +1452,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co // We reached the end of the window. if (vBlocks.size() == 0 && waitingfor != peer.m_id) { // We aren't able to fetch anything, but we would be if the download window was one larger. - nodeStaller = waitingfor; + if (nodeStaller) *nodeStaller = waitingfor; } return; } @@ -1415,8 +1484,8 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) const bool tx_relay{!RejectIncomingTxs(pnode)}; m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime, - your_services, WithParams(CNetAddr::V1, addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime) - my_services, WithParams(CNetAddr::V1, CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime) + your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime) + my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime) nonce, strSubVersion, nNodeStartingHeight, tx_relay)); if (fLogIPs) { @@ -1843,11 +1912,30 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) * announcements for them. Also save the time of the last tip update and * possibly reduce dynamic block stalling timeout. 
*/ -void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex) +void PeerManagerImpl::BlockConnected( + ChainstateRole role, + const std::shared_ptr<const CBlock>& pblock, + const CBlockIndex* pindex) { - m_orphanage.EraseForBlock(*pblock); + // Update this for all chainstate roles so that we don't mistakenly see peers + // helping us do background IBD as having a stale tip. m_last_tip_update = GetTime<std::chrono::seconds>(); + // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value + auto stalling_timeout = m_block_stalling_timeout.load(); + Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); + if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { + const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); + if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { + LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); + } + } + + if (role == ChainstateRole::BACKGROUND) { + return; + } + m_orphanage.EraseForBlock(*pblock); + { LOCK(m_recent_confirmed_transactions_mutex); for (const auto& ptx : pblock->vtx) { @@ -1864,16 +1952,6 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); } } - - // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value - auto stalling_timeout = m_block_stalling_timeout.load(); - Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); - if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { - const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); - if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { - LogPrint(BCLog::NET, "Decreased 
stalling timeout to %d seconds\n", count_seconds(new_timeout)); - } - } } void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) @@ -3293,7 +3371,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, nTime = 0; } vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer - vRecv >> WithParams(CNetAddr::V1, addrMe); + vRecv >> CNetAddr::V1(addrMe); if (!pfrom.IsInboundConn()) { m_addrman.SetServices(pfrom.addr, nServices); @@ -3507,13 +3585,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - if (!pfrom.IsInboundConn()) { + // Log succesful connections unconditionally for outbound, but not for inbound as those + // can be triggered by an attacker at high rate. + if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) { const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; - LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s%s (%s)\n", + LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n", + pfrom.ConnectionTypeAsString(), + TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""), - (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""), - pfrom.ConnectionTypeAsString()); + (mapped_as ? 
strprintf(", mapped_as=%d", mapped_as) : "")); } if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { @@ -5847,7 +5928,20 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector<const CBlockIndex*> vToDownload; NodeId staller = -1; - FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.vBlocksInFlight.size(), vToDownload, staller); + auto get_inflight_budget = [&state]() { + return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size())); + }; + + // If a snapshot chainstate is in use, we want to find its next blocks + // before the background chainstate to prioritize getting to network tip. + FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller); + if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) { + TryDownloadingHistoricalBlocks( + *peer, + get_inflight_budget(), + vToDownload, m_chainman.GetBackgroundSyncTip(), + Assert(m_chainman.GetSnapshotBaseBlock())); + } for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(*peer); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); diff --git a/src/netaddress.cpp b/src/netaddress.cpp index 4758f24680..7530334db1 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -309,10 +309,6 @@ bool CNetAddr::IsBindAny() const return std::all_of(m_addr.begin(), m_addr.end(), [](uint8_t b) { return b == 0; }); } -bool CNetAddr::IsIPv4() const { return m_net == NET_IPV4; } - -bool CNetAddr::IsIPv6() const { return m_net == NET_IPV6; } - bool CNetAddr::IsRFC1918() const { return IsIPv4() && ( @@ -400,22 +396,6 @@ bool CNetAddr::IsHeNet() const return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 4>{0x20, 0x01, 0x04, 0x70}); } -/** - * Check whether this object 
represents a TOR address. - * @see CNetAddr::SetSpecial(const std::string &) - */ -bool CNetAddr::IsTor() const { return m_net == NET_ONION; } - -/** - * Check whether this object represents an I2P address. - */ -bool CNetAddr::IsI2P() const { return m_net == NET_I2P; } - -/** - * Check whether this object represents a CJDNS address. - */ -bool CNetAddr::IsCJDNS() const { return m_net == NET_CJDNS; } - bool CNetAddr::IsLocal() const { // IPv4 loopback (127.0.0.0/8 or 0.0.0.0/8) @@ -450,8 +430,7 @@ bool CNetAddr::IsValid() const return false; } - // CJDNS addresses always start with 0xfc - if (IsCJDNS() && (m_addr[0] != 0xFC)) { + if (IsCJDNS() && !HasCJDNSPrefix()) { return false; } diff --git a/src/netaddress.h b/src/netaddress.h index a0944c886f..ad09f16799 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -154,8 +154,8 @@ public: bool SetSpecial(const std::string& addr); bool IsBindAny() const; // INADDR_ANY equivalent - bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) - bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor) + [[nodiscard]] bool IsIPv4() const { return m_net == NET_IPV4; } // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) + [[nodiscard]] bool IsIPv6() const { return m_net == NET_IPV6; } // IPv6 address (not mapped IPv4, not Tor) bool IsRFC1918() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) bool IsRFC2544() const; // IPv4 inter-network communications (198.18.0.0/15) bool IsRFC6598() const; // IPv4 ISP-level NAT (100.64.0.0/10) @@ -171,15 +171,23 @@ public: bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96) bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765) bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36) - bool IsTor() const; - bool IsI2P() const; - bool IsCJDNS() const; + [[nodiscard]] bool IsTor() const { return m_net == NET_ONION; } + [[nodiscard]] 
bool IsI2P() const { return m_net == NET_I2P; } + [[nodiscard]] bool IsCJDNS() const { return m_net == NET_CJDNS; } + [[nodiscard]] bool HasCJDNSPrefix() const { return m_addr[0] == 0xfc; } bool IsLocal() const; bool IsRoutable() const; bool IsInternal() const; bool IsValid() const; /** + * Whether this object is a privacy network. + * TODO: consider adding IsCJDNS() here when more peers adopt CJDNS, see: + * https://github.com/bitcoin/bitcoin/pull/27411#issuecomment-1497176155 + */ + [[nodiscard]] bool IsPrivacyNet() const { return IsTor() || IsI2P(); } + + /** * Check if the current object can be serialized in pre-ADDRv2/BIP155 format. */ bool IsAddrV1Compatible() const; @@ -218,6 +226,7 @@ public: }; struct SerParams { const Encoding enc; + SER_PARAMS_OPFUNC }; static constexpr SerParams V1{Encoding::V1}; static constexpr SerParams V2{Encoding::V2}; diff --git a/src/netmessagemaker.h b/src/netmessagemaker.h index 89fb4758f9..a121183aab 100644 --- a/src/netmessagemaker.h +++ b/src/netmessagemaker.h @@ -19,7 +19,7 @@ public: { CSerializedNetMsg msg; msg.m_type = std::move(msg_type); - CVectorWriter{ SER_NETWORK, nFlags | nVersion, msg.data, 0, std::forward<Args>(args)... 
}; + CVectorWriter{nFlags | nVersion, msg.data, 0, std::forward<Args>(args)...}; return msg; } diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index f21c94c0a0..5e61ed3100 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -10,6 +10,7 @@ #include <dbwrapper.h> #include <flatfile.h> #include <hash.h> +#include <kernel/chain.h> #include <kernel/chainparams.h> #include <kernel/messagestartchars.h> #include <logging.h> @@ -257,40 +258,56 @@ void BlockManager::PruneOneBlockFile(const int fileNumber) m_dirty_fileinfo.insert(fileNumber); } -void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height) +void BlockManager::FindFilesToPruneManual( + std::set<int>& setFilesToPrune, + int nManualPruneHeight, + const Chainstate& chain, + ChainstateManager& chainman) { assert(IsPruneMode() && nManualPruneHeight > 0); LOCK2(cs_main, cs_LastBlockFile); - if (chain_tip_height < 0) { + if (chain.m_chain.Height() < 0) { return; } - // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip) - unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP); + const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight); + int count = 0; - for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { - if (m_blockfile_info[fileNumber].nSize == 0 || m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) { + for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { + const auto& fileinfo = m_blockfile_info[fileNumber]; + if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { continue; } + PruneOneBlockFile(fileNumber); setFilesToPrune.insert(fileNumber); count++; } - LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev 
pairs\n", nLastBlockWeCanPrune, count); + LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", + chain.GetRole(), last_block_can_prune, count); } -void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd) +void BlockManager::FindFilesToPrune( + std::set<int>& setFilesToPrune, + int last_prune, + const Chainstate& chain, + ChainstateManager& chainman) { LOCK2(cs_main, cs_LastBlockFile); - if (chain_tip_height < 0 || GetPruneTarget() == 0) { + // Distribute our -prune budget over all chainstates. + const auto target = std::max( + MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size()); + + if (chain.m_chain.Height() < 0 || target == 0) { return; } - if ((uint64_t)chain_tip_height <= nPruneAfterHeight) { + if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) { return; } - unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))}; + const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune); + uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files // So we should leave a buffer under our target to account for another allocation @@ -299,29 +316,31 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr uint64_t nBytesToPrune; int count = 0; - if (nCurrentUsage + nBuffer >= GetPruneTarget()) { + if (nCurrentUsage + nBuffer >= target) { // On a prune event, the chainstate DB is flushed. // To avoid excessive prune events negating the benefit of high dbcache // values, we should not prune too rapidly. // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon. 
- if (is_ibd) { + if (chainman.IsInitialBlockDownload()) { // Since this is only relevant during IBD, we use a fixed 10% - nBuffer += GetPruneTarget() / 10; + nBuffer += target / 10; } - for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { - nBytesToPrune = m_blockfile_info[fileNumber].nSize + m_blockfile_info[fileNumber].nUndoSize; + for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { + const auto& fileinfo = m_blockfile_info[fileNumber]; + nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize; - if (m_blockfile_info[fileNumber].nSize == 0) { + if (fileinfo.nSize == 0) { continue; } - if (nCurrentUsage + nBuffer < GetPruneTarget()) { // are we below our target? + if (nCurrentUsage + nBuffer < target) { // are we below our target? break; } - // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning - if (m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) { + // don't prune files that could have a block that's not within the allowable + // prune range for the chain being pruned. 
+ if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { continue; } @@ -333,10 +352,10 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr } } - LogPrint(BCLog::PRUNE, "target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n", - GetPruneTarget() / 1024 / 1024, nCurrentUsage / 1024 / 1024, - (int64_t(GetPruneTarget()) - int64_t(nCurrentUsage)) / 1024 / 1024, - nLastBlockWeCanPrune, count); + LogPrint(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n", + chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024, + (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024, + min_block_to_prune, last_block_can_prune, count); } void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) { @@ -360,13 +379,32 @@ CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash) return pindex; } -bool BlockManager::LoadBlockIndex() +bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash) { if (!m_block_tree_db->LoadBlockIndexGuts( GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) { return false; } + if (snapshot_blockhash) { + const AssumeutxoData au_data = *Assert(GetParams().AssumeutxoForBlockhash(*snapshot_blockhash)); + m_snapshot_height = au_data.height; + CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)}; + + // Since nChainTx (responsible for estimated progress) isn't persisted + // to disk, we must bootstrap the value for assumedvalid chainstates + // from the hardcoded assumeutxo chainparams. 
+ base->nChainTx = au_data.nChainTx; + LogPrintf("[snapshot] set nChainTx=%d for %s\n", au_data.nChainTx, snapshot_blockhash->ToString()); + } else { + // If this isn't called with a snapshot blockhash, make sure the cached snapshot height + // is null. This is relevant during snapshot completion, when the blockman may be loaded + // with a height that then needs to be cleared after the snapshot is fully validated. + m_snapshot_height.reset(); + } + + Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value()); + // Calculate nChainWork std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()}; std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), @@ -383,7 +421,11 @@ bool BlockManager::LoadBlockIndex() // Pruned nodes may have deleted the block. if (pindex->nTx > 0) { if (pindex->pprev) { - if (pindex->pprev->nChainTx > 0) { + if (m_snapshot_height && pindex->nHeight == *m_snapshot_height && + pindex->GetBlockHash() == *snapshot_blockhash) { + // Should have been set above; don't disturb it with code below. 
+ Assert(pindex->nChainTx > 0); + } else if (pindex->pprev->nChainTx > 0) { pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; } else { pindex->nChainTx = 0; @@ -420,27 +462,29 @@ bool BlockManager::WriteBlockIndexDB() vBlocks.push_back(*it); m_dirty_blockindex.erase(it++); } - if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) { + int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum()); + if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) { return false; } return true; } -bool BlockManager::LoadBlockIndexDB() +bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash) { - if (!LoadBlockIndex()) { + if (!LoadBlockIndex(snapshot_blockhash)) { return false; } + int max_blockfile_num{0}; // Load block file info - m_block_tree_db->ReadLastBlockFile(m_last_blockfile); - m_blockfile_info.resize(m_last_blockfile + 1); - LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile); - for (int nFile = 0; nFile <= m_last_blockfile; nFile++) { + m_block_tree_db->ReadLastBlockFile(max_blockfile_num); + m_blockfile_info.resize(max_blockfile_num + 1); + LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num); + for (int nFile = 0; nFile <= max_blockfile_num; nFile++) { m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]); } - LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString()); - for (int nFile = m_last_blockfile + 1; true; nFile++) { + LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString()); + for (int nFile = max_blockfile_num + 1; true; nFile++) { CBlockFileInfo info; if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) { m_blockfile_info.push_back(info); @@ -459,11 +503,20 @@ bool BlockManager::LoadBlockIndexDB() } for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) { FlatFilePos pos(*it, 0); - if 
(AutoFile{OpenBlockFile(pos, true)}.IsNull()) { + if (OpenBlockFile(pos, true).IsNull()) { return false; } } + { + // Initialize the blockfile cursors. + LOCK(cs_LastBlockFile); + for (size_t i = 0; i < m_blockfile_info.size(); ++i) { + const auto last_height_in_file = m_blockfile_info[i].nHeightLast; + m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0}; + } + } + // Check whether we have ever pruned block & undo files m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned); if (m_have_pruned) { @@ -481,12 +534,13 @@ bool BlockManager::LoadBlockIndexDB() void BlockManager::ScanAndUnlinkAlreadyPrunedFiles() { AssertLockHeld(::cs_main); + int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum()); if (!m_have_pruned) { return; } std::set<int> block_files_to_prune; - for (int file_number = 0; file_number < m_last_blockfile; file_number++) { + for (int file_number = 0; file_number < max_blockfile; file_number++) { if (m_blockfile_info[file_number].nSize == 0) { block_files_to_prune.insert(file_number); } @@ -592,7 +646,7 @@ CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n) bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const { // Open history file to append - AutoFile fileout{OpenUndoFile(pos)}; + CAutoFile fileout{OpenUndoFile(pos)}; if (fileout.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } @@ -627,7 +681,7 @@ bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& in } // Open history file to read - AutoFile filein{OpenUndoFile(pos, true)}; + CAutoFile filein{OpenUndoFile(pos, true)}; if (filein.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } @@ -651,16 +705,19 @@ bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& in return true; } -void BlockManager::FlushUndoFile(int block_file, bool finalize) +bool BlockManager::FlushUndoFile(int block_file, bool 
finalize) { FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize); if (!UndoFileSeq().Flush(undo_pos_old, finalize)) { m_opts.notifications.flushError("Flushing undo file to disk failed. This is likely the result of an I/O error."); + return false; } + return true; } -void BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo) +bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo) { + bool success = true; LOCK(cs_LastBlockFile); if (m_blockfile_info.size() < 1) { @@ -668,17 +725,43 @@ void BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo) // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which // then calls FlushStateToDisk()), resulting in a call to this function before we // have populated `m_blockfile_info` via LoadBlockIndexDB(). - return; + return true; } - assert(static_cast<int>(m_blockfile_info.size()) > m_last_blockfile); + assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num); - FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize); + FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize); if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) { m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error."); + success = false; } // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks, // e.g. during IBD or a sync after a node going offline - if (!fFinalize || finalize_undo) FlushUndoFile(m_last_blockfile, finalize_undo); + if (!fFinalize || finalize_undo) { + if (!FlushUndoFile(blockfile_num, finalize_undo)) { + success = false; + } + } + return success; +} + +BlockfileType BlockManager::BlockfileTypeForHeight(int height) +{ + if (!m_snapshot_height) { + return BlockfileType::NORMAL; + } + return (height >= *m_snapshot_height) ? 
BlockfileType::ASSUMED : BlockfileType::NORMAL; +} + +bool BlockManager::FlushChainstateBlockFile(int tip_height) +{ + LOCK(cs_LastBlockFile); + auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)]; + if (cursor) { + // The cursor may not exist after a snapshot has been loaded but before any + // blocks have been downloaded. + return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false); + } + return false; } uint64_t BlockManager::CalculateCurrentUsage() @@ -715,15 +798,15 @@ FlatFileSeq BlockManager::UndoFileSeq() const return FlatFileSeq(m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE); } -FILE* BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const +CAutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const { - return BlockFileSeq().Open(pos, fReadOnly); + return CAutoFile{BlockFileSeq().Open(pos, fReadOnly), CLIENT_VERSION}; } /** Open an undo file (rev?????.dat) */ -FILE* BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const +CAutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const { - return UndoFileSeq().Open(pos, fReadOnly); + return CAutoFile{UndoFileSeq().Open(pos, fReadOnly), CLIENT_VERSION}; } fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const @@ -735,8 +818,19 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne { LOCK(cs_LastBlockFile); - unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile; - if (m_blockfile_info.size() <= nFile) { + const BlockfileType chain_type = BlockfileTypeForHeight(nHeight); + + if (!m_blockfile_cursors[chain_type]) { + // If a snapshot is loaded during runtime, we may not have initialized this cursor yet. 
+ assert(chain_type == BlockfileType::ASSUMED); + const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1}; + m_blockfile_cursors[chain_type] = new_cursor; + LogPrint(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor); + } + const int last_blockfile = m_blockfile_cursors[chain_type]->file_num; + + int nFile = fKnown ? pos.nFile : last_blockfile; + if (static_cast<int>(m_blockfile_info.size()) <= nFile) { m_blockfile_info.resize(nFile + 1); } @@ -753,13 +847,20 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne } } assert(nAddSize < max_blockfile_size); + while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) { // when the undo file is keeping up with the block file, we want to flush it explicitly // when it is lagging behind (more blocks arrive than are being connected), we let the // undo block write case handle it - finalize_undo = (m_blockfile_info[nFile].nHeightLast == m_undo_height_in_last_blockfile); - nFile++; - if (m_blockfile_info.size() <= nFile) { + finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) == + Assert(m_blockfile_cursors[chain_type])->undo_height); + + // Try the next unclaimed blockfile number + nFile = this->MaxBlockfileNum() + 1; + // Set to increment MaxBlockfileNum() for next iteration + m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; + + if (static_cast<int>(m_blockfile_info.size()) <= nFile) { m_blockfile_info.resize(nFile + 1); } } @@ -767,13 +868,26 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne pos.nPos = m_blockfile_info[nFile].nSize; } - if ((int)nFile != m_last_blockfile) { + if (nFile != last_blockfile) { if (!fKnown) { - LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString()); + LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n", + last_blockfile, 
m_blockfile_info[last_blockfile].ToString(), nFile, nHeight); + } + + // Do not propagate the return code. The flush concerns a previous block + // and undo file that has already been written to. If a flush fails + // here, and we crash, there is no expected additional block data + // inconsistency arising from the flush failure here. However, the undo + // data may be inconsistent after a crash if the flush is called during + // a reindex. A flush error might also leave some of the data files + // untrimmed. + if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) { + LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, + "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n", + last_blockfile, !fKnown, finalize_undo, nFile); } - FlushBlockFile(!fKnown, finalize_undo); - m_last_blockfile = nFile; - m_undo_height_in_last_blockfile = 0; // No undo data yet in the new file, so reset our undo-height tracking. + // No undo data yet in the new file, so reset our undo-height tracking. 
+ m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; } m_blockfile_info[nFile].AddBlock(nHeight, nTime); @@ -824,7 +938,7 @@ bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFileP bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const { // Open history file to append - CAutoFile fileout{OpenBlockFile(pos), CLIENT_VERSION}; + CAutoFile fileout{OpenBlockFile(pos)}; if (fileout.IsNull()) { return error("WriteBlockToDisk: OpenBlockFile failed"); } @@ -847,6 +961,9 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) { AssertLockHeld(::cs_main); + const BlockfileType type = BlockfileTypeForHeight(block.nHeight); + auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type])); + // Write undo information to disk if (block.GetUndoPos().IsNull()) { FlatFilePos _pos; @@ -861,10 +978,17 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid // in the block file info as below; note that this does not catch the case where the undo writes are keeping up // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in // the FindBlockPos function - if (_pos.nFile < m_last_blockfile && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { - FlushUndoFile(_pos.nFile, true); - } else if (_pos.nFile == m_last_blockfile && static_cast<uint32_t>(block.nHeight) > m_undo_height_in_last_blockfile) { - m_undo_height_in_last_blockfile = block.nHeight; + if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { + // Do not propagate the return code, a failed flush here should not + // be an indication for a failed write. 
If it were propagated here, + // the caller would assume the undo data not to be written, when in + // fact it is. Note though, that a failed flush might leave the data + // file untrimmed. + if (!FlushUndoFile(_pos.nFile, true)) { + LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile); + } + } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { + cursor.undo_height = block.nHeight; } // update nUndoPos in block index block.nUndoPos = _pos.nPos; @@ -880,7 +1004,7 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) cons block.SetNull(); // Open history file to read - CAutoFile filein{OpenBlockFile(pos, true), CLIENT_VERSION}; + CAutoFile filein{OpenBlockFile(pos, true)}; if (filein.IsNull()) { return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); } @@ -923,7 +1047,7 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatF { FlatFilePos hpos = pos; hpos.nPos -= 8; // Seek back 8 bytes for meta header - AutoFile filein{OpenBlockFile(hpos, true)}; + CAutoFile filein{OpenBlockFile(hpos, true)}; if (filein.IsNull()) { return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString()); } @@ -1015,8 +1139,8 @@ void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFile if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) { break; // No block files left to reindex } - FILE* file = chainman.m_blockman.OpenBlockFile(pos, true); - if (!file) { + CAutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)}; + if (file.IsNull()) { break; // This error is logged in OpenBlockFile } LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); @@ -1036,8 +1160,8 @@ void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFile // -loadblock= for (const fs::path& path : vImportFiles) { - FILE* file = fsbridge::fopen(path, "rb"); - if (file) { + CAutoFile 
file{fsbridge::fopen(path, "rb"), CLIENT_VERSION}; + if (!file.IsNull()) { LogPrintf("Importing blocks file %s...\n", fs::PathToString(path)); chainman.LoadExternalBlockFile(file); if (chainman.m_interrupt) { @@ -1063,4 +1187,18 @@ void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFile } } // End scope of ImportingNow } + +std::ostream& operator<<(std::ostream& os, const BlockfileType& type) { + switch(type) { + case BlockfileType::NORMAL: os << "normal"; break; + case BlockfileType::ASSUMED: os << "assumed"; break; + default: os.setstate(std::ios_base::failbit); + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) { + os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height); + return os; +} } // namespace node diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index b251ece31f..ac97728c05 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -9,6 +9,7 @@ #include <chain.h> #include <dbwrapper.h> #include <kernel/blockmanager_opts.h> +#include <kernel/chain.h> #include <kernel/chainparams.h> #include <kernel/cs_main.h> #include <kernel/messagestartchars.h> @@ -29,6 +30,7 @@ #include <vector> class BlockValidationState; +class CAutoFile; class CBlock; class CBlockFileInfo; class CBlockUndo; @@ -96,6 +98,35 @@ struct PruneLockInfo { int height_first{std::numeric_limits<int>::max()}; //! Height of earliest block that should be kept and not pruned }; +enum BlockfileType { + // Values used as array indexes - do not change carelessly. + NORMAL = 0, + ASSUMED = 1, + NUM_TYPES = 2, +}; + +std::ostream& operator<<(std::ostream& os, const BlockfileType& type); + +struct BlockfileCursor { + // The latest blockfile number. + int file_num{0}; + + // Track the height of the highest block in file_num whose undo + // data has been written. 
Block data is written to block files in download + // order, but is written to undo files in validation order, which is + // usually in order by height. To avoid wasting disk space, undo files will + // be trimmed whenever the corresponding block file is finalized and + // the height of the highest block written to the block file equals the + // height of the highest block written to the undo file. This is a + // heuristic and can sometimes preemptively trim undo files that will write + // more data later, and sometimes fail to trim undo files that can't have + // more data written later. + int undo_height{0}; +}; + +std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor); + + /** * Maintains a tree of blocks (stored in `m_block_index`) which is consulted * to determine where the most-work tip is. @@ -116,23 +147,33 @@ private: * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral * collections like m_dirty_blockindex. */ - bool LoadBlockIndex() + bool LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false); - void FlushUndoFile(int block_file, bool finalize = false); - bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown); + + /** Return false if block file or undo file flushing fails. */ + [[nodiscard]] bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo); + + /** Return false if undo file flushing fails. 
*/ + [[nodiscard]] bool FlushUndoFile(int block_file, bool finalize = false); + + [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown); + [[nodiscard]] bool FlushChainstateBlockFile(int tip_height); bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize); FlatFileSeq BlockFileSeq() const; FlatFileSeq UndoFileSeq() const; - FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false) const; + CAutoFile OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false) const; bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const; bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const; /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */ - void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height); + void FindFilesToPruneManual( + std::set<int>& setFilesToPrune, + int nManualPruneHeight, + const Chainstate& chain, + ChainstateManager& chainman); /** * Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a user-defined target. @@ -148,24 +189,39 @@ private: * A db flag records the fact that at least some block files have been pruned. 
* * @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned + * @param last_prune The last height we're able to prune, according to the prune locks */ - void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd); + void FindFilesToPrune( + std::set<int>& setFilesToPrune, + int last_prune, + const Chainstate& chain, + ChainstateManager& chainman); RecursiveMutex cs_LastBlockFile; std::vector<CBlockFileInfo> m_blockfile_info; - int m_last_blockfile = 0; - // Track the height of the highest block in m_last_blockfile whose undo - // data has been written. Block data is written to block files in download - // order, but is written to undo files in validation order, which is - // usually in order by height. To avoid wasting disk space, undo files will - // be trimmed whenever the corresponding block file is finalized and - // the height of the highest block written to the block file equals the - // height of the highest block written to the undo file. This is a - // heuristic and can sometimes preemptively trim undo files that will write - // more data later, and sometimes fail to trim undo files that can't have - // more data written later. - unsigned int m_undo_height_in_last_blockfile = 0; + //! Since assumedvalid chainstates may be syncing a range of the chain that is very + //! far away from the normal/background validation process, we should segment blockfiles + //! for assumed chainstates. Otherwise, we might have wildly different height ranges + //! mixed into the same block files, which would impair our ability to prune + //! effectively. + //! + //! This data structure maintains separate blockfile number cursors for each + //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindBlockPos(). + //! + //! The first element is the NORMAL cursor, second is ASSUMED. 
+ std::array<std::optional<BlockfileCursor>, BlockfileType::NUM_TYPES> + m_blockfile_cursors GUARDED_BY(cs_LastBlockFile) = { + BlockfileCursor{}, + std::nullopt, + }; + int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile) + { + static const BlockfileCursor empty_cursor; + const auto& normal = m_blockfile_cursors[BlockfileType::NORMAL].value_or(empty_cursor); + const auto& assumed = m_blockfile_cursors[BlockfileType::ASSUMED].value_or(empty_cursor); + return std::max(normal.file_num, assumed.file_num); + } /** Global flag to indicate we should check to see if there are * block/undo files that should be deleted. Set on startup @@ -189,6 +245,8 @@ private: */ std::unordered_map<std::string, PruneLockInfo> m_prune_locks GUARDED_BY(::cs_main); + BlockfileType BlockfileTypeForHeight(int height); + const kernel::BlockManagerOpts m_opts; public: @@ -204,6 +262,20 @@ public: BlockMap m_block_index GUARDED_BY(cs_main); + /** + * The height of the base block of an assumeutxo snapshot, if one is in use. + * + * This controls how blockfiles are segmented by chainstate type to avoid + * comingling different height regions of the chain when an assumedvalid chainstate + * is in use. If heights are drastically different in the same blockfile, pruning + * suffers. + * + * This is set during ActivateSnapshot() or upon LoadBlockIndex() if a snapshot + * had been previously loaded. After the snapshot is validated, this is unset to + * restore normal LoadBlockIndex behavior. 
+ */ + std::optional<int> m_snapshot_height; + std::vector<CBlockIndex*> GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** @@ -215,7 +287,8 @@ public: std::unique_ptr<BlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main); bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - bool LoadBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + bool LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** * Remove any pruned block & undo files that are still on disk. @@ -278,7 +351,7 @@ public: void UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** Open a block file (blk?????.dat) */ - FILE* OpenBlockFile(const FlatFilePos& pos, bool fReadOnly = false) const; + CAutoFile OpenBlockFile(const FlatFilePos& pos, bool fReadOnly = false) const; /** Translation to a filesystem path */ fs::path GetBlockPosFilename(const FlatFilePos& pos) const; diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp index ae1457a87e..16ca1d9156 100644 --- a/src/node/chainstate.cpp +++ b/src/node/chainstate.cpp @@ -185,7 +185,14 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize chainman.InitializeChainstate(options.mempool); // Load a chain created from a UTXO snapshot, if any exist. 
- chainman.DetectSnapshotChainstate(options.mempool); + bool has_snapshot = chainman.DetectSnapshotChainstate(options.mempool); + + if (has_snapshot && (options.reindex || options.reindex_chainstate)) { + LogPrintf("[snapshot] deleting snapshot chainstate due to reindexing\n"); + if (!chainman.DeleteSnapshotChainstate()) { + return {ChainstateLoadStatus::FAILURE_FATAL, Untranslated("Couldn't remove snapshot chainstate.")}; + } + } auto [init_status, init_error] = CompleteChainstateInitialization(chainman, cache_sizes, options); if (init_status != ChainstateLoadStatus::SUCCESS) { diff --git a/src/node/connection_types.cpp b/src/node/connection_types.cpp index 904f4371aa..5e4dc5bf2e 100644 --- a/src/node/connection_types.cpp +++ b/src/node/connection_types.cpp @@ -24,3 +24,17 @@ std::string ConnectionTypeAsString(ConnectionType conn_type) assert(false); } + +std::string TransportTypeAsString(TransportProtocolType transport_type) +{ + switch (transport_type) { + case TransportProtocolType::DETECTING: + return "detecting"; + case TransportProtocolType::V1: + return "v1"; + case TransportProtocolType::V2: + return "v2"; + } // no default case, so the compiler can warn about missing cases + + assert(false); +} diff --git a/src/node/connection_types.h b/src/node/connection_types.h index 5e1abcace6..a911b95f7e 100644 --- a/src/node/connection_types.h +++ b/src/node/connection_types.h @@ -6,6 +6,7 @@ #define BITCOIN_NODE_CONNECTION_TYPES_H #include <string> +#include <stdint.h> /** Different types of connections to a peer. 
This enum encapsulates the * information we have available at the time of opening or accepting the @@ -79,4 +80,14 @@ enum class ConnectionType { /** Convert ConnectionType enum to a string value */ std::string ConnectionTypeAsString(ConnectionType conn_type); +/** Transport layer version */ +enum class TransportProtocolType : uint8_t { + DETECTING, //!< Peer could be v1 or v2 + V1, //!< Unencrypted, plaintext protocol + V2, //!< BIP324 protocol +}; + +/** Convert TransportProtocolType enum to a string value */ +std::string TransportTypeAsString(TransportProtocolType transport_type); + #endif // BITCOIN_NODE_CONNECTION_TYPES_H diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index a6d84555c0..4baa0da67c 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -28,6 +28,7 @@ #include <node/coin.h> #include <node/context.h> #include <node/interface_ui.h> +#include <node/mini_miner.h> #include <node/transaction.h> #include <policy/feerate.h> #include <policy/fees.h> @@ -433,9 +434,9 @@ public: { m_notifications->transactionRemovedFromMempool(tx, reason); } - void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* index) override + void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* index) override { - m_notifications->blockConnected(kernel::MakeBlockInfo(index, block.get())); + m_notifications->blockConnected(role, kernel::MakeBlockInfo(index, block.get())); } void BlockDisconnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* index) override { @@ -445,7 +446,9 @@ public: { m_notifications->updatedBlockTip(); } - void ChainStateFlushed(const CBlockLocator& locator) override { m_notifications->chainStateFlushed(locator); } + void ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) override { + m_notifications->chainStateFlushed(role, locator); + } std::shared_ptr<Chain::Notifications> m_notifications; }; @@ -665,6 +668,26 @@ 
public: if (!m_node.mempool) return; m_node.mempool->GetTransactionAncestry(txid, ancestors, descendants, ancestorsize, ancestorfees); } + + std::map<COutPoint, CAmount> CalculateIndividualBumpFees(const std::vector<COutPoint>& outpoints, const CFeeRate& target_feerate) override + { + if (!m_node.mempool) { + std::map<COutPoint, CAmount> bump_fees; + for (const auto& outpoint : outpoints) { + bump_fees.emplace(std::make_pair(outpoint, 0)); + } + return bump_fees; + } + return MiniMiner(*m_node.mempool, outpoints).CalculateBumpFees(target_feerate); + } + + std::optional<CAmount> CalculateCombinedBumpFee(const std::vector<COutPoint>& outpoints, const CFeeRate& target_feerate) override + { + if (!m_node.mempool) { + return 0; + } + return MiniMiner(*m_node.mempool, outpoints).CalculateTotalBumpFees(target_feerate); + } void getPackageLimits(unsigned int& limit_ancestor_count, unsigned int& limit_descendant_count) override { const CTxMemPool::Limits default_limits{}; diff --git a/src/node/mini_miner.cpp b/src/node/mini_miner.cpp index 6f253eddfa..2827242f96 100644 --- a/src/node/mini_miner.cpp +++ b/src/node/mini_miner.cpp @@ -7,9 +7,7 @@ #include <consensus/amount.h> #include <policy/feerate.h> #include <primitives/transaction.h> -#include <timedata.h> #include <util/check.h> -#include <util/moneystr.h> #include <algorithm> #include <numeric> @@ -171,9 +169,8 @@ void MiniMiner::DeleteAncestorPackage(const std::set<MockEntryMap::iterator, Ite for (auto& descendant : it->second) { // If these fail, we must be double-deducting. 
Assume(descendant->second.GetModFeesWithAncestors() >= anc->second.GetModifiedFee()); - Assume(descendant->second.vsize_with_ancestors >= anc->second.GetTxSize()); - descendant->second.fee_with_ancestors -= anc->second.GetModifiedFee(); - descendant->second.vsize_with_ancestors -= anc->second.GetTxSize(); + Assume(descendant->second.GetSizeWithAncestors() >= anc->second.GetTxSize()); + descendant->second.UpdateAncestorState(-anc->second.GetTxSize(), -anc->second.GetModifiedFee()); } } // Delete these entries. diff --git a/src/node/mini_miner.h b/src/node/mini_miner.h index db07e6d1bf..9d9d66bf0b 100644 --- a/src/node/mini_miner.h +++ b/src/node/mini_miner.h @@ -19,12 +19,13 @@ class MiniMinerMempoolEntry const CAmount fee_individual; const CTransactionRef tx; const int64_t vsize_individual; + CAmount fee_with_ancestors; + int64_t vsize_with_ancestors; // This class must be constructed while holding mempool.cs. After construction, the object's // methods can be called without holding that lock. 
+ public: - CAmount fee_with_ancestors; - int64_t vsize_with_ancestors; explicit MiniMinerMempoolEntry(CTxMemPool::txiter entry) : fee_individual{entry->GetModifiedFee()}, tx{entry->GetSharedTx()}, @@ -38,6 +39,10 @@ public: int64_t GetTxSize() const { return vsize_individual; } int64_t GetSizeWithAncestors() const { return vsize_with_ancestors; } const CTransaction& GetTx() const LIFETIMEBOUND { return *tx; } + void UpdateAncestorState(int64_t vsize_change, CAmount fee_change) { + vsize_with_ancestors += vsize_change; + fee_with_ancestors += fee_change; + } }; // Comparator needed for std::set<MockEntryMap::iterator> diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp index a901ef8f38..fd272a2642 100644 --- a/src/policy/packages.cpp +++ b/src/policy/packages.cpp @@ -23,10 +23,10 @@ bool CheckPackage(const Package& txns, PackageValidationState& state) return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-many-transactions"); } - const int64_t total_size = std::accumulate(txns.cbegin(), txns.cend(), 0, - [](int64_t sum, const auto& tx) { return sum + GetVirtualTransactionSize(*tx); }); - // If the package only contains 1 tx, it's better to report the policy violation on individual tx size. - if (package_count > 1 && total_size > MAX_PACKAGE_SIZE * 1000) { + const int64_t total_weight = std::accumulate(txns.cbegin(), txns.cend(), 0, + [](int64_t sum, const auto& tx) { return sum + GetTransactionWeight(*tx); }); + // If the package only contains 1 tx, it's better to report the policy violation on individual tx weight. + if (package_count > 1 && total_weight > MAX_PACKAGE_WEIGHT) { return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-large"); } diff --git a/src/policy/packages.h b/src/policy/packages.h index 0a0e7cf6bb..702667b8ad 100644 --- a/src/policy/packages.h +++ b/src/policy/packages.h @@ -15,18 +15,22 @@ /** Default maximum number of transactions in a package. 
*/ static constexpr uint32_t MAX_PACKAGE_COUNT{25}; -/** Default maximum total virtual size of transactions in a package in KvB. */ -static constexpr uint32_t MAX_PACKAGE_SIZE{101}; -static_assert(MAX_PACKAGE_SIZE * WITNESS_SCALE_FACTOR * 1000 >= MAX_STANDARD_TX_WEIGHT); +/** Default maximum total weight of transactions in a package in weight + to allow for context-less checks. This must allow a superset of sigops + weighted vsize limited transactions to not disallow transactions we would + have otherwise accepted individually. */ +static constexpr uint32_t MAX_PACKAGE_WEIGHT = 404'000; +static_assert(MAX_PACKAGE_WEIGHT >= MAX_STANDARD_TX_WEIGHT); -// If a package is submitted, it must be within the mempool's ancestor/descendant limits. Since a -// submitted package must be child-with-unconfirmed-parents (all of the transactions are an ancestor +// If a package is to be evaluated, it must be at least as large as the mempool's ancestor/descendant limits, +// otherwise transactions that would be individually accepted may be rejected in a package erroneously. +// Since a submitted package must be child-with-unconfirmed-parents (all of the transactions are an ancestor // of the child), package limits are ultimately bounded by mempool package limits. Ensure that the // defaults reflect this constraint. static_assert(DEFAULT_DESCENDANT_LIMIT >= MAX_PACKAGE_COUNT); static_assert(DEFAULT_ANCESTOR_LIMIT >= MAX_PACKAGE_COUNT); -static_assert(DEFAULT_ANCESTOR_SIZE_LIMIT_KVB >= MAX_PACKAGE_SIZE); -static_assert(DEFAULT_DESCENDANT_SIZE_LIMIT_KVB >= MAX_PACKAGE_SIZE); +static_assert(MAX_PACKAGE_WEIGHT >= DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * WITNESS_SCALE_FACTOR * 1000); +static_assert(MAX_PACKAGE_WEIGHT >= DEFAULT_DESCENDANT_SIZE_LIMIT_KVB * WITNESS_SCALE_FACTOR * 1000); /** A "reason" why a package was invalid. It may be that one or more of the included * transactions is invalid or the package itself violates our rules. 
@@ -47,7 +51,7 @@ class PackageValidationState : public ValidationState<PackageValidationResult> { /** Context-free package policy checks: * 1. The number of transactions cannot exceed MAX_PACKAGE_COUNT. - * 2. The total virtual size cannot exceed MAX_PACKAGE_SIZE. + * 2. The total weight cannot exceed MAX_PACKAGE_WEIGHT. * 3. If any dependencies exist between transactions, parents must appear before children. * 4. Transactions cannot conflict, i.e., spend the same inputs. */ diff --git a/src/primitives/block.cpp b/src/primitives/block.cpp index 50a30cb511..3d21708820 100644 --- a/src/primitives/block.cpp +++ b/src/primitives/block.cpp @@ -10,7 +10,7 @@ uint256 CBlockHeader::GetHash() const { - return SerializeHash(*this); + return (CHashWriter{PROTOCOL_VERSION} << *this).GetHash(); } std::string CBlock::ToString() const diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp index 3060746909..2c913bf432 100644 --- a/src/primitives/transaction.cpp +++ b/src/primitives/transaction.cpp @@ -67,12 +67,12 @@ CMutableTransaction::CMutableTransaction(const CTransaction& tx) : vin(tx.vin), uint256 CMutableTransaction::GetHash() const { - return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); + return (CHashWriter{SERIALIZE_TRANSACTION_NO_WITNESS} << *this).GetHash(); } uint256 CTransaction::ComputeHash() const { - return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); + return (CHashWriter{SERIALIZE_TRANSACTION_NO_WITNESS} << *this).GetHash(); } uint256 CTransaction::ComputeWitnessHash() const @@ -80,7 +80,7 @@ uint256 CTransaction::ComputeWitnessHash() const if (!HasWitness()) { return hash; } - return SerializeHash(*this, SER_GETHASH, 0); + return (CHashWriter{0} << *this).GetHash(); } CTransaction::CTransaction(const CMutableTransaction& tx) : vin(tx.vin), vout(tx.vout), nVersion(tx.nVersion), nLockTime(tx.nLockTime), hash{ComputeHash()}, m_witness_hash{ComputeWitnessHash()} {} diff --git a/src/protocol.cpp 
b/src/protocol.cpp index cb956191e4..f956728af2 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -199,6 +199,7 @@ static std::string serviceFlagToStr(size_t bit) case NODE_WITNESS: return "WITNESS"; case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS"; case NODE_NETWORK_LIMITED: return "NETWORK_LIMITED"; + case NODE_P2P_V2: return "P2P_V2"; // Not using default, so we get warned when a case is missing } diff --git a/src/protocol.h b/src/protocol.h index 22e2108afb..a58d671a70 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -291,6 +291,9 @@ enum ServiceFlags : uint64_t { // See BIP159 for details on how this is implemented. NODE_NETWORK_LIMITED = (1 << 10), + // NODE_P2P_V2 means the node supports BIP324 transport + NODE_P2P_V2 = (1 << 11), + // Bits 24-31 are reserved for temporary experiments. Just pick a bit that // isn't getting used, or one not being used much, and notify the // bitcoin-development mailing list. Remember that service bits are just @@ -396,6 +399,7 @@ public: }; struct SerParams : CNetAddr::SerParams { const Format fmt; + SER_PARAMS_OPFUNC }; static constexpr SerParams V1_NETWORK{{CNetAddr::Encoding::V1}, Format::Network}; static constexpr SerParams V2_NETWORK{{CNetAddr::Encoding::V2}, Format::Network}; diff --git a/src/psbt.h b/src/psbt.h index 9464b10268..48e0453084 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -226,7 +226,7 @@ struct PSBTInput // Write the utxo if (non_witness_utxo) { SerializeToVector(s, CompactSizeWriter(PSBT_IN_NON_WITNESS_UTXO)); - OverrideStream<Stream> os(&s, s.GetType(), s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream<Stream> os{&s, s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS}; SerializeToVector(os, non_witness_utxo); } if (!witness_utxo.IsNull()) { @@ -315,7 +315,7 @@ struct PSBTInput const auto& [leaf_hashes, origin] = leaf_origin; SerializeToVector(s, PSBT_IN_TAP_BIP32_DERIVATION, xonly); std::vector<unsigned char> value; - CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 
0); + CVectorWriter s_value{s.GetVersion(), value, 0}; s_value << leaf_hashes; SerializeKeyOrigin(s_value, origin); s << value; @@ -381,7 +381,7 @@ struct PSBTInput } // Type is compact size uint at beginning of key - SpanReader skey(s.GetType(), s.GetVersion(), key); + SpanReader skey{s.GetVersion(), key}; uint64_t type = ReadCompactSize(skey); // Do stuff based on type @@ -394,7 +394,7 @@ struct PSBTInput throw std::ios_base::failure("Non-witness utxo key is more than one byte type"); } // Set the stream to unserialize with witness since this is always a valid network transaction - OverrideStream<Stream> os(&s, s.GetType(), s.GetVersion() & ~SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream<Stream> os{&s, s.GetVersion() & ~SERIALIZE_TRANSACTION_NO_WITNESS}; UnserializeFromVector(os, non_witness_utxo); break; } @@ -590,7 +590,7 @@ struct PSBTInput } else if (key.size() != 65) { throw std::ios_base::failure("Input Taproot script signature key is not 65 bytes"); } - SpanReader s_key(s.GetType(), s.GetVersion(), Span{key}.subspan(1)); + SpanReader s_key{s.GetVersion(), Span{key}.subspan(1)}; XOnlyPubKey xonly; uint256 hash; s_key >> xonly; @@ -632,7 +632,7 @@ struct PSBTInput } else if (key.size() != 33) { throw std::ios_base::failure("Input Taproot BIP32 keypath key is not at 33 bytes"); } - SpanReader s_key(s.GetType(), s.GetVersion(), Span{key}.subspan(1)); + SpanReader s_key{s.GetVersion(), Span{key}.subspan(1)}; XOnlyPubKey xonly; s_key >> xonly; std::set<uint256> leaf_hashes; @@ -757,7 +757,7 @@ struct PSBTOutput if (!m_tap_tree.empty()) { SerializeToVector(s, PSBT_OUT_TAP_TREE); std::vector<unsigned char> value; - CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0); + CVectorWriter s_value{s.GetVersion(), value, 0}; for (const auto& [depth, leaf_ver, script] : m_tap_tree) { s_value << depth; s_value << leaf_ver; @@ -771,7 +771,7 @@ struct PSBTOutput const auto& [leaf_hashes, origin] = leaf; SerializeToVector(s, PSBT_OUT_TAP_BIP32_DERIVATION, xonly); 
std::vector<unsigned char> value; - CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0); + CVectorWriter s_value{s.GetVersion(), value, 0}; s_value << leaf_hashes; SerializeKeyOrigin(s_value, origin); s << value; @@ -807,7 +807,7 @@ struct PSBTOutput } // Type is compact size uint at beginning of key - SpanReader skey(s.GetType(), s.GetVersion(), key); + SpanReader skey{s.GetVersion(), key}; uint64_t type = ReadCompactSize(skey); // Do stuff based on type @@ -856,7 +856,7 @@ struct PSBTOutput } std::vector<unsigned char> tree_v; s >> tree_v; - SpanReader s_tree(s.GetType(), s.GetVersion(), tree_v); + SpanReader s_tree{s.GetVersion(), tree_v}; if (s_tree.empty()) { throw std::ios_base::failure("Output Taproot tree must not be empty"); } @@ -984,7 +984,7 @@ struct PartiallySignedTransaction SerializeToVector(s, CompactSizeWriter(PSBT_GLOBAL_UNSIGNED_TX)); // Write serialized tx to a stream - OverrideStream<Stream> os(&s, s.GetType(), s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream<Stream> os{&s, s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS}; SerializeToVector(os, *tx); // Write xpubs @@ -1061,7 +1061,7 @@ struct PartiallySignedTransaction } // Type is compact size uint at beginning of key - SpanReader skey(s.GetType(), s.GetVersion(), key); + SpanReader skey{s.GetVersion(), key}; uint64_t type = ReadCompactSize(skey); // Do stuff based on type @@ -1075,7 +1075,7 @@ struct PartiallySignedTransaction } CMutableTransaction mtx; // Set the stream to serialize with non-witness since this should always be non-witness - OverrideStream<Stream> os(&s, s.GetType(), s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream<Stream> os{&s, s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS}; UnserializeFromVector(os, mtx); tx = std::move(mtx); // Make sure that all scriptSigs and scriptWitnesses are empty diff --git a/src/qt/askpassphrasedialog.cpp b/src/qt/askpassphrasedialog.cpp index 0a96be038b..246dff0069 100644 --- 
a/src/qt/askpassphrasedialog.cpp +++ b/src/qt/askpassphrasedialog.cpp @@ -167,6 +167,9 @@ void AskPassphraseDialog::accept() "passphrase to avoid this issue in the future.")); } } else { + if (m_passphrase_out) { + m_passphrase_out->assign(oldpass); + } QDialog::accept(); // Success } } catch (const std::runtime_error& e) { diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index b84cd02bda..2862dddb56 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -359,6 +359,10 @@ void BitcoinGUI::createActions() m_close_all_wallets_action = new QAction(tr("Close All Wallets…"), this); m_close_all_wallets_action->setStatusTip(tr("Close all wallets")); + m_migrate_wallet_action = new QAction(tr("Migrate Wallet"), this); + m_migrate_wallet_action->setEnabled(false); + m_migrate_wallet_action->setStatusTip(tr("Migrate a wallet")); + showHelpMessageAction = new QAction(tr("&Command-line options"), this); showHelpMessageAction->setMenuRole(QAction::NoRole); showHelpMessageAction->setStatusTip(tr("Show the %1 help message to get a list with possible Bitcoin command-line options").arg(PACKAGE_NAME)); @@ -459,6 +463,11 @@ void BitcoinGUI::createActions() connect(m_close_all_wallets_action, &QAction::triggered, [this] { m_wallet_controller->closeAllWallets(this); }); + connect(m_migrate_wallet_action, &QAction::triggered, [this] { + auto activity = new MigrateWalletActivity(m_wallet_controller, this); + connect(activity, &MigrateWalletActivity::migrated, this, &BitcoinGUI::setCurrentWallet); + activity->migrate(walletFrame->currentWalletModel()); + }); connect(m_mask_values_action, &QAction::toggled, this, &BitcoinGUI::setPrivacy); connect(m_mask_values_action, &QAction::toggled, this, &BitcoinGUI::enableHistoryAction); } @@ -486,6 +495,7 @@ void BitcoinGUI::createMenuBar() file->addAction(m_open_wallet_action); file->addAction(m_close_wallet_action); file->addAction(m_close_all_wallets_action); + file->addAction(m_migrate_wallet_action); file->addSeparator(); 
file->addAction(backupWalletAction); file->addAction(m_restore_wallet_action); @@ -770,6 +780,7 @@ void BitcoinGUI::setCurrentWallet(WalletModel* wallet_model) } } updateWindowTitle(); + m_migrate_wallet_action->setEnabled(wallet_model->wallet().isLegacy()); } void BitcoinGUI::setCurrentWalletBySelectorIndex(int index) @@ -803,6 +814,7 @@ void BitcoinGUI::setWalletActionsEnabled(bool enabled) openAction->setEnabled(enabled); m_close_wallet_action->setEnabled(enabled); m_close_all_wallets_action->setEnabled(enabled); + m_migrate_wallet_action->setEnabled(enabled); } void BitcoinGUI::createTrayIcon() diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h index 4e1f05255a..510561454b 100644 --- a/src/qt/bitcoingui.h +++ b/src/qt/bitcoingui.h @@ -163,6 +163,8 @@ private: QAction* m_wallet_selector_label_action = nullptr; QAction* m_wallet_selector_action = nullptr; QAction* m_mask_values_action{nullptr}; + QAction* m_migrate_wallet_action{nullptr}; + QMenu* m_migrate_wallet_menu{nullptr}; QLabel *m_wallet_selector_label = nullptr; QComboBox* m_wallet_selector = nullptr; diff --git a/src/qt/createwalletdialog.cpp b/src/qt/createwalletdialog.cpp index 5b3c8bcf48..3e8be3e675 100644 --- a/src/qt/createwalletdialog.cpp +++ b/src/qt/createwalletdialog.cpp @@ -58,10 +58,7 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : ui->descriptor_checkbox->setChecked(checked); ui->encrypt_wallet_checkbox->setChecked(false); ui->disable_privkeys_checkbox->setChecked(checked); - // The blank check box is ambiguous. This flag is always true for a - // watch-only wallet, even though we immedidately fetch keys from the - // external signer. - ui->blank_wallet_checkbox->setChecked(checked); + ui->blank_wallet_checkbox->setChecked(false); }); connect(ui->disable_privkeys_checkbox, &QCheckBox::toggled, [this](bool checked) { @@ -69,9 +66,10 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : // set to true, enable it when isDisablePrivateKeysChecked is false. 
ui->encrypt_wallet_checkbox->setEnabled(!checked); - // Wallets without private keys start out blank + // Wallets without private keys cannot set blank + ui->blank_wallet_checkbox->setEnabled(!checked); if (checked) { - ui->blank_wallet_checkbox->setChecked(true); + ui->blank_wallet_checkbox->setChecked(false); } // When the encrypt_wallet_checkbox is disabled, uncheck it. @@ -81,8 +79,11 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : }); connect(ui->blank_wallet_checkbox, &QCheckBox::toggled, [this](bool checked) { - if (!checked) { - ui->disable_privkeys_checkbox->setChecked(false); + // Disable the disable_privkeys_checkbox when blank_wallet_checkbox is checked + // as blank-ness only pertains to wallets with private keys. + ui->disable_privkeys_checkbox->setEnabled(!checked); + if (checked) { + ui->disable_privkeys_checkbox->setChecked(false); } }); diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index 1d2f6484fb..8a2ec4cd59 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -13,6 +13,7 @@ #include <QDateTime> +using wallet::ISMINE_NO; using wallet::ISMINE_SPENDABLE; using wallet::ISMINE_WATCH_ONLY; using wallet::isminetype; @@ -39,99 +40,52 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const interface uint256 hash = wtx.tx->GetHash(); std::map<std::string, std::string> mapValue = wtx.value_map; - if (nNet > 0 || wtx.is_coinbase) - { - // - // Credit - // - for(unsigned int i = 0; i < wtx.tx->vout.size(); i++) - { - const CTxOut& txout = wtx.tx->vout[i]; - isminetype mine = wtx.txout_is_mine[i]; - if(mine) - { - TransactionRecord sub(hash, nTime); - sub.idx = i; // vout index - sub.credit = txout.nValue; - sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY; - if (wtx.txout_address_is_mine[i]) - { - // Received by Bitcoin Address - sub.type = TransactionRecord::RecvWithAddress; - sub.address = EncodeDestination(wtx.txout_address[i]); - } - else - { - // Received by IP 
connection (deprecated features), or a multisignature or other non-simple transaction - sub.type = TransactionRecord::RecvFromOther; - sub.address = mapValue["from"]; - } - if (wtx.is_coinbase) - { - // Generated - sub.type = TransactionRecord::Generated; - } - - parts.append(sub); - } - } - } - else - { - bool involvesWatchAddress = false; - isminetype fAllFromMe = ISMINE_SPENDABLE; + bool involvesWatchAddress = false; + isminetype fAllFromMe = ISMINE_SPENDABLE; + bool any_from_me = false; + if (wtx.is_coinbase) { + fAllFromMe = ISMINE_NO; + } else { for (const isminetype mine : wtx.txin_is_mine) { if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true; if(fAllFromMe > mine) fAllFromMe = mine; + if (mine) any_from_me = true; } + } - isminetype fAllToMe = ISMINE_SPENDABLE; + if (fAllFromMe || !any_from_me) { for (const isminetype mine : wtx.txout_is_mine) { if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true; - if(fAllToMe > mine) fAllToMe = mine; } - if (fAllFromMe && fAllToMe) - { - // Payment to self - std::string address; - for (auto it = wtx.txout_address.begin(); it != wtx.txout_address.end(); ++it) { - if (it != wtx.txout_address.begin()) address += ", "; - address += EncodeDestination(*it); - } + CAmount nTxFee = nDebit - wtx.tx->GetValueOut(); - CAmount nChange = wtx.change; - parts.append(TransactionRecord(hash, nTime, TransactionRecord::SendToSelf, address, -(nDebit - nChange), nCredit - nChange)); - parts.last().involvesWatchAddress = involvesWatchAddress; // maybe pass to TransactionRecord as constructor argument - } - else if (fAllFromMe) + for(unsigned int i = 0; i < wtx.tx->vout.size(); i++) { - // - // Debit - // - CAmount nTxFee = nDebit - wtx.tx->GetValueOut(); - - for (unsigned int nOut = 0; nOut < wtx.tx->vout.size(); nOut++) - { - const CTxOut& txout = wtx.tx->vout[nOut]; - TransactionRecord sub(hash, nTime); - sub.idx = nOut; - sub.involvesWatchAddress = involvesWatchAddress; + const CTxOut& txout = wtx.tx->vout[i]; - 
if(wtx.txout_is_mine[nOut]) - { - // Ignore parts sent to self, as this is usually the change - // from a transaction sent back to our own address. + if (fAllFromMe) { + // Change is only really possible if we're the sender + // Otherwise, someone just sent bitcoins to a change address, which should be shown + if (wtx.txout_is_change[i]) { continue; } - if (!std::get_if<CNoDestination>(&wtx.txout_address[nOut])) + // + // Debit + // + + TransactionRecord sub(hash, nTime); + sub.idx = i; + sub.involvesWatchAddress = involvesWatchAddress; + + if (!std::get_if<CNoDestination>(&wtx.txout_address[i])) { // Sent to Bitcoin Address sub.type = TransactionRecord::SendToAddress; - sub.address = EncodeDestination(wtx.txout_address[nOut]); + sub.address = EncodeDestination(wtx.txout_address[i]); } else { @@ -151,15 +105,45 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const interface parts.append(sub); } + + isminetype mine = wtx.txout_is_mine[i]; + if(mine) + { + // + // Credit + // + + TransactionRecord sub(hash, nTime); + sub.idx = i; // vout index + sub.credit = txout.nValue; + sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY; + if (wtx.txout_address_is_mine[i]) + { + // Received by Bitcoin Address + sub.type = TransactionRecord::RecvWithAddress; + sub.address = EncodeDestination(wtx.txout_address[i]); + } + else + { + // Received by IP connection (deprecated features), or a multisignature or other non-simple transaction + sub.type = TransactionRecord::RecvFromOther; + sub.address = mapValue["from"]; + } + if (wtx.is_coinbase) + { + // Generated + sub.type = TransactionRecord::Generated; + } + + parts.append(sub); + } } - else - { - // - // Mixed debit transaction, can't break down payees - // - parts.append(TransactionRecord(hash, nTime, TransactionRecord::Other, "", nNet, 0)); - parts.last().involvesWatchAddress = involvesWatchAddress; - } + } else { + // + // Mixed debit transaction, can't break down payees + // + 
parts.append(TransactionRecord(hash, nTime, TransactionRecord::Other, "", nNet, 0)); + parts.last().involvesWatchAddress = involvesWatchAddress; } return parts; @@ -170,11 +154,21 @@ void TransactionRecord::updateStatus(const interfaces::WalletTxStatus& wtx, cons // Determine transaction status // Sort order, unrecorded transactions sort to the top - status.sortKey = strprintf("%010d-%01d-%010u-%03d", + int typesort; + switch (type) { + case SendToAddress: case SendToOther: + typesort = 2; break; + case RecvWithAddress: case RecvFromOther: + typesort = 3; break; + default: + typesort = 9; + } + status.sortKey = strprintf("%010d-%01d-%010u-%03d-%d", wtx.block_height, wtx.is_coinbase ? 1 : 0, wtx.time_received, - idx); + idx, + typesort); status.countsForBalance = wtx.is_trusted && !(wtx.blocks_to_maturity > 0); status.depth = wtx.depth_in_main_chain; status.m_cur_block_hash = block_hash; diff --git a/src/qt/transactionrecord.h b/src/qt/transactionrecord.h index 36cfb422e8..21b1bc0e01 100644 --- a/src/qt/transactionrecord.h +++ b/src/qt/transactionrecord.h @@ -69,7 +69,6 @@ public: SendToOther, RecvWithAddress, RecvFromOther, - SendToSelf }; /** Number of confirmation recommended for accepting a transaction */ diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp index 25d54bdce6..486e51c777 100644 --- a/src/qt/transactiontablemodel.cpp +++ b/src/qt/transactiontablemodel.cpp @@ -17,6 +17,7 @@ #include <core_io.h> #include <interfaces/handler.h> +#include <tinyformat.h> #include <uint256.h> #include <algorithm> @@ -377,8 +378,6 @@ QString TransactionTableModel::formatTxType(const TransactionRecord *wtx) const case TransactionRecord::SendToAddress: case TransactionRecord::SendToOther: return tr("Sent to"); - case TransactionRecord::SendToSelf: - return tr("Payment to yourself"); case TransactionRecord::Generated: return tr("Mined"); default: @@ -421,8 +420,6 @@ QString TransactionTableModel::formatTxToAddress(const TransactionRecord *wtx, b 
return lookupAddress(wtx->address, tooltip) + watchAddress; case TransactionRecord::SendToOther: return QString::fromStdString(wtx->address) + watchAddress; - case TransactionRecord::SendToSelf: - return lookupAddress(wtx->address, tooltip) + watchAddress; default: return tr("(n/a)") + watchAddress; } @@ -441,8 +438,6 @@ QVariant TransactionTableModel::addressColor(const TransactionRecord *wtx) const if(label.isEmpty()) return COLOR_BAREADDRESS; } break; - case TransactionRecord::SendToSelf: - return COLOR_BAREADDRESS; default: break; } @@ -560,7 +555,7 @@ QVariant TransactionTableModel::data(const QModelIndex &index, int role) const case Status: return QString::fromStdString(rec->status.sortKey); case Date: - return rec->time; + return QString::fromStdString(strprintf("%020s-%s", rec->time, rec->status.sortKey)); case Type: return formatTxType(rec); case Watchonly: diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 351305f3fa..67af62285d 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -91,7 +91,6 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa TransactionFilterProxy::TYPE(TransactionRecord::RecvFromOther)); typeWidget->addItem(tr("Sent to"), TransactionFilterProxy::TYPE(TransactionRecord::SendToAddress) | TransactionFilterProxy::TYPE(TransactionRecord::SendToOther)); - typeWidget->addItem(tr("To yourself"), TransactionFilterProxy::TYPE(TransactionRecord::SendToSelf)); typeWidget->addItem(tr("Mined"), TransactionFilterProxy::TYPE(TransactionRecord::Generated)); typeWidget->addItem(tr("Other"), TransactionFilterProxy::TYPE(TransactionRecord::Other)); diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp index 8c8abf0e90..ca2fa2d672 100644 --- a/src/qt/walletcontroller.cpp +++ b/src/qt/walletcontroller.cpp @@ -435,3 +435,67 @@ void RestoreWalletActivity::finish() Q_EMIT finished(); } + +void MigrateWalletActivity::migrate(WalletModel* wallet_model) +{ + // Warn 
the user about migration + QMessageBox box(m_parent_widget); + box.setWindowTitle(tr("Migrate wallet")); + box.setText(tr("Are you sure you wish to migrate the wallet <i>%1</i>?").arg(GUIUtil::HtmlEscape(wallet_model->getDisplayName()))); + box.setInformativeText(tr("Migrating the wallet will convert this wallet to one or more descriptor wallets. A new wallet backup will need to be made.\n" + "If this wallet contains any watchonly scripts, a new wallet will be created which contains those watchonly scripts.\n" + "If this wallet contains any solvable but not watched scripts, a different and new wallet will be created which contains those scripts.\n\n" + "The migration process will create a backup of the wallet before migrating. This backup file will be named " + "<wallet name>-<timestamp>.legacy.bak and can be found in the directory for this wallet. In the event of " + "an incorrect migration, the backup can be restored with the \"Restore Wallet\" functionality.")); + box.setStandardButtons(QMessageBox::Yes|QMessageBox::Cancel); + box.setDefaultButton(QMessageBox::Yes); + if (box.exec() != QMessageBox::Yes) return; + + // Get the passphrase if it is encrypted regardless of it is locked or unlocked. We need the passphrase itself. 
+ SecureString passphrase; + WalletModel::EncryptionStatus enc_status = wallet_model->getEncryptionStatus(); + if (enc_status == WalletModel::EncryptionStatus::Locked || enc_status == WalletModel::EncryptionStatus::Unlocked) { + AskPassphraseDialog dlg(AskPassphraseDialog::Unlock, m_parent_widget, &passphrase); + dlg.setModel(wallet_model); + dlg.exec(); + } + + // GUI needs to remove the wallet so that it can actually be unloaded by migration + const std::string name = wallet_model->wallet().getWalletName(); + m_wallet_controller->removeAndDeleteWallet(wallet_model); + + showProgressDialog(tr("Migrate Wallet"), tr("Migrating Wallet <b>%1</b>…").arg(GUIUtil::HtmlEscape(name))); + + QTimer::singleShot(0, worker(), [this, name, passphrase] { + auto res{node().walletLoader().migrateWallet(name, passphrase)}; + + if (res) { + m_success_message = tr("The wallet '%1' was migrated successfully.").arg(GUIUtil::HtmlEscape(res->wallet->getWalletName())); + if (res->watchonly_wallet_name) { + m_success_message += tr(" Watchonly scripts have been migrated to a new wallet named '%1'.").arg(GUIUtil::HtmlEscape(res->watchonly_wallet_name.value())); + } + if (res->solvables_wallet_name) { + m_success_message += tr(" Solvable but not watched scripts have been migrated to a new wallet named '%1'.").arg(GUIUtil::HtmlEscape(res->solvables_wallet_name.value())); + } + m_wallet_model = m_wallet_controller->getOrCreateWallet(std::move(res->wallet)); + } else { + m_error_message = util::ErrorString(res); + } + + QTimer::singleShot(0, this, &MigrateWalletActivity::finish); + }); +} + +void MigrateWalletActivity::finish() +{ + if (!m_error_message.empty()) { + QMessageBox::critical(m_parent_widget, tr("Migration failed"), QString::fromStdString(m_error_message.translated)); + } else { + QMessageBox::information(m_parent_widget, tr("Migration Successful"), m_success_message); + } + + if (m_wallet_model) Q_EMIT migrated(m_wallet_model); + + Q_EMIT finished(); +} diff --git 
a/src/qt/walletcontroller.h b/src/qt/walletcontroller.h index 8ec0243890..c595ba998d 100644 --- a/src/qt/walletcontroller.h +++ b/src/qt/walletcontroller.h @@ -40,6 +40,7 @@ class path; class AskPassphraseDialog; class CreateWalletActivity; class CreateWalletDialog; +class MigrateWalletActivity; class OpenWalletActivity; class WalletControllerActivity; @@ -65,6 +66,8 @@ public: void closeWallet(WalletModel* wallet_model, QWidget* parent = nullptr); void closeAllWallets(QWidget* parent = nullptr); + void migrateWallet(WalletModel* wallet_model, QWidget* parent = nullptr); + Q_SIGNALS: void walletAdded(WalletModel* wallet_model); void walletRemoved(WalletModel* wallet_model); @@ -83,6 +86,7 @@ private: std::unique_ptr<interfaces::Handler> m_handler_load_wallet; friend class WalletControllerActivity; + friend class MigrateWalletActivity; }; class WalletControllerActivity : public QObject @@ -175,4 +179,22 @@ private: void finish(); }; +class MigrateWalletActivity : public WalletControllerActivity +{ + Q_OBJECT + +public: + MigrateWalletActivity(WalletController* wallet_controller, QWidget* parent) : WalletControllerActivity(wallet_controller, parent) {} + + void migrate(WalletModel* wallet_model); + +Q_SIGNALS: + void migrated(WalletModel* wallet_model); + +private: + QString m_success_message; + + void finish(); +}; + #endif // BITCOIN_QT_WALLETCONTROLLER_H diff --git a/src/qt/winshutdownmonitor.cpp b/src/qt/winshutdownmonitor.cpp index 386d593eea..97a9ec318c 100644 --- a/src/qt/winshutdownmonitor.cpp +++ b/src/qt/winshutdownmonitor.cpp @@ -43,7 +43,7 @@ bool WinShutdownMonitor::nativeEventFilter(const QByteArray &eventType, void *pM void WinShutdownMonitor::registerShutdownBlockReason(const QString& strReason, const HWND& mainWinId) { typedef BOOL (WINAPI *PSHUTDOWNBRCREATE)(HWND, LPCWSTR); - PSHUTDOWNBRCREATE shutdownBRCreate = (PSHUTDOWNBRCREATE)GetProcAddress(GetModuleHandleA("User32.dll"), "ShutdownBlockReasonCreate"); + PSHUTDOWNBRCREATE shutdownBRCreate = 
(PSHUTDOWNBRCREATE)(void*)GetProcAddress(GetModuleHandleA("User32.dll"), "ShutdownBlockReasonCreate"); if (shutdownBRCreate == nullptr) { qWarning() << "registerShutdownBlockReason: GetProcAddress for ShutdownBlockReasonCreate failed"; return; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index f4d88e4209..0f4941b40c 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -8,6 +8,7 @@ #include <blockfilter.h> #include <chain.h> #include <chainparams.h> +#include <clientversion.h> #include <coins.h> #include <common/args.h> #include <consensus/amount.h> @@ -2699,6 +2700,178 @@ UniValue CreateUTXOSnapshot( return result; } +static RPCHelpMan loadtxoutset() +{ + return RPCHelpMan{ + "loadtxoutset", + "Load the serialized UTXO set from disk.\n" + "Once this snapshot is loaded, its contents will be " + "deserialized into a second chainstate data structure, which is then used to sync to " + "the network's tip under a security model very much like `assumevalid`. " + "Meanwhile, the original chainstate will complete the initial block download process in " + "the background, eventually validating up to the block that the snapshot is based upon.\n\n" + + "The result is a usable bitcoind instance that is current with the network tip in a " + "matter of minutes rather than hours. UTXO snapshot are typically obtained from " + "third-party sources (HTTP, torrent, etc.) which is reasonable since their " + "contents are always checked by hash.\n\n" + + "You can find more information on this process in the `assumeutxo` design " + "document (<https://github.com/bitcoin/bitcoin/blob/master/doc/design/assumeutxo.md>).", + { + {"path", + RPCArg::Type::STR, + RPCArg::Optional::NO, + "path to the snapshot file. 
If relative, will be prefixed by datadir."}, + }, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::NUM, "coins_loaded", "the number of coins loaded from the snapshot"}, + {RPCResult::Type::STR_HEX, "tip_hash", "the hash of the base of the snapshot"}, + {RPCResult::Type::NUM, "base_height", "the height of the base of the snapshot"}, + {RPCResult::Type::STR, "path", "the absolute path that the snapshot was loaded from"}, + } + }, + RPCExamples{ + HelpExampleCli("loadtxoutset", "utxo.dat") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ + NodeContext& node = EnsureAnyNodeContext(request.context); + fs::path path{AbsPathForConfigVal(EnsureArgsman(node), fs::u8path(request.params[0].get_str()))}; + + FILE* file{fsbridge::fopen(path, "rb")}; + AutoFile afile{file}; + if (afile.IsNull()) { + throw JSONRPCError( + RPC_INVALID_PARAMETER, + "Couldn't open file " + path.u8string() + " for reading."); + } + + SnapshotMetadata metadata; + afile >> metadata; + + uint256 base_blockhash = metadata.m_base_blockhash; + int max_secs_to_wait_for_headers = 60 * 10; + CBlockIndex* snapshot_start_block = nullptr; + + LogPrintf("[snapshot] waiting to see blockheader %s in headers chain before snapshot activation\n", + base_blockhash.ToString()); + + ChainstateManager& chainman = *node.chainman; + + while (max_secs_to_wait_for_headers > 0) { + snapshot_start_block = WITH_LOCK(::cs_main, + return chainman.m_blockman.LookupBlockIndex(base_blockhash)); + max_secs_to_wait_for_headers -= 1; + + if (!IsRPCRunning()) { + throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); + } + + if (!snapshot_start_block) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } else { + break; + } + } + + if (!snapshot_start_block) { + LogPrintf("[snapshot] timed out waiting for snapshot start blockheader %s\n", + base_blockhash.ToString()); + throw JSONRPCError( + RPC_INTERNAL_ERROR, + "Timed out waiting for base block header to appear in 
headers chain"); + } + if (!chainman.ActivateSnapshot(afile, metadata, false)) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to load UTXO snapshot " + fs::PathToString(path)); + } + CBlockIndex* new_tip{WITH_LOCK(::cs_main, return chainman.ActiveTip())}; + + UniValue result(UniValue::VOBJ); + result.pushKV("coins_loaded", metadata.m_coins_count); + result.pushKV("tip_hash", new_tip->GetBlockHash().ToString()); + result.pushKV("base_height", new_tip->nHeight); + result.pushKV("path", fs::PathToString(path)); + return result; +}, + }; +} + +const std::vector<RPCResult> RPCHelpForChainstate{ + {RPCResult::Type::NUM, "blocks", "number of blocks in this chainstate"}, + {RPCResult::Type::STR_HEX, "bestblockhash", "blockhash of the tip"}, + {RPCResult::Type::NUM, "difficulty", "difficulty of the tip"}, + {RPCResult::Type::NUM, "verificationprogress", "progress towards the network tip"}, + {RPCResult::Type::STR_HEX, "snapshot_blockhash", /*optional=*/true, "the base block of the snapshot this chainstate is based on, if any"}, + {RPCResult::Type::NUM, "coins_db_cache_bytes", "size of the coinsdb cache"}, + {RPCResult::Type::NUM, "coins_tip_cache_bytes", "size of the coinstip cache"}, +}; + +static RPCHelpMan getchainstates() +{ +return RPCHelpMan{ + "getchainstates", + "\nReturn information about chainstates.\n", + {}, + RPCResult{ + RPCResult::Type::OBJ, "", "", { + {RPCResult::Type::NUM, "headers", "the number of headers seen so far"}, + {RPCResult::Type::OBJ, "normal", /*optional=*/true, "fully validated chainstate containing blocks this node has validated starting from the genesis block", RPCHelpForChainstate}, + {RPCResult::Type::OBJ, "snapshot", /*optional=*/true, "only present if an assumeutxo snapshot is loaded. Partially validated chainstate containing blocks this node has validated starting from the snapshot. 
After the snapshot is validated (when the 'normal' chainstate advances far enough to validate it), this chainstate will replace and become the 'normal' chainstate.", RPCHelpForChainstate}, + } + }, + RPCExamples{ + HelpExampleCli("getchainstates", "") + + HelpExampleRpc("getchainstates", "") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ + LOCK(cs_main); + UniValue obj(UniValue::VOBJ); + + NodeContext& node = EnsureAnyNodeContext(request.context); + ChainstateManager& chainman = *node.chainman; + + auto make_chain_data = [&](const Chainstate& cs) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + AssertLockHeld(::cs_main); + UniValue data(UniValue::VOBJ); + if (!cs.m_chain.Tip()) { + return data; + } + const CChain& chain = cs.m_chain; + const CBlockIndex* tip = chain.Tip(); + + data.pushKV("blocks", (int)chain.Height()); + data.pushKV("bestblockhash", tip->GetBlockHash().GetHex()); + data.pushKV("difficulty", (double)GetDifficulty(tip)); + data.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), tip)); + data.pushKV("coins_db_cache_bytes", cs.m_coinsdb_cache_size_bytes); + data.pushKV("coins_tip_cache_bytes", cs.m_coinstip_cache_size_bytes); + if (cs.m_from_snapshot_blockhash) { + data.pushKV("snapshot_blockhash", cs.m_from_snapshot_blockhash->ToString()); + } + return data; + }; + + if (chainman.GetAll().size() > 1) { + for (Chainstate* chainstate : chainman.GetAll()) { + obj.pushKV( + chainstate->m_from_snapshot_blockhash ? "snapshot" : "normal", + make_chain_data(*chainstate)); + } + } else { + obj.pushKV("normal", make_chain_data(chainman.ActiveChainstate())); + } + obj.pushKV("headers", chainman.m_best_header ? 
chainman.m_best_header->nHeight : -1); + + return obj; +} + }; +} + + void RegisterBlockchainRPCCommands(CRPCTable& t) { static const CRPCCommand commands[]{ @@ -2722,13 +2895,15 @@ void RegisterBlockchainRPCCommands(CRPCTable& t) {"blockchain", &scantxoutset}, {"blockchain", &scanblocks}, {"blockchain", &getblockfilter}, + {"blockchain", &dumptxoutset}, + {"blockchain", &loadtxoutset}, + {"blockchain", &getchainstates}, {"hidden", &invalidateblock}, {"hidden", &reconsiderblock}, {"hidden", &waitfornewblock}, {"hidden", &waitforblock}, {"hidden", &waitforblockheight}, {"hidden", &syncwithvalidationinterfacequeue}, - {"hidden", &dumptxoutset}, }; for (const auto& c : commands) { t.appendCommand(c.name, &c); diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 0ee3f27761..49820f25a3 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -263,13 +263,13 @@ static const CRPCConvertParam vRPCConvertParams[] = { "bumpfee", 1, "fee_rate"}, { "bumpfee", 1, "replaceable"}, { "bumpfee", 1, "outputs"}, - { "bumpfee", 1, "reduce_output"}, + { "bumpfee", 1, "original_change_index"}, { "psbtbumpfee", 1, "options" }, { "psbtbumpfee", 1, "conf_target"}, { "psbtbumpfee", 1, "fee_rate"}, { "psbtbumpfee", 1, "replaceable"}, { "psbtbumpfee", 1, "outputs"}, - { "psbtbumpfee", 1, "reduce_output"}, + { "psbtbumpfee", 1, "original_change_index"}, { "logging", 0, "include" }, { "logging", 1, "exclude" }, { "disconnectnode", 1, "nodeid" }, @@ -301,6 +301,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "addpeeraddress", 2, "tried"}, { "sendmsgtopeer", 0, "peer_id" }, { "stop", 0, "wait" }, + { "addnode", 2, "v2transport" }, }; // clang-format on diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 377e9de0e8..705608bd47 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -862,7 +862,7 @@ static RPCHelpMan submitpackage() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - if (!Params().IsMockableChain()) { + if 
(Params().GetChainType() != ChainType::REGTEST) { throw std::runtime_error("submitpackage is for regression testing (-regtest mode) only"); } const UniValue raw_transactions = request.params[0].get_array(); diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index f7b6c68344..8d796b8e9b 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -45,6 +45,12 @@ const std::vector<std::string> CONNECTION_TYPE_DOC{ "feeler (short-lived automatic connection for testing addresses)" }; +const std::vector<std::string> TRANSPORT_TYPE_DOC{ + "detecting (peer could be v1 or v2)", + "v1 (plaintext transport protocol)", + "v2 (BIP324 encrypted transport protocol)" +}; + static RPCHelpMan getconnectioncount() { return RPCHelpMan{"getconnectioncount", @@ -164,6 +170,8 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "connection_type", "Type of connection: \n" + Join(CONNECTION_TYPE_DOC, ",\n") + ".\n" "Please note this output is unlikely to be stable in upcoming releases as we iterate to\n" "best capture connection behaviors."}, + {RPCResult::Type::STR, "transport_protocol_type", "Type of transport protocol: \n" + Join(TRANSPORT_TYPE_DOC, ",\n") + ".\n"}, + {RPCResult::Type::STR, "session_id", "The session ID for this connection, or \"\" if there is none (\"v2\" transport protocol only).\n"}, }}, }}, }, @@ -268,6 +276,8 @@ static RPCHelpMan getpeerinfo() } obj.pushKV("bytesrecv_per_msg", recvPerMsgType); obj.pushKV("connection_type", ConnectionTypeAsString(stats.m_conn_type)); + obj.pushKV("transport_protocol_type", TransportTypeAsString(stats.m_transport_type)); + obj.pushKV("session_id", stats.m_session_id); ret.push_back(obj); } @@ -287,20 +297,19 @@ static RPCHelpMan addnode() strprintf("Addnode connections are limited to %u at a time", MAX_ADDNODE_CONNECTIONS) + " and are counted separately from the -maxconnections limit.\n", { - {"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The node (see getpeerinfo for nodes)"}, + {"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The 
address of the peer to connect to"}, {"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once"}, + {"v2transport", RPCArg::Type::BOOL, RPCArg::Default{false}, "Attempt to connect using BIP324 v2 transport protocol (ignored for 'remove' command)"}, }, RPCResult{RPCResult::Type::NONE, "", ""}, RPCExamples{ - HelpExampleCli("addnode", "\"192.168.0.6:8333\" \"onetry\"") - + HelpExampleRpc("addnode", "\"192.168.0.6:8333\", \"onetry\"") + HelpExampleCli("addnode", "\"192.168.0.6:8333\" \"onetry\" true") + + HelpExampleRpc("addnode", "\"192.168.0.6:8333\", \"onetry\" true") }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - std::string strCommand; - if (!request.params[1].isNull()) - strCommand = request.params[1].get_str(); - if (strCommand != "onetry" && strCommand != "add" && strCommand != "remove") { + const std::string command{request.params[1].get_str()}; + if (command != "onetry" && command != "add" && command != "remove") { throw std::runtime_error( self.ToString()); } @@ -308,24 +317,29 @@ static RPCHelpMan addnode() NodeContext& node = EnsureAnyNodeContext(request.context); CConnman& connman = EnsureConnman(node); - std::string strNode = request.params[0].get_str(); + const std::string node_arg{request.params[0].get_str()}; + bool use_v2transport = self.Arg<bool>(2); + + if (use_v2transport && !(node.connman->GetLocalServices() & NODE_P2P_V2)) { + throw JSONRPCError(RPC_INVALID_PARAMETER, "Error: v2transport requested but not enabled (see -v2transport)"); + } - if (strCommand == "onetry") + if (command == "onetry") { CAddress addr; - connman.OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), ConnectionType::MANUAL); + connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grant_outbound=*/{}, node_arg.c_str(), ConnectionType::MANUAL, use_v2transport); return UniValue::VNULL; } - if (strCommand == "add") 
+ if (command == "add") { - if (!connman.AddNode(strNode)) { + if (!connman.AddNode({node_arg, use_v2transport})) { throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: Node already added"); } } - else if(strCommand == "remove") + else if (command == "remove") { - if (!connman.RemoveAddedNode(strNode)) { + if (!connman.RemoveAddedNode(node_arg)) { throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node could not be removed. It has not been added previously."); } } @@ -477,7 +491,7 @@ static RPCHelpMan getaddednodeinfo() if (!request.params[0].isNull()) { bool found = false; for (const AddedNodeInfo& info : vInfo) { - if (info.strAddedNode == request.params[0].get_str()) { + if (info.m_params.m_added_node == request.params[0].get_str()) { vInfo.assign(1, info); found = true; break; @@ -492,7 +506,7 @@ static RPCHelpMan getaddednodeinfo() for (const AddedNodeInfo& info : vInfo) { UniValue obj(UniValue::VOBJ); - obj.pushKV("addednode", info.strAddedNode); + obj.pushKV("addednode", info.m_params.m_added_node); obj.pushKV("connected", info.fConnected); UniValue addresses(UniValue::VARR); if (info.fConnected) { @@ -1016,6 +1030,55 @@ static RPCHelpMan sendmsgtopeer() }; } +static RPCHelpMan getaddrmaninfo() +{ + return RPCHelpMan{"getaddrmaninfo", + "\nProvides information about the node's address manager by returning the number of " + "addresses in the `new` and `tried` tables and their sum for all networks.\n" + "This RPC is for testing only.\n", + {}, + RPCResult{ + RPCResult::Type::OBJ_DYN, "", "json object with network type as keys", + { + {RPCResult::Type::OBJ, "network", "the network (" + Join(GetNetworkNames(), ", ") + ")", + { + {RPCResult::Type::NUM, "new", "number of addresses in the new table, which represent potential peers the node has discovered but hasn't yet successfully connected to."}, + {RPCResult::Type::NUM, "tried", "number of addresses in the tried table, which represent peers the node has successfully connected to in the past."}, + 
{RPCResult::Type::NUM, "total", "total number of addresses in both new/tried tables"}, + }}, + } + }, + RPCExamples{ + HelpExampleCli("getaddrmaninfo", "") + + HelpExampleRpc("getaddrmaninfo", "") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue + { + NodeContext& node = EnsureAnyNodeContext(request.context); + if (!node.addrman) { + throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Address manager functionality missing or disabled"); + } + + UniValue ret(UniValue::VOBJ); + for (int n = 0; n < NET_MAX; ++n) { + enum Network network = static_cast<enum Network>(n); + if (network == NET_UNROUTABLE || network == NET_INTERNAL) continue; + UniValue obj(UniValue::VOBJ); + obj.pushKV("new", node.addrman->Size(network, true)); + obj.pushKV("tried", node.addrman->Size(network, false)); + obj.pushKV("total", node.addrman->Size(network)); + ret.pushKV(GetNetworkName(network), obj); + } + UniValue obj(UniValue::VOBJ); + obj.pushKV("new", node.addrman->Size(std::nullopt, true)); + obj.pushKV("tried", node.addrman->Size(std::nullopt, false)); + obj.pushKV("total", node.addrman->Size()); + ret.pushKV("all_networks", obj); + return ret; + }, + }; +} + void RegisterNetRPCCommands(CRPCTable& t) { static const CRPCCommand commands[]{ @@ -1035,6 +1098,7 @@ void RegisterNetRPCCommands(CRPCTable& t) {"hidden", &addconnection}, {"hidden", &addpeeraddress}, {"hidden", &sendmsgtopeer}, + {"hidden", &getaddrmaninfo}, }; for (const auto& c : commands) { t.appendCommand(c.name, &c); diff --git a/src/rpc/output_script.cpp b/src/rpc/output_script.cpp index 4dd424fa14..f9343f48a8 100644 --- a/src/rpc/output_script.cpp +++ b/src/rpc/output_script.cpp @@ -280,6 +280,11 @@ static RPCHelpMan deriveaddresses() for (const CScript& script : scripts) { CTxDestination dest; if (!ExtractDestination(script, dest)) { + // ExtractDestination no longer returns true for P2PK since it doesn't have a corresponding address + // However combo will output P2PK and should just ignore 
that script + if (scripts.size() > 1 && std::get_if<PubKeyDestination>(&dest)) { + continue; + } throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Descriptor does not have a corresponding address"); } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index fa5dd281a1..31ca126862 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1949,6 +1949,7 @@ RPCHelpMan descriptorprocesspsbt() { {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction"}, {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"}, + {RPCResult::Type::STR_HEX, "hex", /*optional=*/true, "The hex-encoded network transaction if complete"}, } }, RPCExamples{ @@ -1989,7 +1990,14 @@ RPCHelpMan descriptorprocesspsbt() result.pushKV("psbt", EncodeBase64(ssTx)); result.pushKV("complete", complete); - + if (complete) { + CMutableTransaction mtx; + PartiallySignedTransaction psbtx_copy = psbtx; + CHECK_NONFATAL(FinalizeAndExtractPSBT(psbtx_copy, mtx)); + CDataStream ssTx_final(SER_NETWORK, PROTOCOL_VERSION); + ssTx_final << mtx; + result.pushKV("hex", HexStr(ssTx_final)); + } return result; }, }; diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index 74ef04033e..a11366bd47 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -253,6 +253,11 @@ public: return UniValue(UniValue::VOBJ); } + UniValue operator()(const PubKeyDestination& dest) const + { + return UniValue(UniValue::VOBJ); + } + UniValue operator()(const PKHash& keyID) const { UniValue obj(UniValue::VOBJ); @@ -303,8 +308,8 @@ public: { UniValue obj(UniValue::VOBJ); obj.pushKV("iswitness", true); - obj.pushKV("witness_version", (int)id.version); - obj.pushKV("witness_program", HexStr({id.program, id.length})); + obj.pushKV("witness_version", id.GetWitnessVersion()); + obj.pushKV("witness_program", HexStr(id.GetWitnessProgram())); return obj; } }; @@ -677,6 +682,7 @@ TMPL_INST(nullptr, std::optional<bool>, maybe_arg ? 
std::optional{maybe_arg->get TMPL_INST(nullptr, const std::string*, maybe_arg ? &maybe_arg->get_str() : nullptr;); // Required arg or optional arg with default value. +TMPL_INST(CheckRequiredOrDefault, bool, CHECK_NONFATAL(maybe_arg)->get_bool();); TMPL_INST(CheckRequiredOrDefault, int, CHECK_NONFATAL(maybe_arg)->getInt<int>();); TMPL_INST(CheckRequiredOrDefault, uint64_t, CHECK_NONFATAL(maybe_arg)->getInt<uint64_t>();); TMPL_INST(CheckRequiredOrDefault, const std::string&, CHECK_NONFATAL(maybe_arg)->get_str();); diff --git a/src/serialize.h b/src/serialize.h index f1595077e9..e53ff9fa4c 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -131,7 +131,6 @@ enum // primary actions SER_NETWORK = (1 << 0), SER_DISK = (1 << 1), - SER_GETHASH = (1 << 2), }; /** @@ -167,9 +166,9 @@ const Out& AsBase(const In& x) return x; } -#define READWRITE(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__)) -#define SER_READ(obj, code) ::SerRead(s, ser_action, obj, [&](Stream& s, typename std::remove_const<Type>::type& obj) { code; }) -#define SER_WRITE(obj, code) ::SerWrite(s, ser_action, obj, [&](Stream& s, const Type& obj) { code; }) +#define READWRITE(...) (ser_action.SerReadWriteMany(s, __VA_ARGS__)) +#define SER_READ(obj, code) ser_action.SerRead(s, obj, [&](Stream& s, typename std::remove_const<Type>::type& obj) { code; }) +#define SER_WRITE(obj, code) ser_action.SerWrite(s, obj, [&](Stream& s, const Type& obj) { code; }) /** * Implement the Ser and Unser methods needed for implementing a formatter (see Using below). @@ -1008,17 +1007,65 @@ void Unserialize(Stream& is, std::shared_ptr<const T>& p) p = std::make_shared<const T>(deserialize, is); } +/** + * Support for (un)serializing many things at once + */ + +template <typename Stream, typename... Args> +void SerializeMany(Stream& s, const Args&... args) +{ + (::Serialize(s, args), ...); +} + +template <typename Stream, typename... Args> +inline void UnserializeMany(Stream& s, Args&&... 
args) +{ + (::Unserialize(s, args), ...); +} /** * Support for all macros providing or using the ser_action parameter of the SerializationOps method. */ struct ActionSerialize { - constexpr bool ForRead() const { return false; } + static constexpr bool ForRead() { return false; } + + template<typename Stream, typename... Args> + static void SerReadWriteMany(Stream& s, const Args&... args) + { + ::SerializeMany(s, args...); + } + + template<typename Stream, typename Type, typename Fn> + static void SerRead(Stream& s, Type&&, Fn&&) + { + } + + template<typename Stream, typename Type, typename Fn> + static void SerWrite(Stream& s, Type&& obj, Fn&& fn) + { + fn(s, std::forward<Type>(obj)); + } }; struct ActionUnserialize { - constexpr bool ForRead() const { return true; } -}; + static constexpr bool ForRead() { return true; } + template<typename Stream, typename... Args> + static void SerReadWriteMany(Stream& s, Args&&... args) + { + ::UnserializeMany(s, args...); + } + + template<typename Stream, typename Type, typename Fn> + static void SerRead(Stream& s, Type&& obj, Fn&& fn) + { + fn(s, std::forward<Type>(obj)); + } + + template<typename Stream, typename Type, typename Fn> + static void SerWrite(Stream& s, Type&&, Fn&&) + { + } +}; /* ::GetSerializeSize implementations * @@ -1065,52 +1112,6 @@ public: int GetVersion() const { return nVersion; } }; -template <typename Stream, typename... Args> -void SerializeMany(Stream& s, const Args&... args) -{ - (::Serialize(s, args), ...); -} - -template <typename Stream, typename... Args> -inline void UnserializeMany(Stream& s, Args&&... args) -{ - (::Unserialize(s, args), ...); -} - -template<typename Stream, typename... Args> -inline void SerReadWriteMany(Stream& s, ActionSerialize ser_action, const Args&... args) -{ - ::SerializeMany(s, args...); -} - -template<typename Stream, typename... Args> -inline void SerReadWriteMany(Stream& s, ActionUnserialize ser_action, Args&&... 
args) -{ - ::UnserializeMany(s, args...); -} - -template<typename Stream, typename Type, typename Fn> -inline void SerRead(Stream& s, ActionSerialize ser_action, Type&&, Fn&&) -{ -} - -template<typename Stream, typename Type, typename Fn> -inline void SerRead(Stream& s, ActionUnserialize ser_action, Type&& obj, Fn&& fn) -{ - fn(s, std::forward<Type>(obj)); -} - -template<typename Stream, typename Type, typename Fn> -inline void SerWrite(Stream& s, ActionSerialize ser_action, Type&& obj, Fn&& fn) -{ - fn(s, std::forward<Type>(obj)); -} - -template<typename Stream, typename Type, typename Fn> -inline void SerWrite(Stream& s, ActionUnserialize ser_action, Type&&, Fn&&) -{ -} - template<typename I> inline void WriteVarInt(CSizeComputer &s, I n) { @@ -1161,12 +1162,11 @@ public: template <typename Params, typename T> class ParamsWrapper { - static_assert(std::is_lvalue_reference<T>::value, "ParamsWrapper needs an lvalue reference type T"); const Params& m_params; - T m_object; + T& m_object; public: - explicit ParamsWrapper(const Params& params, T obj) : m_params{params}, m_object{obj} {} + explicit ParamsWrapper(const Params& params, T& obj) : m_params{params}, m_object{obj} {} template <typename Stream> void Serialize(Stream& s) const @@ -1190,7 +1190,20 @@ public: template <typename Params, typename T> static auto WithParams(const Params& params, T&& t) { - return ParamsWrapper<Params, T&>{params, t}; + return ParamsWrapper<Params, T>{params, t}; } +/** + * Helper macro for SerParams structs + * + * Allows you define SerParams instances and then apply them directly + * to an object via function call syntax, eg: + * + * constexpr SerParams FOO{....}; + * ss << FOO(obj); + */ +#define SER_PARAMS_OPFUNC \ + template <typename T> \ + auto operator()(T&& t) const { return WithParams(*this, t); } + #endif // BITCOIN_SERIALIZE_H diff --git a/src/signet.cpp b/src/signet.cpp index 21b289b637..ef0faaa5f8 100644 --- a/src/signet.cpp +++ b/src/signet.cpp @@ -98,7 +98,7 @@ 
std::optional<SignetTxs> SignetTxs::Create(const CBlock& block, const CScript& c // no signet solution -- allow this to support OP_TRUE as trivial block challenge } else { try { - SpanReader v{SER_NETWORK, INIT_PROTO_VERSION, signet_solution}; + SpanReader v{INIT_PROTO_VERSION, signet_solution}; v >> tx_spending.vin[0].scriptSig; v >> tx_spending.vin[0].scriptWitness.stack; if (!v.empty()) return std::nullopt; // extraneous data encountered @@ -109,7 +109,7 @@ std::optional<SignetTxs> SignetTxs::Create(const CBlock& block, const CScript& c uint256 signet_merkle = ComputeModifiedMerkleRoot(modified_cb, block); std::vector<uint8_t> block_data; - CVectorWriter writer(SER_NETWORK, INIT_PROTO_VERSION, block_data, 0); + CVectorWriter writer{INIT_PROTO_VERSION, block_data, 0}; writer << block.nVersion; writer << block.hashPrevBlock; writer << signet_merkle; diff --git a/src/streams.h b/src/streams.h index f9a817c9b6..d58de5233b 100644 --- a/src/streams.h +++ b/src/streams.h @@ -50,11 +50,10 @@ class OverrideStream { Stream* stream; - const int nType; const int nVersion; public: - OverrideStream(Stream* stream_, int nType_, int nVersion_) : stream(stream_), nType(nType_), nVersion(nVersion_) {} + OverrideStream(Stream* stream_, int nVersion_) : stream{stream_}, nVersion{nVersion_} {} template<typename T> OverrideStream<Stream>& operator<<(const T& obj) @@ -81,7 +80,6 @@ public: } int GetVersion() const { return nVersion; } - int GetType() const { return nType; } size_t size() const { return stream->size(); } void ignore(size_t size) { return stream->ignore(size); } }; @@ -95,13 +93,12 @@ class CVectorWriter public: /* - * @param[in] nTypeIn Serialization Type * @param[in] nVersionIn Serialization Version (including any flags) * @param[in] vchDataIn Referenced byte vector to overwrite/append * @param[in] nPosIn Starting position. Vector index where writes should start. The vector will initially * grow as necessary to max(nPosIn, vec.size()). So to append, use vec.size(). 
*/ - CVectorWriter(int nTypeIn, int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn) : nType(nTypeIn), nVersion(nVersionIn), vchData(vchDataIn), nPos(nPosIn) + CVectorWriter(int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn) : nVersion{nVersionIn}, vchData{vchDataIn}, nPos{nPosIn} { if(nPos > vchData.size()) vchData.resize(nPos); @@ -111,7 +108,7 @@ class CVectorWriter * @param[in] args A list of items to serialize starting at nPosIn. */ template <typename... Args> - CVectorWriter(int nTypeIn, int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn, Args&&... args) : CVectorWriter(nTypeIn, nVersionIn, vchDataIn, nPosIn) + CVectorWriter(int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn, Args&&... args) : CVectorWriter{nVersionIn, vchDataIn, nPosIn} { ::SerializeMany(*this, std::forward<Args>(args)...); } @@ -137,12 +134,8 @@ class CVectorWriter { return nVersion; } - int GetType() const - { - return nType; - } + private: - const int nType; const int nVersion; std::vector<unsigned char>& vchData; size_t nPos; @@ -153,19 +146,16 @@ private: class SpanReader { private: - const int m_type; const int m_version; Span<const unsigned char> m_data; public: - /** - * @param[in] type Serialization Type * @param[in] version Serialization Version (including any flags) * @param[in] data Referenced byte vector to overwrite/append */ - SpanReader(int type, int version, Span<const unsigned char> data) - : m_type(type), m_version(version), m_data(data) {} + SpanReader(int version, Span<const unsigned char> data) + : m_version{version}, m_data{data} {} template<typename T> SpanReader& operator>>(T&& obj) @@ -175,7 +165,6 @@ public: } int GetVersion() const { return m_version; } - int GetType() const { return m_type; } size_t size() const { return m_data.size(); } bool empty() const { return m_data.empty(); } @@ -571,7 +560,7 @@ public: } }; -/** Non-refcounted RAII wrapper around a FILE* that implements a ring buffer 
to +/** Wrapper around a CAutoFile& that implements a ring buffer to * deserialize from. It guarantees the ability to rewind a given number of bytes. * * Will automatically close the file when it goes out of scope if not null. @@ -580,9 +569,7 @@ public: class BufferedFile { private: - const int nVersion; - - FILE *src; //!< source file + CAutoFile& m_src; uint64_t nSrcPos{0}; //!< how many bytes have been read from source uint64_t m_read_pos{0}; //!< how many bytes have been read from this uint64_t nReadLimit; //!< up to which position we're allowed to read @@ -598,9 +585,9 @@ private: readNow = nAvail; if (readNow == 0) return false; - size_t nBytes = fread((void*)&vchBuf[pos], 1, readNow, src); + size_t nBytes{m_src.detail_fread(Span{vchBuf}.subspan(pos, readNow))}; if (nBytes == 0) { - throw std::ios_base::failure(feof(src) ? "BufferedFile::Fill: end of file" : "BufferedFile::Fill: fread failed"); + throw std::ios_base::failure{m_src.feof() ? "BufferedFile::Fill: end of file" : "BufferedFile::Fill: fread failed"}; } nSrcPos += nBytes; return true; @@ -629,36 +616,18 @@ private: } public: - BufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nVersionIn) - : nVersion{nVersionIn}, nReadLimit{std::numeric_limits<uint64_t>::max()}, nRewind{nRewindIn}, vchBuf(nBufSize, std::byte{0}) + BufferedFile(CAutoFile& file, uint64_t nBufSize, uint64_t nRewindIn) + : m_src{file}, nReadLimit{std::numeric_limits<uint64_t>::max()}, nRewind{nRewindIn}, vchBuf(nBufSize, std::byte{0}) { if (nRewindIn >= nBufSize) throw std::ios_base::failure("Rewind limit must be less than buffer size"); - src = fileIn; - } - - ~BufferedFile() - { - fclose(); } - // Disallow copies - BufferedFile(const BufferedFile&) = delete; - BufferedFile& operator=(const BufferedFile&) = delete; - - int GetVersion() const { return nVersion; } - - void fclose() - { - if (src) { - ::fclose(src); - src = nullptr; - } - } + int GetVersion() const { return m_src.GetVersion(); } //! 
check whether we're at the end of the source file bool eof() const { - return m_read_pos == nSrcPos && feof(src); + return m_read_pos == nSrcPos && m_src.feof(); } //! read a number of bytes diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h index 558f835f11..4395567722 100644 --- a/src/support/allocators/secure.h +++ b/src/support/allocators/secure.h @@ -57,4 +57,28 @@ struct secure_allocator { // TODO: Consider finding a way to make incoming RPC request.params[i] mlock()ed as well typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString; +template<typename T> +struct SecureUniqueDeleter { + void operator()(T* t) noexcept { + secure_allocator<T>().deallocate(t, 1); + } +}; + +template<typename T> +using secure_unique_ptr = std::unique_ptr<T, SecureUniqueDeleter<T>>; + +template<typename T, typename... Args> +secure_unique_ptr<T> make_secure_unique(Args&&... as) +{ + T* p = secure_allocator<T>().allocate(1); + + // initialize in place, and return as secure_unique_ptr + try { + return secure_unique_ptr<T>(new (p) T(std::forward<Args>(as)...)); + } catch (...) { + secure_allocator<T>().deallocate(p, 1); + throw; + } +} + #endif // BITCOIN_SUPPORT_ALLOCATORS_SECURE_H diff --git a/src/sync.h b/src/sync.h index 7242a793ab..45d40b5fdc 100644 --- a/src/sync.h +++ b/src/sync.h @@ -301,6 +301,10 @@ inline MutexType* MaybeCheckNotHeld(MutexType* m) LOCKS_EXCLUDED(m) LOCK_RETURNE //! gcc and the -Wreturn-stack-address flag in clang, both enabled by default. #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) +/** An implementation of a semaphore. + * + * See https://en.wikipedia.org/wiki/Semaphore_(programming) + */ class CSemaphore { private: @@ -309,25 +313,33 @@ private: int value; public: - explicit CSemaphore(int init) : value(init) {} + explicit CSemaphore(int init) noexcept : value(init) {} - void wait() + // Disallow default construct, copy, move. 
+ CSemaphore() = delete; + CSemaphore(const CSemaphore&) = delete; + CSemaphore(CSemaphore&&) = delete; + CSemaphore& operator=(const CSemaphore&) = delete; + CSemaphore& operator=(CSemaphore&&) = delete; + + void wait() noexcept { std::unique_lock<std::mutex> lock(mutex); condition.wait(lock, [&]() { return value >= 1; }); value--; } - bool try_wait() + bool try_wait() noexcept { std::lock_guard<std::mutex> lock(mutex); - if (value < 1) + if (value < 1) { return false; + } value--; return true; } - void post() + void post() noexcept { { std::lock_guard<std::mutex> lock(mutex); @@ -345,45 +357,64 @@ private: bool fHaveGrant; public: - void Acquire() + void Acquire() noexcept { - if (fHaveGrant) + if (fHaveGrant) { return; + } sem->wait(); fHaveGrant = true; } - void Release() + void Release() noexcept { - if (!fHaveGrant) + if (!fHaveGrant) { return; + } sem->post(); fHaveGrant = false; } - bool TryAcquire() + bool TryAcquire() noexcept { - if (!fHaveGrant && sem->try_wait()) + if (!fHaveGrant && sem->try_wait()) { fHaveGrant = true; + } return fHaveGrant; } - void MoveTo(CSemaphoreGrant& grant) + // Disallow copy. + CSemaphoreGrant(const CSemaphoreGrant&) = delete; + CSemaphoreGrant& operator=(const CSemaphoreGrant&) = delete; + + // Allow move. 
+ CSemaphoreGrant(CSemaphoreGrant&& other) noexcept + { + sem = other.sem; + fHaveGrant = other.fHaveGrant; + other.fHaveGrant = false; + other.sem = nullptr; + } + + CSemaphoreGrant& operator=(CSemaphoreGrant&& other) noexcept { - grant.Release(); - grant.sem = sem; - grant.fHaveGrant = fHaveGrant; - fHaveGrant = false; + Release(); + sem = other.sem; + fHaveGrant = other.fHaveGrant; + other.fHaveGrant = false; + other.sem = nullptr; + return *this; } - CSemaphoreGrant() : sem(nullptr), fHaveGrant(false) {} + CSemaphoreGrant() noexcept : sem(nullptr), fHaveGrant(false) {} - explicit CSemaphoreGrant(CSemaphore& sema, bool fTry = false) : sem(&sema), fHaveGrant(false) + explicit CSemaphoreGrant(CSemaphore& sema, bool fTry = false) noexcept : sem(&sema), fHaveGrant(false) { - if (fTry) + if (fTry) { TryAcquire(); - else + } else { Acquire(); + } } ~CSemaphoreGrant() @@ -391,7 +422,7 @@ public: Release(); } - operator bool() const + explicit operator bool() const noexcept { return fHaveGrant; } diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index 941018a820..b01ba81c5f 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -1019,7 +1019,7 @@ static auto MakeCorruptPeersDat() std::optional<CNetAddr> resolved{LookupHost("252.2.2.2", false)}; BOOST_REQUIRE(resolved.has_value()); AddrInfo info = AddrInfo(addr, resolved.value()); - s << WithParams(CAddress::V1_DISK, info); + s << CAddress::V1_DISK(info); return s; } diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp index 13cb1cc314..c6800c498b 100644 --- a/src/test/blockmanager_tests.cpp +++ b/src/test/blockmanager_tests.cpp @@ -8,10 +8,12 @@ #include <node/context.h> #include <node/kernel_notifications.h> #include <script/solver.h> +#include <primitives/block.h> #include <util/chaintype.h> #include <validation.h> #include <boost/test/unit_test.hpp> +#include <test/util/logging.h> #include <test/util/setup_common.h> using 
node::BLOCK_SERIALIZATION_HEADER_SIZE; @@ -72,13 +74,13 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_scan_unlink_already_pruned_files, TestChain // Check that the file is not unlinked after ScanAndUnlinkAlreadyPrunedFiles // if m_have_pruned is not yet set WITH_LOCK(chainman->GetMutex(), blockman.ScanAndUnlinkAlreadyPrunedFiles()); - BOOST_CHECK(!AutoFile(blockman.OpenBlockFile(pos, true)).IsNull()); + BOOST_CHECK(!blockman.OpenBlockFile(pos, true).IsNull()); // Check that the file is unlinked after ScanAndUnlinkAlreadyPrunedFiles // once m_have_pruned is set blockman.m_have_pruned = true; WITH_LOCK(chainman->GetMutex(), blockman.ScanAndUnlinkAlreadyPrunedFiles()); - BOOST_CHECK(AutoFile(blockman.OpenBlockFile(pos, true)).IsNull()); + BOOST_CHECK(blockman.OpenBlockFile(pos, true).IsNull()); // Check that calling with already pruned files doesn't cause an error WITH_LOCK(chainman->GetMutex(), blockman.ScanAndUnlinkAlreadyPrunedFiles()); @@ -88,7 +90,7 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_scan_unlink_already_pruned_files, TestChain BOOST_CHECK_NE(old_tip, new_tip); const int new_file_number{WITH_LOCK(chainman->GetMutex(), return new_tip->GetBlockPos().nFile)}; const FlatFilePos new_pos(new_file_number, 0); - BOOST_CHECK(!AutoFile(blockman.OpenBlockFile(new_pos, true)).IsNull()); + BOOST_CHECK(!blockman.OpenBlockFile(new_pos, true).IsNull()); } BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup) @@ -130,4 +132,73 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup) BOOST_CHECK(!blockman.CheckBlockDataAvailability(tip, *last_pruned_block)); } +BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) +{ + KernelNotifications notifications{m_node.exit_status}; + node::BlockManager::Options blockman_opts{ + .chainparams = Params(), + .blocks_dir = m_args.GetBlocksDirPath(), + .notifications = notifications, + }; + BlockManager blockman{m_node.kernel->interrupt, blockman_opts}; + + // Test blocks with no transactions, 
not even a coinbase + CBlock block1; + block1.nVersion = 1; + CBlock block2; + block2.nVersion = 2; + CBlock block3; + block3.nVersion = 3; + + // They are 80 bytes header + 1 byte 0x00 for vtx length + constexpr int TEST_BLOCK_SIZE{81}; + + // Blockstore is empty + BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), 0); + + // Write the first block; dbp=nullptr means this block doesn't already have a disk + // location, so allocate a free location and write it there. + FlatFilePos pos1{blockman.SaveBlockToDisk(block1, /*nHeight=*/1, /*dbp=*/nullptr)}; + + // Write second block + FlatFilePos pos2{blockman.SaveBlockToDisk(block2, /*nHeight=*/2, /*dbp=*/nullptr)}; + + // Two blocks in the file + BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2); + + // First two blocks are written as expected + // Errors are expected because block data is junk, thrown AFTER successful read + CBlock read_block; + BOOST_CHECK_EQUAL(read_block.nVersion, 0); + { + ASSERT_DEBUG_LOG("ReadBlockFromDisk: Errors in block header"); + BOOST_CHECK(!blockman.ReadBlockFromDisk(read_block, pos1)); + BOOST_CHECK_EQUAL(read_block.nVersion, 1); + } + { + ASSERT_DEBUG_LOG("ReadBlockFromDisk: Errors in block header"); + BOOST_CHECK(!blockman.ReadBlockFromDisk(read_block, pos2)); + BOOST_CHECK_EQUAL(read_block.nVersion, 2); + } + + // When FlatFilePos* dbp is given, SaveBlockToDisk() will not write or + // overwrite anything to the flat file block storage. It will, however, + // update the blockfile metadata. This is to facilitate reindexing + // when the user has the blocks on disk but the metadata is being rebuilt. + // Verify this behavior by attempting (and failing) to write block 3 data + // to block 2 location. + CBlockFileInfo* block_data = blockman.GetBlockFileInfo(0); + BOOST_CHECK_EQUAL(block_data->nBlocks, 2); + BOOST_CHECK(blockman.SaveBlockToDisk(block3, /*nHeight=*/3, /*dbp=*/&pos2) == pos2); + // Metadata is updated... 
+ BOOST_CHECK_EQUAL(block_data->nBlocks, 3); + // ...but there are still only two blocks in the file + BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2); + + // Block 2 was not overwritten: + // SaveBlockToDisk() did not call WriteBlockToDisk() because `FlatFilePos* dbp` was non-null + blockman.ReadBlockFromDisk(read_block, pos2); + BOOST_CHECK_EQUAL(read_block.nVersion, 2); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/coinstatsindex_tests.cpp b/src/test/coinstatsindex_tests.cpp index 787a196a0c..50f3f7d833 100644 --- a/src/test/coinstatsindex_tests.cpp +++ b/src/test/coinstatsindex_tests.cpp @@ -105,7 +105,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_unclean_shutdown, TestChain100Setup) // Send block connected notification, then stop the index without // sending a chainstate flushed notification. Prior to #24138, this // would cause the index to be corrupted and fail to reload. - ValidationInterfaceTest::BlockConnected(index, new_block, new_block_index); + ValidationInterfaceTest::BlockConnected(ChainstateRole::NORMAL, index, new_block, new_block_index); index.Stop(); } diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index 9611a872ec..1b11ff6fdf 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -83,7 +83,7 @@ CNetAddr RandAddr(FuzzedDataProvider& fuzzed_data_provider, FastRandomContext& f s << net; s << fast_random_context.randbytes(net_len_map.at(net)); - s >> WithParams(CAddress::V2_NETWORK, addr); + s >> CAddress::V2_NETWORK(addr); } // Return a dummy IPv4 5.5.5.5 if we generated an invalid address. 
diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp index 1116274e3d..636f11b381 100644 --- a/src/test/fuzz/buffered_file.cpp +++ b/src/test/fuzz/buffered_file.cpp @@ -19,15 +19,12 @@ FUZZ_TARGET(buffered_file) FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; FuzzedFileProvider fuzzed_file_provider = ConsumeFile(fuzzed_data_provider); std::optional<BufferedFile> opt_buffered_file; - FILE* fuzzed_file = fuzzed_file_provider.open(); + CAutoFile fuzzed_file{fuzzed_file_provider.open(), 0}; try { - opt_buffered_file.emplace(fuzzed_file, fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096), fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096), fuzzed_data_provider.ConsumeIntegral<int>()); + opt_buffered_file.emplace(fuzzed_file, fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096), fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096)); } catch (const std::ios_base::failure&) { - if (fuzzed_file != nullptr) { - fclose(fuzzed_file); - } } - if (opt_buffered_file && fuzzed_file != nullptr) { + if (opt_buffered_file && !fuzzed_file.IsNull()) { bool setpos_fail = false; LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) { CallOneOf( diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp index e46e085ee7..0dab2a2e97 100644 --- a/src/test/fuzz/connman.cpp +++ b/src/test/fuzz/connman.cpp @@ -61,7 +61,7 @@ FUZZ_TARGET(connman, .init = initialize_connman) random_string = fuzzed_data_provider.ConsumeRandomLengthString(64); }, [&] { - connman.AddNode(random_string); + connman.AddNode({random_string, fuzzed_data_provider.ConsumeBool()}); }, [&] { connman.CheckIncomingNonce(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 100a6b4ee4..510ee7fb5b 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -91,9 +91,9 @@ void DeserializeFromFuzzingInput(FuzzBufferType buffer, 
T&& obj, const P& params } template <typename T> -CDataStream Serialize(const T& obj, const int version = INIT_PROTO_VERSION, const int ser_type = SER_NETWORK) +CDataStream Serialize(const T& obj) { - CDataStream ds(ser_type, version); + CDataStream ds{SER_NETWORK, INIT_PROTO_VERSION}; ds << obj; return ds; } @@ -107,12 +107,10 @@ T Deserialize(CDataStream ds) } template <typename T> -void DeserializeFromFuzzingInput(FuzzBufferType buffer, T&& obj, const std::optional<int> protocol_version = std::nullopt, const int ser_type = SER_NETWORK) +void DeserializeFromFuzzingInput(FuzzBufferType buffer, T&& obj) { - CDataStream ds(buffer, ser_type, INIT_PROTO_VERSION); - if (protocol_version) { - ds.SetVersion(*protocol_version); - } else { + CDataStream ds{buffer, SER_NETWORK, INIT_PROTO_VERSION}; + { try { int version; ds >> version; @@ -135,9 +133,9 @@ void AssertEqualAfterSerializeDeserialize(const T& obj, const P& params) assert(Deserialize<T>(Serialize(obj, params), params) == obj); } template <typename T> -void AssertEqualAfterSerializeDeserialize(const T& obj, const int version = INIT_PROTO_VERSION, const int ser_type = SER_NETWORK) +void AssertEqualAfterSerializeDeserialize(const T& obj) { - assert(Deserialize<T>(Serialize(obj, version, ser_type)) == obj); + assert(Deserialize<T>(Serialize(obj)) == obj); } } // namespace @@ -254,7 +252,7 @@ FUZZ_TARGET(netaddr_deserialize, .init = initialize_deserialize) if (!maybe_na) return; const CNetAddr& na{*maybe_na}; if (na.IsAddrV1Compatible()) { - AssertEqualAfterSerializeDeserialize(na, ConsumeDeserializationParams<CNetAddr::SerParams>(fdp)); + AssertEqualAfterSerializeDeserialize(na, CNetAddr::V1); } AssertEqualAfterSerializeDeserialize(na, CNetAddr::V2); } @@ -266,7 +264,7 @@ FUZZ_TARGET(service_deserialize, .init = initialize_deserialize) if (!maybe_s) return; const CService& s{*maybe_s}; if (s.IsAddrV1Compatible()) { - AssertEqualAfterSerializeDeserialize(s, ConsumeDeserializationParams<CNetAddr::SerParams>(fdp)); + 
AssertEqualAfterSerializeDeserialize(s, CNetAddr::V1); } AssertEqualAfterSerializeDeserialize(s, CNetAddr::V2); if (ser_params.enc == CNetAddr::Encoding::V1) { @@ -281,8 +279,8 @@ FUZZ_TARGET_DESERIALIZE(messageheader_deserialize, { FUZZ_TARGET(address_deserialize, .init = initialize_deserialize) { FuzzedDataProvider fdp{buffer.data(), buffer.size()}; - const auto ser_enc{ConsumeDeserializationParams<CNetAddr::SerParams>(fdp)}; - const auto maybe_a{ConsumeDeserializable<CAddress>(fdp, CAddress::SerParams{{ser_enc}, CAddress::Format::Network})}; + const auto ser_enc{ConsumeDeserializationParams<CAddress::SerParams>(fdp)}; + const auto maybe_a{ConsumeDeserializable<CAddress>(fdp, ser_enc)}; if (!maybe_a) return; const CAddress& a{*maybe_a}; // A CAddress in V1 mode will roundtrip diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp index f5697f14b1..5245b4607b 100644 --- a/src/test/fuzz/fuzz.cpp +++ b/src/test/fuzz/fuzz.cpp @@ -29,7 +29,7 @@ #include <utility> #include <vector> -#ifdef __AFL_FUZZ_INIT +#if defined(PROVIDE_FUZZ_MAIN_FUNCTION) && defined(__AFL_FUZZ_INIT) __AFL_FUZZ_INIT(); #endif @@ -192,17 +192,11 @@ int main(int argc, char** argv) { initialize(); static const auto& test_one_input = *Assert(g_test_one_input); -#ifdef __AFL_HAVE_MANUAL_CONTROL - // Enable AFL deferred forkserver mode. Requires compilation using - // afl-clang-fast++. See fuzzing.md for details. - __AFL_INIT(); -#endif - #ifdef __AFL_LOOP // Enable AFL persistent mode. Requires compilation using afl-clang-fast++. // See fuzzing.md for details. 
const uint8_t* buffer = __AFL_FUZZ_TESTCASE_BUF; - while (__AFL_LOOP(1000)) { + while (__AFL_LOOP(100000)) { size_t buffer_len = __AFL_FUZZ_TESTCASE_LEN; test_one_input({buffer, buffer_len}); } diff --git a/src/test/fuzz/golomb_rice.cpp b/src/test/fuzz/golomb_rice.cpp index e006653ca9..f3073c5c97 100644 --- a/src/test/fuzz/golomb_rice.cpp +++ b/src/test/fuzz/golomb_rice.cpp @@ -51,7 +51,7 @@ FUZZ_TARGET(golomb_rice) for (int i = 0; i < n; ++i) { elements.insert(ConsumeRandomLengthByteVector(fuzzed_data_provider, 16)); } - CVectorWriter stream(SER_NETWORK, 0, golomb_rice_data, 0); + CVectorWriter stream{0, golomb_rice_data, 0}; WriteCompactSize(stream, static_cast<uint32_t>(elements.size())); BitStreamWriter<CVectorWriter> bitwriter(stream); if (!elements.empty()) { @@ -68,7 +68,7 @@ FUZZ_TARGET(golomb_rice) std::vector<uint64_t> decoded_deltas; { - SpanReader stream{SER_NETWORK, 0, golomb_rice_data}; + SpanReader stream{0, golomb_rice_data}; BitStreamReader<SpanReader> bitreader{stream}; const uint32_t n = static_cast<uint32_t>(ReadCompactSize(stream)); for (uint32_t i = 0; i < n; ++i) { @@ -80,7 +80,7 @@ FUZZ_TARGET(golomb_rice) { const std::vector<uint8_t> random_bytes = ConsumeRandomLengthByteVector(fuzzed_data_provider, 1024); - SpanReader stream{SER_NETWORK, 0, random_bytes}; + SpanReader stream{0, random_bytes}; uint32_t n; try { n = static_cast<uint32_t>(ReadCompactSize(stream)); diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp index 60f4081432..be45443172 100644 --- a/src/test/fuzz/key.cpp +++ b/src/test/fuzz/key.cpp @@ -186,7 +186,7 @@ FUZZ_TARGET(key, .init = initialize_key) const CTxDestination tx_destination = GetDestinationForKey(pubkey, output_type); assert(output_type == OutputType::LEGACY); assert(IsValidDestination(tx_destination)); - assert(CTxDestination{PKHash{pubkey}} == tx_destination); + assert(PKHash{pubkey} == *std::get_if<PKHash>(&tx_destination)); const CScript script_for_destination = GetScriptForDestination(tx_destination); 
assert(script_for_destination.size() == 25); diff --git a/src/test/fuzz/load_external_block_file.cpp b/src/test/fuzz/load_external_block_file.cpp index bdaa4ad1b8..fc903e5ec2 100644 --- a/src/test/fuzz/load_external_block_file.cpp +++ b/src/test/fuzz/load_external_block_file.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <chainparams.h> +#include <clientversion.h> #include <flatfile.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> @@ -27,8 +28,8 @@ FUZZ_TARGET(load_external_block_file, .init = initialize_load_external_block_fil { FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; FuzzedFileProvider fuzzed_file_provider = ConsumeFile(fuzzed_data_provider); - FILE* fuzzed_block_file = fuzzed_file_provider.open(); - if (fuzzed_block_file == nullptr) { + CAutoFile fuzzed_block_file{fuzzed_file_provider.open(), CLIENT_VERSION}; + if (fuzzed_block_file.IsNull()) { return; } if (fuzzed_data_provider.ConsumeBool()) { diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp index 88d6e96eac..21d8dab536 100644 --- a/src/test/fuzz/p2p_transport_serialization.cpp +++ b/src/test/fuzz/p2p_transport_serialization.cpp @@ -328,6 +328,9 @@ void SimulationTest(Transport& initiator, Transport& responder, R& rng, FuzzedDa // Make sure all expected messages were received. assert(expected[0].empty()); assert(expected[1].empty()); + + // Compare session IDs. 
+ assert(transports[0]->GetInfo().session_id == transports[1]->GetInfo().session_id); } std::unique_ptr<Transport> MakeV1Transport(NodeId nodeid) noexcept diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp new file mode 100644 index 0000000000..4c81c0b679 --- /dev/null +++ b/src/test/fuzz/package_eval.cpp @@ -0,0 +1,294 @@ +// Copyright (c) 2023 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <consensus/validation.h> +#include <node/context.h> +#include <node/mempool_args.h> +#include <node/miner.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> +#include <test/fuzz/util/mempool.h> +#include <test/util/mining.h> +#include <test/util/script.h> +#include <test/util/setup_common.h> +#include <test/util/txmempool.h> +#include <util/rbf.h> +#include <validation.h> +#include <validationinterface.h> + +using node::NodeContext; + +namespace { + +const TestingSetup* g_setup; +std::vector<COutPoint> g_outpoints_coinbase_init_mature; + +struct MockedTxPool : public CTxMemPool { + void RollingFeeUpdate() EXCLUSIVE_LOCKS_REQUIRED(!cs) + { + LOCK(cs); + lastRollingFeeUpdate = GetTime(); + blockSinceLastRollingFeeBump = true; + } +}; + +void initialize_tx_pool() +{ + static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(); + g_setup = testing_setup.get(); + + for (int i = 0; i < 2 * COINBASE_MATURITY; ++i) { + COutPoint prevout{MineBlock(g_setup->m_node, P2WSH_OP_TRUE)}; + if (i < COINBASE_MATURITY) { + // Remember the txids to avoid expensive disk access later on + g_outpoints_coinbase_init_mature.push_back(prevout); + } + } + SyncWithValidationInterfaceQueue(); +} + +struct OutpointsUpdater final : public CValidationInterface { + std::set<COutPoint>& m_mempool_outpoints; + + explicit OutpointsUpdater(std::set<COutPoint>& r) + : 
m_mempool_outpoints{r} {} + + void TransactionAddedToMempool(const CTransactionRef& tx, uint64_t /* mempool_sequence */) override + { + // for coins spent we always want to be able to rbf so they're not removed + + // outputs from this tx can now be spent + for (uint32_t index{0}; index < tx->vout.size(); ++index) { + m_mempool_outpoints.insert(COutPoint{tx->GetHash(), index}); + } + } + + void TransactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason, uint64_t /* mempool_sequence */) override + { + // outpoints spent by this tx are now available + for (const auto& input : tx->vin) { + // Could already exist if this was a replacement + m_mempool_outpoints.insert(input.prevout); + } + // outpoints created by this tx no longer exist + for (uint32_t index{0}; index < tx->vout.size(); ++index) { + m_mempool_outpoints.erase(COutPoint{tx->GetHash(), index}); + } + } +}; + +struct TransactionsDelta final : public CValidationInterface { + std::set<CTransactionRef>& m_added; + + explicit TransactionsDelta(std::set<CTransactionRef>& a) + : m_added{a} {} + + void TransactionAddedToMempool(const CTransactionRef& tx, uint64_t /* mempool_sequence */) override + { + // Transactions may be entered and booted any number of times + m_added.insert(tx); + } + + void TransactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason, uint64_t /* mempool_sequence */) override + { + // Transactions may be entered and booted any number of times + m_added.erase(tx); + } +}; + +void MockTime(FuzzedDataProvider& fuzzed_data_provider, const Chainstate& chainstate) +{ + const auto time = ConsumeTime(fuzzed_data_provider, + chainstate.m_chain.Tip()->GetMedianTimePast() + 1, + std::numeric_limits<decltype(chainstate.m_chain.Tip()->nTime)>::max()); + SetMockTime(time); +} + +CTxMemPool MakeMempool(FuzzedDataProvider& fuzzed_data_provider, const NodeContext& node) +{ + // Take the default options for tests... 
+ CTxMemPool::Options mempool_opts{MemPoolOptionsForTest(node)}; + + + // ...override specific options for this specific fuzz suite + mempool_opts.limits.ancestor_count = fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 50); + mempool_opts.limits.ancestor_size_vbytes = fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 202) * 1'000; + mempool_opts.limits.descendant_count = fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 50); + mempool_opts.limits.descendant_size_vbytes = fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 202) * 1'000; + mempool_opts.max_size_bytes = fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 200) * 1'000'000; + mempool_opts.expiry = std::chrono::hours{fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 999)}; + nBytesPerSigOp = fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(1, 999); + + mempool_opts.estimator = nullptr; + mempool_opts.check_ratio = 1; + mempool_opts.require_standard = fuzzed_data_provider.ConsumeBool(); + + // ...and construct a CTxMemPool from it + return CTxMemPool{mempool_opts}; +} + +FUZZ_TARGET(tx_package_eval, .init = initialize_tx_pool) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + const auto& node = g_setup->m_node; + auto& chainstate{static_cast<DummyChainState&>(node.chainman->ActiveChainstate())}; + + MockTime(fuzzed_data_provider, chainstate); + + // All RBF-spendable outpoints outside of the unsubmitted package + std::set<COutPoint> mempool_outpoints; + std::map<COutPoint, CAmount> outpoints_value; + for (const auto& outpoint : g_outpoints_coinbase_init_mature) { + Assert(mempool_outpoints.insert(outpoint).second); + outpoints_value[outpoint] = 50 * COIN; + } + + auto outpoints_updater = std::make_shared<OutpointsUpdater>(mempool_outpoints); + RegisterSharedValidationInterface(outpoints_updater); + + CTxMemPool tx_pool_{MakeMempool(fuzzed_data_provider, node)}; + MockedTxPool& tx_pool = *static_cast<MockedTxPool*>(&tx_pool_); + + 
chainstate.SetMempool(&tx_pool); + + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300) + { + Assert(!mempool_outpoints.empty()); + + std::vector<CTransactionRef> txs; + + // Make packages of 1-to-26 transactions + const auto num_txs = (size_t) fuzzed_data_provider.ConsumeIntegralInRange<int>(1, 26); + std::set<COutPoint> package_outpoints; + while (txs.size() < num_txs) { + + // Last transaction in a package needs to be a child of parents to get further in validation + // so the last transaction to be generated(in a >1 package) must spend all package-made outputs + // Note that this test currently only spends package outputs in last transaction. + bool last_tx = num_txs > 1 && txs.size() == num_txs - 1; + + // Create transaction to add to the mempool + const CTransactionRef tx = [&] { + CMutableTransaction tx_mut; + tx_mut.nVersion = CTransaction::CURRENT_VERSION; + tx_mut.nLockTime = fuzzed_data_provider.ConsumeBool() ? 0 : fuzzed_data_provider.ConsumeIntegral<uint32_t>(); + // Last tx will sweep all outpoints in package + const auto num_in = last_tx ? package_outpoints.size() : fuzzed_data_provider.ConsumeIntegralInRange<int>(1, mempool_outpoints.size()); + const auto num_out = fuzzed_data_provider.ConsumeIntegralInRange<int>(1, mempool_outpoints.size() * 2); + + auto& outpoints = last_tx ? 
package_outpoints : mempool_outpoints; + + Assert(!outpoints.empty()); + + CAmount amount_in{0}; + for (size_t i = 0; i < num_in; ++i) { + // Pop random outpoint + auto pop = outpoints.begin(); + std::advance(pop, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, outpoints.size() - 1)); + const auto outpoint = *pop; + outpoints.erase(pop); + // no need to update or erase from outpoints_value + amount_in += outpoints_value.at(outpoint); + + // Create input + const auto sequence = ConsumeSequence(fuzzed_data_provider); + const auto script_sig = CScript{}; + const auto script_wit_stack = std::vector<std::vector<uint8_t>>{WITNESS_STACK_ELEM_OP_TRUE}; + CTxIn in; + in.prevout = outpoint; + in.nSequence = sequence; + in.scriptSig = script_sig; + in.scriptWitness.stack = script_wit_stack; + + tx_mut.vin.push_back(in); + } + const auto amount_fee = fuzzed_data_provider.ConsumeIntegralInRange<CAmount>(0, amount_in); + const auto amount_out = (amount_in - amount_fee) / num_out; + for (int i = 0; i < num_out; ++i) { + tx_mut.vout.emplace_back(amount_out, P2WSH_OP_TRUE); + } + // TODO vary transaction sizes to catch size-related issues + auto tx = MakeTransactionRef(tx_mut); + // Restore previously removed outpoints, except in-package outpoints + if (!last_tx) { + for (const auto& in : tx->vin) { + Assert(outpoints.insert(in.prevout).second); + } + // Cache the in-package outpoints being made + for (size_t i = 0; i < tx->vout.size(); ++i) { + package_outpoints.emplace(tx->GetHash(), i); + } + } + // We need newly-created values for the duration of this run + for (size_t i = 0; i < tx->vout.size(); ++i) { + outpoints_value[COutPoint(tx->GetHash(), i)] = tx->vout[i].nValue; + } + return tx; + }(); + txs.push_back(tx); + } + + if (fuzzed_data_provider.ConsumeBool()) { + MockTime(fuzzed_data_provider, chainstate); + } + if (fuzzed_data_provider.ConsumeBool()) { + tx_pool.RollingFeeUpdate(); + } + if (fuzzed_data_provider.ConsumeBool()) { + const auto& txid = 
fuzzed_data_provider.ConsumeBool() ? + txs.back()->GetHash() : + PickValue(fuzzed_data_provider, mempool_outpoints).hash; + const auto delta = fuzzed_data_provider.ConsumeIntegralInRange<CAmount>(-50 * COIN, +50 * COIN); + tx_pool.PrioritiseTransaction(txid, delta); + } + + // Remember all added transactions + std::set<CTransactionRef> added; + auto txr = std::make_shared<TransactionsDelta>(added); + RegisterSharedValidationInterface(txr); + const bool bypass_limits = fuzzed_data_provider.ConsumeBool(); + + // When there are multiple transactions in the package, we call ProcessNewPackage(txs, test_accept=false) + // and AcceptToMemoryPool(txs.back(), test_accept=true). When there is only 1 transaction, we might flip it + // (the package is a test accept and ATMP is a submission). + auto single_submit = txs.size() == 1 && fuzzed_data_provider.ConsumeBool(); + + const auto result_package = WITH_LOCK(::cs_main, + return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit)); + // If something went wrong due to a package-specific policy, it might not return a + // validation result for the transaction. + if (result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) { + auto it = result_package.m_tx_results.find(txs.back()->GetWitnessHash()); + Assert(it != result_package.m_tx_results.end()); + Assert(it->second.m_result_type == MempoolAcceptResult::ResultType::VALID || + it->second.m_result_type == MempoolAcceptResult::ResultType::INVALID || + it->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY); + } + + const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, txs.back(), GetTime(), bypass_limits, /*test_accept=*/!single_submit)); + const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; + + SyncWithValidationInterfaceQueue(); + UnregisterSharedValidationInterface(txr); + + // There is only 1 transaction in the package. 
We did a test-package-accept and a ATMP + if (single_submit) { + Assert(accepted != added.empty()); + Assert(accepted == res.m_state.IsValid()); + if (accepted) { + Assert(added.size() == 1); + Assert(txs.back() == *added.begin()); + } + } else { + // This is empty if it fails early checks, or "full" if transactions are looked at deeper + Assert(result_package.m_tx_results.size() == txs.size() || result_package.m_tx_results.empty()); + } + } + + UnregisterSharedValidationInterface(outpoints_updater); + + WITH_LOCK(::cs_main, tx_pool.check(chainstate.CoinsTip(), chainstate.m_chain.Height() + 1)); +} +} // namespace diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp index 74f06b481a..27bb60d6b6 100644 --- a/src/test/fuzz/rpc.cpp +++ b/src/test/fuzz/rpc.cpp @@ -80,6 +80,7 @@ const std::vector<std::string> RPC_COMMANDS_NOT_SAFE_FOR_FUZZING{ "gettxoutproof", // avoid prohibitively slow execution "importmempool", // avoid reading from disk "importwallet", // avoid reading from disk + "loadtxoutset", // avoid reading from disk "loadwallet", // avoid reading from disk "savemempool", // disabled as a precautionary measure: may take a file path argument in the future "setban", // avoid DNS lookups @@ -110,6 +111,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{ "generate", "generateblock", "getaddednodeinfo", + "getaddrmaninfo", "getbestblockhash", "getblock", "getblockchaininfo", @@ -121,6 +123,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{ "getblockstats", "getblocktemplate", "getchaintips", + "getchainstates", "getchaintxstats", "getconnectioncount", "getdeploymentinfo", diff --git a/src/test/fuzz/script.cpp b/src/test/fuzz/script.cpp index acc82f55f6..fe41a8c6ae 100644 --- a/src/test/fuzz/script.cpp +++ b/src/test/fuzz/script.cpp @@ -149,13 +149,16 @@ FUZZ_TARGET(script, .init = initialize_script) const CTxDestination tx_destination_2{ConsumeTxDestination(fuzzed_data_provider)}; const std::string 
encoded_dest{EncodeDestination(tx_destination_1)}; const UniValue json_dest{DescribeAddress(tx_destination_1)}; - Assert(tx_destination_1 == DecodeDestination(encoded_dest)); (void)GetKeyForDestination(/*store=*/{}, tx_destination_1); const CScript dest{GetScriptForDestination(tx_destination_1)}; const bool valid{IsValidDestination(tx_destination_1)}; - Assert(dest.empty() != valid); - Assert(valid == IsValidDestinationString(encoded_dest)); + if (!std::get_if<PubKeyDestination>(&tx_destination_1)) { + // Only try to round trip non-pubkey destinations since PubKeyDestination has no encoding + Assert(dest.empty() != valid); + Assert(tx_destination_1 == DecodeDestination(encoded_dest)); + Assert(valid == IsValidDestinationString(encoded_dest)); + } (void)(tx_destination_1 < tx_destination_2); if (tx_destination_1 == tx_destination_2) { diff --git a/src/test/fuzz/script_assets_test_minimizer.cpp b/src/test/fuzz/script_assets_test_minimizer.cpp index 7862be2f21..66c862a6f9 100644 --- a/src/test/fuzz/script_assets_test_minimizer.cpp +++ b/src/test/fuzz/script_assets_test_minimizer.cpp @@ -54,7 +54,7 @@ CMutableTransaction TxFromHex(const std::string& str) { CMutableTransaction tx; try { - SpanReader{SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, CheckedParseHex(str)} >> tx; + SpanReader{SERIALIZE_TRANSACTION_NO_WITNESS, CheckedParseHex(str)} >> tx; } catch (const std::ios_base::failure&) { throw std::runtime_error("Tx deserialization failure"); } @@ -68,7 +68,7 @@ std::vector<CTxOut> TxOutsFromJSON(const UniValue& univalue) for (size_t i = 0; i < univalue.size(); ++i) { CTxOut txout; try { - SpanReader{SER_DISK, 0, CheckedParseHex(univalue[i].get_str())} >> txout; + SpanReader{0, CheckedParseHex(univalue[i].get_str())} >> txout; } catch (const std::ios_base::failure&) { throw std::runtime_error("Prevout invalid format"); } diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp index ca2218e94c..87ca2f6aed 100644 --- a/src/test/fuzz/util.cpp +++ 
b/src/test/fuzz/util.cpp @@ -173,6 +173,15 @@ CTxDestination ConsumeTxDestination(FuzzedDataProvider& fuzzed_data_provider) no tx_destination = CNoDestination{}; }, [&] { + bool compressed = fuzzed_data_provider.ConsumeBool(); + CPubKey pk{ConstructPubKeyBytes( + fuzzed_data_provider, + ConsumeFixedLengthByteVector(fuzzed_data_provider, (compressed ? CPubKey::COMPRESSED_SIZE : CPubKey::SIZE)), + compressed + )}; + tx_destination = PubKeyDestination{pk}; + }, + [&] { tx_destination = PKHash{ConsumeUInt160(fuzzed_data_provider)}; }, [&] { @@ -188,15 +197,11 @@ CTxDestination ConsumeTxDestination(FuzzedDataProvider& fuzzed_data_provider) no tx_destination = WitnessV1Taproot{XOnlyPubKey{ConsumeUInt256(fuzzed_data_provider)}}; }, [&] { - WitnessUnknown witness_unknown{}; - witness_unknown.version = fuzzed_data_provider.ConsumeIntegralInRange(2, 16); - std::vector<uint8_t> witness_unknown_program_1{fuzzed_data_provider.ConsumeBytes<uint8_t>(40)}; - if (witness_unknown_program_1.size() < 2) { - witness_unknown_program_1 = {0, 0}; + std::vector<unsigned char> program{ConsumeRandomLengthByteVector(fuzzed_data_provider, /*max_length=*/40)}; + if (program.size() < 2) { + program = {0, 0}; } - witness_unknown.length = witness_unknown_program_1.size(); - std::copy(witness_unknown_program_1.begin(), witness_unknown_program_1.end(), witness_unknown.program); - tx_destination = witness_unknown; + tx_destination = WitnessUnknown{fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(2, 16), program}; })}; Assert(call_size == std::variant_size_v<CTxDestination>); return tx_destination; diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp index a990797ca7..54afcef989 100644 --- a/src/test/hash_tests.cpp +++ b/src/test/hash_tests.cpp @@ -122,7 +122,7 @@ BOOST_AUTO_TEST_CASE(siphash) (uint64_t(x+4)<<32)|(uint64_t(x+5)<<40)|(uint64_t(x+6)<<48)|(uint64_t(x+7)<<56)); } - CHashWriter ss(SER_DISK, CLIENT_VERSION); + CHashWriter ss{CLIENT_VERSION}; CMutableTransaction tx; // 
Note these tests were originally written with tx.nVersion=1 // and the test would be affected by default tx version bumps if not fixed. diff --git a/src/test/miniminer_tests.cpp b/src/test/miniminer_tests.cpp index da724f8d7b..f65356936b 100644 --- a/src/test/miniminer_tests.cpp +++ b/src/test/miniminer_tests.cpp @@ -77,66 +77,66 @@ BOOST_FIXTURE_TEST_CASE(miniminer_1p1c, TestChain100Setup) const CAmount normal_fee{CENT/200}; const CAmount high_fee{CENT/10}; - // Create a parent tx1 and child tx2 with normal fees: - const auto tx1 = make_tx({COutPoint{m_coinbase_txns[0]->GetHash(), 0}}, /*num_outputs=*/2); + // Create a parent tx0 and child tx1 with normal fees: + const auto tx0 = make_tx({COutPoint{m_coinbase_txns[0]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx0)); + const auto tx1 = make_tx({COutPoint{tx0->GetHash(), 0}}, /*num_outputs=*/1); pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx1)); - const auto tx2 = make_tx({COutPoint{tx1->GetHash(), 0}}, /*num_outputs=*/1); - pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx2)); - // Create a low-feerate parent tx3 and high-feerate child tx4 (cpfp) - const auto tx3 = make_tx({COutPoint{m_coinbase_txns[1]->GetHash(), 0}}, /*num_outputs=*/2); - pool.addUnchecked(entry.Fee(low_fee).FromTx(tx3)); - const auto tx4 = make_tx({COutPoint{tx3->GetHash(), 0}}, /*num_outputs=*/1); - pool.addUnchecked(entry.Fee(high_fee).FromTx(tx4)); + // Create a low-feerate parent tx2 and high-feerate child tx3 (cpfp) + const auto tx2 = make_tx({COutPoint{m_coinbase_txns[1]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(low_fee).FromTx(tx2)); + const auto tx3 = make_tx({COutPoint{tx2->GetHash(), 0}}, /*num_outputs=*/1); + pool.addUnchecked(entry.Fee(high_fee).FromTx(tx3)); - // Create a parent tx5 and child tx6 where both have low fees - const auto tx5 = make_tx({COutPoint{m_coinbase_txns[2]->GetHash(), 0}}, /*num_outputs=*/2); + // Create a parent tx4 and child tx5 where both 
have low fees + const auto tx4 = make_tx({COutPoint{m_coinbase_txns[2]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(low_fee).FromTx(tx4)); + const auto tx5 = make_tx({COutPoint{tx4->GetHash(), 0}}, /*num_outputs=*/1); pool.addUnchecked(entry.Fee(low_fee).FromTx(tx5)); - const auto tx6 = make_tx({COutPoint{tx5->GetHash(), 0}}, /*num_outputs=*/1); - pool.addUnchecked(entry.Fee(low_fee).FromTx(tx6)); - // Make tx6's modified fee much higher than its base fee. This should cause it to pass + // Make tx5's modified fee much higher than its base fee. This should cause it to pass // the fee-related checks despite being low-feerate. - pool.PrioritiseTransaction(tx6->GetHash(), CENT/100); + pool.PrioritiseTransaction(tx5->GetHash(), CENT/100); - // Create a high-feerate parent tx7, low-feerate child tx8 - const auto tx7 = make_tx({COutPoint{m_coinbase_txns[3]->GetHash(), 0}}, /*num_outputs=*/2); - pool.addUnchecked(entry.Fee(high_fee).FromTx(tx7)); - const auto tx8 = make_tx({COutPoint{tx7->GetHash(), 0}}, /*num_outputs=*/1); - pool.addUnchecked(entry.Fee(low_fee).FromTx(tx8)); + // Create a high-feerate parent tx6, low-feerate child tx7 + const auto tx6 = make_tx({COutPoint{m_coinbase_txns[3]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(high_fee).FromTx(tx6)); + const auto tx7 = make_tx({COutPoint{tx6->GetHash(), 0}}, /*num_outputs=*/1); + pool.addUnchecked(entry.Fee(low_fee).FromTx(tx7)); std::vector<COutPoint> all_unspent_outpoints({ - COutPoint{tx1->GetHash(), 1}, - COutPoint{tx2->GetHash(), 0}, - COutPoint{tx3->GetHash(), 1}, - COutPoint{tx4->GetHash(), 0}, - COutPoint{tx5->GetHash(), 1}, - COutPoint{tx6->GetHash(), 0}, - COutPoint{tx7->GetHash(), 1}, - COutPoint{tx8->GetHash(), 0} - }); - for (const auto& outpoint : all_unspent_outpoints) BOOST_CHECK(!pool.isSpent(outpoint)); - - std::vector<COutPoint> all_spent_outpoints({ + COutPoint{tx0->GetHash(), 1}, COutPoint{tx1->GetHash(), 0}, + COutPoint{tx2->GetHash(), 1}, 
COutPoint{tx3->GetHash(), 0}, + COutPoint{tx4->GetHash(), 1}, COutPoint{tx5->GetHash(), 0}, + COutPoint{tx6->GetHash(), 1}, COutPoint{tx7->GetHash(), 0} }); + for (const auto& outpoint : all_unspent_outpoints) BOOST_CHECK(!pool.isSpent(outpoint)); + + std::vector<COutPoint> all_spent_outpoints({ + COutPoint{tx0->GetHash(), 0}, + COutPoint{tx2->GetHash(), 0}, + COutPoint{tx4->GetHash(), 0}, + COutPoint{tx6->GetHash(), 0} + }); for (const auto& outpoint : all_spent_outpoints) BOOST_CHECK(pool.GetConflictTx(outpoint) != nullptr); std::vector<COutPoint> all_parent_outputs({ - COutPoint{tx1->GetHash(), 0}, - COutPoint{tx1->GetHash(), 1}, - COutPoint{tx3->GetHash(), 0}, - COutPoint{tx3->GetHash(), 1}, - COutPoint{tx5->GetHash(), 0}, - COutPoint{tx5->GetHash(), 1}, - COutPoint{tx7->GetHash(), 0}, - COutPoint{tx7->GetHash(), 1} + COutPoint{tx0->GetHash(), 0}, + COutPoint{tx0->GetHash(), 1}, + COutPoint{tx2->GetHash(), 0}, + COutPoint{tx2->GetHash(), 1}, + COutPoint{tx4->GetHash(), 0}, + COutPoint{tx4->GetHash(), 1}, + COutPoint{tx6->GetHash(), 0}, + COutPoint{tx6->GetHash(), 1} }); - std::vector<CTransactionRef> all_transactions{tx1, tx2, tx3, tx4, tx5, tx6, tx7, tx8}; + std::vector<CTransactionRef> all_transactions{tx0, tx1, tx2, tx3, tx4, tx5, tx6, tx7}; struct TxDimensions { int32_t vsize; CAmount mod_fee; CFeeRate feerate; }; @@ -178,47 +178,47 @@ BOOST_FIXTURE_TEST_CASE(miniminer_1p1c, TestChain100Setup) BOOST_CHECK(sanity_check(all_transactions, bump_fees)); BOOST_CHECK_EQUAL(bump_fees.size(), all_unspent_outpoints.size()); - // Check tx1 bumpfee: no other bumper. - const TxDimensions& tx1_dimensions = tx_dims.find(tx1->GetHash())->second; - CAmount bumpfee1 = Find(bump_fees, COutPoint{tx1->GetHash(), 1}); - if (target_feerate <= tx1_dimensions.feerate) { - BOOST_CHECK_EQUAL(bumpfee1, 0); + // Check tx0 bumpfee: no other bumper. 
+ const TxDimensions& tx0_dimensions = tx_dims.find(tx0->GetHash())->second; + CAmount bumpfee0 = Find(bump_fees, COutPoint{tx0->GetHash(), 1}); + if (target_feerate <= tx0_dimensions.feerate) { + BOOST_CHECK_EQUAL(bumpfee0, 0); } else { - // Difference is fee to bump tx1 from current to target feerate. - BOOST_CHECK_EQUAL(bumpfee1, target_feerate.GetFee(tx1_dimensions.vsize) - tx1_dimensions.mod_fee); + // Difference is fee to bump tx0 from current to target feerate. + BOOST_CHECK_EQUAL(bumpfee0, target_feerate.GetFee(tx0_dimensions.vsize) - tx0_dimensions.mod_fee); } - // Check tx3 bumpfee: assisted by tx4. + // Check tx2 bumpfee: assisted by tx3. + const TxDimensions& tx2_dimensions = tx_dims.find(tx2->GetHash())->second; const TxDimensions& tx3_dimensions = tx_dims.find(tx3->GetHash())->second; - const TxDimensions& tx4_dimensions = tx_dims.find(tx4->GetHash())->second; - const CFeeRate tx3_feerate = CFeeRate(tx3_dimensions.mod_fee + tx4_dimensions.mod_fee, tx3_dimensions.vsize + tx4_dimensions.vsize); - CAmount bumpfee3 = Find(bump_fees, COutPoint{tx3->GetHash(), 1}); - if (target_feerate <= tx3_feerate) { - // As long as target feerate is below tx4's ancestor feerate, there is no bump fee. - BOOST_CHECK_EQUAL(bumpfee3, 0); + const CFeeRate tx2_feerate = CFeeRate(tx2_dimensions.mod_fee + tx3_dimensions.mod_fee, tx2_dimensions.vsize + tx3_dimensions.vsize); + CAmount bumpfee2 = Find(bump_fees, COutPoint{tx2->GetHash(), 1}); + if (target_feerate <= tx2_feerate) { + // As long as target feerate is below tx3's ancestor feerate, there is no bump fee. + BOOST_CHECK_EQUAL(bumpfee2, 0); } else { - // Difference is fee to bump tx3 from current to target feerate, without tx4. - BOOST_CHECK_EQUAL(bumpfee3, target_feerate.GetFee(tx3_dimensions.vsize) - tx3_dimensions.mod_fee); + // Difference is fee to bump tx2 from current to target feerate, without tx3. 
+ BOOST_CHECK_EQUAL(bumpfee2, target_feerate.GetFee(tx2_dimensions.vsize) - tx2_dimensions.mod_fee); } - // If tx6’s modified fees are sufficient for tx5 and tx6 to be picked + // If tx5’s modified fees are sufficient for tx4 and tx5 to be picked // into the block, our prospective new transaction would not need to - // bump tx5 when using tx5’s second output. If however even tx6’s + // bump tx4 when using tx4’s second output. If however even tx5’s // modified fee (which essentially indicates "effective feerate") is - // not sufficient to bump tx5, using the second output of tx5 would - // require our transaction to bump tx5 from scratch since we evaluate + // not sufficient to bump tx4, using the second output of tx4 would + // require our transaction to bump tx4 from scratch since we evaluate // transaction packages per ancestor sets and do not consider multiple // children’s fees. + const TxDimensions& tx4_dimensions = tx_dims.find(tx4->GetHash())->second; const TxDimensions& tx5_dimensions = tx_dims.find(tx5->GetHash())->second; - const TxDimensions& tx6_dimensions = tx_dims.find(tx6->GetHash())->second; - const CFeeRate tx5_feerate = CFeeRate(tx5_dimensions.mod_fee + tx6_dimensions.mod_fee, tx5_dimensions.vsize + tx6_dimensions.vsize); - CAmount bumpfee5 = Find(bump_fees, COutPoint{tx5->GetHash(), 1}); - if (target_feerate <= tx5_feerate) { - // As long as target feerate is below tx6's ancestor feerate, there is no bump fee. - BOOST_CHECK_EQUAL(bumpfee5, 0); + const CFeeRate tx4_feerate = CFeeRate(tx4_dimensions.mod_fee + tx5_dimensions.mod_fee, tx4_dimensions.vsize + tx5_dimensions.vsize); + CAmount bumpfee4 = Find(bump_fees, COutPoint{tx4->GetHash(), 1}); + if (target_feerate <= tx4_feerate) { + // As long as target feerate is below tx5's ancestor feerate, there is no bump fee. + BOOST_CHECK_EQUAL(bumpfee4, 0); } else { - // Difference is fee to bump tx5 from current to target feerate, without tx6. 
- BOOST_CHECK_EQUAL(bumpfee5, target_feerate.GetFee(tx5_dimensions.vsize) - tx5_dimensions.mod_fee); + // Difference is fee to bump tx4 from current to target feerate, without tx5. + BOOST_CHECK_EQUAL(bumpfee4, target_feerate.GetFee(tx4_dimensions.vsize) - tx4_dimensions.mod_fee); } } // Spent outpoints should usually not be requested as they would not be @@ -240,36 +240,36 @@ BOOST_FIXTURE_TEST_CASE(miniminer_1p1c, TestChain100Setup) // even though only one of them is in a to-be-replaced transaction. BOOST_CHECK(sanity_check(all_transactions, bump_fees)); - // Check tx1 bumpfee: no other bumper. - const TxDimensions& tx1_dimensions = tx_dims.find(tx1->GetHash())->second; - CAmount it1_spent = Find(bump_fees, COutPoint{tx1->GetHash(), 0}); - if (target_feerate <= tx1_dimensions.feerate) { - BOOST_CHECK_EQUAL(it1_spent, 0); + // Check tx0 bumpfee: no other bumper. + const TxDimensions& tx0_dimensions = tx_dims.find(tx0->GetHash())->second; + CAmount it0_spent = Find(bump_fees, COutPoint{tx0->GetHash(), 0}); + if (target_feerate <= tx0_dimensions.feerate) { + BOOST_CHECK_EQUAL(it0_spent, 0); } else { - // Difference is fee to bump tx1 from current to target feerate. - BOOST_CHECK_EQUAL(it1_spent, target_feerate.GetFee(tx1_dimensions.vsize) - tx1_dimensions.mod_fee); + // Difference is fee to bump tx0 from current to target feerate. + BOOST_CHECK_EQUAL(it0_spent, target_feerate.GetFee(tx0_dimensions.vsize) - tx0_dimensions.mod_fee); } - // Check tx3 bumpfee: no other bumper, because tx4 is to-be-replaced. - const TxDimensions& tx3_dimensions = tx_dims.find(tx3->GetHash())->second; - const CFeeRate tx3_feerate_unbumped = tx3_dimensions.feerate; - auto it3_spent = Find(bump_fees, COutPoint{tx3->GetHash(), 0}); - if (target_feerate <= tx3_feerate_unbumped) { - BOOST_CHECK_EQUAL(it3_spent, 0); + // Check tx2 bumpfee: no other bumper, because tx3 is to-be-replaced. 
+ const TxDimensions& tx2_dimensions = tx_dims.find(tx2->GetHash())->second; + const CFeeRate tx2_feerate_unbumped = tx2_dimensions.feerate; + auto it2_spent = Find(bump_fees, COutPoint{tx2->GetHash(), 0}); + if (target_feerate <= tx2_feerate_unbumped) { + BOOST_CHECK_EQUAL(it2_spent, 0); } else { - // Difference is fee to bump tx3 from current to target feerate, without tx4. - BOOST_CHECK_EQUAL(it3_spent, target_feerate.GetFee(tx3_dimensions.vsize) - tx3_dimensions.mod_fee); + // Difference is fee to bump tx2 from current to target feerate, without tx3. + BOOST_CHECK_EQUAL(it2_spent, target_feerate.GetFee(tx2_dimensions.vsize) - tx2_dimensions.mod_fee); } - // Check tx5 bumpfee: no other bumper, because tx6 is to-be-replaced. - const TxDimensions& tx5_dimensions = tx_dims.find(tx5->GetHash())->second; - const CFeeRate tx5_feerate_unbumped = tx5_dimensions.feerate; - auto it5_spent = Find(bump_fees, COutPoint{tx5->GetHash(), 0}); - if (target_feerate <= tx5_feerate_unbumped) { - BOOST_CHECK_EQUAL(it5_spent, 0); + // Check tx4 bumpfee: no other bumper, because tx5 is to-be-replaced. + const TxDimensions& tx4_dimensions = tx_dims.find(tx4->GetHash())->second; + const CFeeRate tx4_feerate_unbumped = tx4_dimensions.feerate; + auto it4_spent = Find(bump_fees, COutPoint{tx4->GetHash(), 0}); + if (target_feerate <= tx4_feerate_unbumped) { + BOOST_CHECK_EQUAL(it4_spent, 0); } else { - // Difference is fee to bump tx5 from current to target feerate, without tx6. - BOOST_CHECK_EQUAL(it5_spent, target_feerate.GetFee(tx5_dimensions.vsize) - tx5_dimensions.mod_fee); + // Difference is fee to bump tx4 from current to target feerate, without tx5. + BOOST_CHECK_EQUAL(it4_spent, target_feerate.GetFee(tx4_dimensions.vsize) - tx4_dimensions.mod_fee); } } } @@ -277,145 +277,178 @@ BOOST_FIXTURE_TEST_CASE(miniminer_1p1c, TestChain100Setup) BOOST_FIXTURE_TEST_CASE(miniminer_overlap, TestChain100Setup) { +/* Tx graph for `miniminer_overlap` unit test: + * + * coinbase_tx [mined] ... 
block-chain + * ------------------------------------------------- + * / | \ \ ... mempool + * / | \ | + * tx0 tx1 tx2 tx4 + * [low] [med] [high] [high] + * \ | / | + * \ | / tx5 + * \ | / [low] + * tx3 / \ + * [high] tx6 tx7 + * [med] [high] + * + * NOTE: + * -> "low"/"med"/"high" denote the _absolute_ fee of each tx + * -> tx3 has 3 inputs and 3 outputs, all other txs have 1 input and 2 outputs + * -> tx3's feerate is lower than tx2's, as tx3 has more weight (due to having more inputs and outputs) + * + * -> tx2_FR = high / tx2_vsize + * -> tx3_FR = high / tx3_vsize + * -> tx3_ASFR = (low+med+high+high) / (tx0_vsize + tx1_vsize + tx2_vsize + tx3_vsize) + * -> tx4_FR = high / tx4_vsize + * -> tx6_ASFR = (high+low+med) / (tx4_vsize + tx5_vsize + tx6_vsize) + * -> tx7_ASFR = (high+low+high) / (tx4_vsize + tx5_vsize + tx7_vsize) */ + CTxMemPool& pool = *Assert(m_node.mempool); LOCK2(::cs_main, pool.cs); TestMemPoolEntryHelper entry; - const CAmount low_fee{CENT/2000}; - const CAmount med_fee{CENT/200}; - const CAmount high_fee{CENT/10}; - - // Create 3 parents of different feerates, and 1 child spending from all 3. - const auto tx1 = make_tx({COutPoint{m_coinbase_txns[0]->GetHash(), 0}}, /*num_outputs=*/2); - pool.addUnchecked(entry.Fee(low_fee).FromTx(tx1)); - const auto tx2 = make_tx({COutPoint{m_coinbase_txns[1]->GetHash(), 0}}, /*num_outputs=*/2); - pool.addUnchecked(entry.Fee(med_fee).FromTx(tx2)); - const auto tx3 = make_tx({COutPoint{m_coinbase_txns[2]->GetHash(), 0}}, /*num_outputs=*/2); + const CAmount low_fee{CENT/2000}; // 500 ṩ + const CAmount med_fee{CENT/200}; // 5000 ṩ + const CAmount high_fee{CENT/10}; // 100_000 ṩ + + // Create 3 parents of different feerates, and 1 child spending outputs from all 3 parents. 
+ const auto tx0 = make_tx({COutPoint{m_coinbase_txns[0]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(low_fee).FromTx(tx0)); + const auto tx1 = make_tx({COutPoint{m_coinbase_txns[1]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(med_fee).FromTx(tx1)); + const auto tx2 = make_tx({COutPoint{m_coinbase_txns[2]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(high_fee).FromTx(tx2)); + const auto tx3 = make_tx({COutPoint{tx0->GetHash(), 0}, COutPoint{tx1->GetHash(), 0}, COutPoint{tx2->GetHash(), 0}}, /*num_outputs=*/3); pool.addUnchecked(entry.Fee(high_fee).FromTx(tx3)); - const auto tx4 = make_tx({COutPoint{tx1->GetHash(), 0}, COutPoint{tx2->GetHash(), 0}, COutPoint{tx3->GetHash(), 0}}, /*num_outputs=*/3); - pool.addUnchecked(entry.Fee(high_fee).FromTx(tx4)); // Create 1 grandparent and 1 parent, then 2 children. - const auto tx5 = make_tx({COutPoint{m_coinbase_txns[3]->GetHash(), 0}}, /*num_outputs=*/2); - pool.addUnchecked(entry.Fee(high_fee).FromTx(tx5)); - const auto tx6 = make_tx({COutPoint{tx5->GetHash(), 0}}, /*num_outputs=*/3); - pool.addUnchecked(entry.Fee(low_fee).FromTx(tx6)); - const auto tx7 = make_tx({COutPoint{tx6->GetHash(), 0}}, /*num_outputs=*/2); - pool.addUnchecked(entry.Fee(med_fee).FromTx(tx7)); - const auto tx8 = make_tx({COutPoint{tx6->GetHash(), 1}}, /*num_outputs=*/2); - pool.addUnchecked(entry.Fee(high_fee).FromTx(tx8)); - - std::vector<CTransactionRef> all_transactions{tx1, tx2, tx3, tx4, tx5, tx6, tx7, tx8}; + const auto tx4 = make_tx({COutPoint{m_coinbase_txns[3]->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(high_fee).FromTx(tx4)); + const auto tx5 = make_tx({COutPoint{tx4->GetHash(), 0}}, /*num_outputs=*/3); + pool.addUnchecked(entry.Fee(low_fee).FromTx(tx5)); + const auto tx6 = make_tx({COutPoint{tx5->GetHash(), 0}}, /*num_outputs=*/2); + pool.addUnchecked(entry.Fee(med_fee).FromTx(tx6)); + const auto tx7 = make_tx({COutPoint{tx5->GetHash(), 1}}, 
/*num_outputs=*/2); + pool.addUnchecked(entry.Fee(high_fee).FromTx(tx7)); + + std::vector<CTransactionRef> all_transactions{tx0, tx1, tx2, tx3, tx4, tx5, tx6, tx7}; std::vector<int64_t> tx_vsizes; tx_vsizes.reserve(all_transactions.size()); for (const auto& tx : all_transactions) tx_vsizes.push_back(GetVirtualTransactionSize(*tx)); std::vector<COutPoint> all_unspent_outpoints({ + COutPoint{tx0->GetHash(), 1}, COutPoint{tx1->GetHash(), 1}, COutPoint{tx2->GetHash(), 1}, + COutPoint{tx3->GetHash(), 0}, COutPoint{tx3->GetHash(), 1}, - COutPoint{tx4->GetHash(), 0}, + COutPoint{tx3->GetHash(), 2}, COutPoint{tx4->GetHash(), 1}, - COutPoint{tx4->GetHash(), 2}, - COutPoint{tx5->GetHash(), 1}, - COutPoint{tx6->GetHash(), 2}, - COutPoint{tx7->GetHash(), 0}, - COutPoint{tx8->GetHash(), 0} + COutPoint{tx5->GetHash(), 2}, + COutPoint{tx6->GetHash(), 0}, + COutPoint{tx7->GetHash(), 0} }); for (const auto& outpoint : all_unspent_outpoints) BOOST_CHECK(!pool.isSpent(outpoint)); - const auto tx3_feerate = CFeeRate(high_fee, tx_vsizes[2]); - const auto tx4_feerate = CFeeRate(high_fee, tx_vsizes[3]); - // tx4's feerate is lower than tx3's. same fee, different weight. - BOOST_CHECK(tx3_feerate > tx4_feerate); - const auto tx4_anc_feerate = CFeeRate(low_fee + med_fee + high_fee, tx_vsizes[0] + tx_vsizes[1] + tx_vsizes[3]); - const auto tx5_feerate = CFeeRate(high_fee, tx_vsizes[4]); - const auto tx7_anc_feerate = CFeeRate(low_fee + med_fee, tx_vsizes[5] + tx_vsizes[6]); - const auto tx8_anc_feerate = CFeeRate(low_fee + high_fee, tx_vsizes[5] + tx_vsizes[7]); - BOOST_CHECK(tx5_feerate > tx7_anc_feerate); - BOOST_CHECK(tx5_feerate > tx8_anc_feerate); + const auto tx2_feerate = CFeeRate(high_fee, tx_vsizes[2]); + const auto tx3_feerate = CFeeRate(high_fee, tx_vsizes[3]); + // tx3's feerate is lower than tx2's. same fee, different weight. 
+ BOOST_CHECK(tx2_feerate > tx3_feerate); + const auto tx3_anc_feerate = CFeeRate(low_fee + med_fee + high_fee + high_fee, tx_vsizes[0] + tx_vsizes[1] + tx_vsizes[2] + tx_vsizes[3]); + const auto tx3_iter = pool.GetIter(tx3->GetHash()); + BOOST_CHECK(tx3_anc_feerate == CFeeRate(tx3_iter.value()->GetModFeesWithAncestors(), tx3_iter.value()->GetSizeWithAncestors())); + const auto tx4_feerate = CFeeRate(high_fee, tx_vsizes[4]); + const auto tx6_anc_feerate = CFeeRate(high_fee + low_fee + med_fee, tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[6]); + const auto tx6_iter = pool.GetIter(tx6->GetHash()); + BOOST_CHECK(tx6_anc_feerate == CFeeRate(tx6_iter.value()->GetModFeesWithAncestors(), tx6_iter.value()->GetSizeWithAncestors())); + const auto tx7_anc_feerate = CFeeRate(high_fee + low_fee + high_fee, tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[7]); + const auto tx7_iter = pool.GetIter(tx7->GetHash()); + BOOST_CHECK(tx7_anc_feerate == CFeeRate(tx7_iter.value()->GetModFeesWithAncestors(), tx7_iter.value()->GetSizeWithAncestors())); + BOOST_CHECK(tx4_feerate > tx6_anc_feerate); + BOOST_CHECK(tx4_feerate > tx7_anc_feerate); // Extremely high feerate: everybody's bumpfee is from their full ancestor set. 
{ node::MiniMiner mini_miner(pool, all_unspent_outpoints); const CFeeRate very_high_feerate(COIN); - BOOST_CHECK(tx4_anc_feerate < very_high_feerate); + BOOST_CHECK(tx3_anc_feerate < very_high_feerate); BOOST_CHECK(mini_miner.IsReadyToCalculate()); auto bump_fees = mini_miner.CalculateBumpFees(very_high_feerate); BOOST_CHECK_EQUAL(bump_fees.size(), all_unspent_outpoints.size()); BOOST_CHECK(!mini_miner.IsReadyToCalculate()); BOOST_CHECK(sanity_check(all_transactions, bump_fees)); - const auto tx1_bumpfee = bump_fees.find(COutPoint{tx1->GetHash(), 1}); - BOOST_CHECK(tx1_bumpfee != bump_fees.end()); - BOOST_CHECK_EQUAL(tx1_bumpfee->second, very_high_feerate.GetFee(tx_vsizes[0]) - low_fee); - const auto tx4_bumpfee = bump_fees.find(COutPoint{tx4->GetHash(), 0}); - BOOST_CHECK(tx4_bumpfee != bump_fees.end()); - BOOST_CHECK_EQUAL(tx4_bumpfee->second, + const auto tx0_bumpfee = bump_fees.find(COutPoint{tx0->GetHash(), 1}); + BOOST_CHECK(tx0_bumpfee != bump_fees.end()); + BOOST_CHECK_EQUAL(tx0_bumpfee->second, very_high_feerate.GetFee(tx_vsizes[0]) - low_fee); + const auto tx3_bumpfee = bump_fees.find(COutPoint{tx3->GetHash(), 0}); + BOOST_CHECK(tx3_bumpfee != bump_fees.end()); + BOOST_CHECK_EQUAL(tx3_bumpfee->second, very_high_feerate.GetFee(tx_vsizes[0] + tx_vsizes[1] + tx_vsizes[2] + tx_vsizes[3]) - (low_fee + med_fee + high_fee + high_fee)); + const auto tx6_bumpfee = bump_fees.find(COutPoint{tx6->GetHash(), 0}); + BOOST_CHECK(tx6_bumpfee != bump_fees.end()); + BOOST_CHECK_EQUAL(tx6_bumpfee->second, + very_high_feerate.GetFee(tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[6]) - (high_fee + low_fee + med_fee)); const auto tx7_bumpfee = bump_fees.find(COutPoint{tx7->GetHash(), 0}); BOOST_CHECK(tx7_bumpfee != bump_fees.end()); BOOST_CHECK_EQUAL(tx7_bumpfee->second, - very_high_feerate.GetFee(tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[6]) - (high_fee + low_fee + med_fee)); - const auto tx8_bumpfee = bump_fees.find(COutPoint{tx8->GetHash(), 0}); - BOOST_CHECK(tx8_bumpfee != 
bump_fees.end()); - BOOST_CHECK_EQUAL(tx8_bumpfee->second, very_high_feerate.GetFee(tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[7]) - (high_fee + low_fee + high_fee)); - // Total fees: if spending multiple outputs from tx4 don't double-count fees. - node::MiniMiner mini_miner_total_tx4(pool, {COutPoint{tx4->GetHash(), 0}, COutPoint{tx4->GetHash(), 1}}); - BOOST_CHECK(mini_miner_total_tx4.IsReadyToCalculate()); - const auto tx4_bump_fee = mini_miner_total_tx4.CalculateTotalBumpFees(very_high_feerate); - BOOST_CHECK(!mini_miner_total_tx4.IsReadyToCalculate()); - BOOST_CHECK(tx4_bump_fee.has_value()); - BOOST_CHECK_EQUAL(tx4_bump_fee.value(), + // Total fees: if spending multiple outputs from tx3 don't double-count fees. + node::MiniMiner mini_miner_total_tx3(pool, {COutPoint{tx3->GetHash(), 0}, COutPoint{tx3->GetHash(), 1}}); + BOOST_CHECK(mini_miner_total_tx3.IsReadyToCalculate()); + const auto tx3_bump_fee = mini_miner_total_tx3.CalculateTotalBumpFees(very_high_feerate); + BOOST_CHECK(!mini_miner_total_tx3.IsReadyToCalculate()); + BOOST_CHECK(tx3_bump_fee.has_value()); + BOOST_CHECK_EQUAL(tx3_bump_fee.value(), very_high_feerate.GetFee(tx_vsizes[0] + tx_vsizes[1] + tx_vsizes[2] + tx_vsizes[3]) - (low_fee + med_fee + high_fee + high_fee)); - // Total fees: if spending both tx7 and tx8, don't double-count fees. - node::MiniMiner mini_miner_tx7_tx8(pool, {COutPoint{tx7->GetHash(), 0}, COutPoint{tx8->GetHash(), 0}}); - BOOST_CHECK(mini_miner_tx7_tx8.IsReadyToCalculate()); - const auto tx7_tx8_bumpfee = mini_miner_tx7_tx8.CalculateTotalBumpFees(very_high_feerate); - BOOST_CHECK(!mini_miner_tx7_tx8.IsReadyToCalculate()); - BOOST_CHECK(tx7_tx8_bumpfee.has_value()); - BOOST_CHECK_EQUAL(tx7_tx8_bumpfee.value(), + // Total fees: if spending both tx6 and tx7, don't double-count fees. 
+ node::MiniMiner mini_miner_tx6_tx7(pool, {COutPoint{tx6->GetHash(), 0}, COutPoint{tx7->GetHash(), 0}}); + BOOST_CHECK(mini_miner_tx6_tx7.IsReadyToCalculate()); + const auto tx6_tx7_bumpfee = mini_miner_tx6_tx7.CalculateTotalBumpFees(very_high_feerate); + BOOST_CHECK(!mini_miner_tx6_tx7.IsReadyToCalculate()); + BOOST_CHECK(tx6_tx7_bumpfee.has_value()); + BOOST_CHECK_EQUAL(tx6_tx7_bumpfee.value(), very_high_feerate.GetFee(tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[6] + tx_vsizes[7]) - (high_fee + low_fee + med_fee + high_fee)); } - // Feerate just below tx5: tx7 and tx8 have different bump fees. + // Feerate just below tx4: tx6 and tx7 have different bump fees. { - const auto just_below_tx5 = CFeeRate(tx5_feerate.GetFeePerK() - 5); + const auto just_below_tx4 = CFeeRate(tx4_feerate.GetFeePerK() - 5); node::MiniMiner mini_miner(pool, all_unspent_outpoints); BOOST_CHECK(mini_miner.IsReadyToCalculate()); - auto bump_fees = mini_miner.CalculateBumpFees(just_below_tx5); + auto bump_fees = mini_miner.CalculateBumpFees(just_below_tx4); BOOST_CHECK(!mini_miner.IsReadyToCalculate()); BOOST_CHECK_EQUAL(bump_fees.size(), all_unspent_outpoints.size()); BOOST_CHECK(sanity_check(all_transactions, bump_fees)); + const auto tx6_bumpfee = bump_fees.find(COutPoint{tx6->GetHash(), 0}); + BOOST_CHECK(tx6_bumpfee != bump_fees.end()); + BOOST_CHECK_EQUAL(tx6_bumpfee->second, just_below_tx4.GetFee(tx_vsizes[5] + tx_vsizes[6]) - (low_fee + med_fee)); const auto tx7_bumpfee = bump_fees.find(COutPoint{tx7->GetHash(), 0}); BOOST_CHECK(tx7_bumpfee != bump_fees.end()); - BOOST_CHECK_EQUAL(tx7_bumpfee->second, just_below_tx5.GetFee(tx_vsizes[5] + tx_vsizes[6]) - (low_fee + med_fee)); - const auto tx8_bumpfee = bump_fees.find(COutPoint{tx8->GetHash(), 0}); - BOOST_CHECK(tx8_bumpfee != bump_fees.end()); - BOOST_CHECK_EQUAL(tx8_bumpfee->second, just_below_tx5.GetFee(tx_vsizes[5] + tx_vsizes[7]) - (low_fee + high_fee)); - // Total fees: if spending both tx7 and tx8, don't double-count fees. 
- node::MiniMiner mini_miner_tx7_tx8(pool, {COutPoint{tx7->GetHash(), 0}, COutPoint{tx8->GetHash(), 0}}); - BOOST_CHECK(mini_miner_tx7_tx8.IsReadyToCalculate()); - const auto tx7_tx8_bumpfee = mini_miner_tx7_tx8.CalculateTotalBumpFees(just_below_tx5); - BOOST_CHECK(!mini_miner_tx7_tx8.IsReadyToCalculate()); - BOOST_CHECK(tx7_tx8_bumpfee.has_value()); - BOOST_CHECK_EQUAL(tx7_tx8_bumpfee.value(), just_below_tx5.GetFee(tx_vsizes[5] + tx_vsizes[6]) - (low_fee + med_fee)); + BOOST_CHECK_EQUAL(tx7_bumpfee->second, just_below_tx4.GetFee(tx_vsizes[5] + tx_vsizes[7]) - (low_fee + high_fee)); + // Total fees: if spending both tx6 and tx7, don't double-count fees. + node::MiniMiner mini_miner_tx6_tx7(pool, {COutPoint{tx6->GetHash(), 0}, COutPoint{tx7->GetHash(), 0}}); + BOOST_CHECK(mini_miner_tx6_tx7.IsReadyToCalculate()); + const auto tx6_tx7_bumpfee = mini_miner_tx6_tx7.CalculateTotalBumpFees(just_below_tx4); + BOOST_CHECK(!mini_miner_tx6_tx7.IsReadyToCalculate()); + BOOST_CHECK(tx6_tx7_bumpfee.has_value()); + BOOST_CHECK_EQUAL(tx6_tx7_bumpfee.value(), just_below_tx4.GetFee(tx_vsizes[5] + tx_vsizes[6]) - (low_fee + med_fee)); } - // Feerate between tx7 and tx8's ancestor feerates: don't need to bump tx6 because tx8 already does. + // Feerate between tx6 and tx7's ancestor feerates: don't need to bump tx5 because tx7 already does. 
{ - const auto just_above_tx7 = CFeeRate(med_fee + 10, tx_vsizes[6]); - BOOST_CHECK(just_above_tx7 <= CFeeRate(low_fee + high_fee, tx_vsizes[5] + tx_vsizes[7])); + const auto just_above_tx6 = CFeeRate(med_fee + 10, tx_vsizes[6]); + BOOST_CHECK(just_above_tx6 <= CFeeRate(low_fee + high_fee, tx_vsizes[5] + tx_vsizes[7])); node::MiniMiner mini_miner(pool, all_unspent_outpoints); BOOST_CHECK(mini_miner.IsReadyToCalculate()); - auto bump_fees = mini_miner.CalculateBumpFees(just_above_tx7); + auto bump_fees = mini_miner.CalculateBumpFees(just_above_tx6); BOOST_CHECK(!mini_miner.IsReadyToCalculate()); BOOST_CHECK_EQUAL(bump_fees.size(), all_unspent_outpoints.size()); BOOST_CHECK(sanity_check(all_transactions, bump_fees)); + const auto tx6_bumpfee = bump_fees.find(COutPoint{tx6->GetHash(), 0}); + BOOST_CHECK(tx6_bumpfee != bump_fees.end()); + BOOST_CHECK_EQUAL(tx6_bumpfee->second, just_above_tx6.GetFee(tx_vsizes[6]) - (med_fee)); const auto tx7_bumpfee = bump_fees.find(COutPoint{tx7->GetHash(), 0}); BOOST_CHECK(tx7_bumpfee != bump_fees.end()); - BOOST_CHECK_EQUAL(tx7_bumpfee->second, just_above_tx7.GetFee(tx_vsizes[6]) - (med_fee)); - const auto tx8_bumpfee = bump_fees.find(COutPoint{tx8->GetHash(), 0}); - BOOST_CHECK(tx8_bumpfee != bump_fees.end()); - BOOST_CHECK_EQUAL(tx8_bumpfee->second, 0); + BOOST_CHECK_EQUAL(tx7_bumpfee->second, 0); } } BOOST_FIXTURE_TEST_CASE(calculate_cluster, TestChain100Setup) @@ -445,12 +478,12 @@ BOOST_FIXTURE_TEST_CASE(calculate_cluster, TestChain100Setup) const auto cluster_501 = pool.GatherClusters({tx_501->GetHash()}); BOOST_CHECK_EQUAL(cluster_501.size(), 0); - // Zig Zag cluster: - // txp0 txp1 txp2 ... txp48 txp49 - // \ / \ / \ \ / - // txc0 txc1 txc2 ... txc48 - // Note that each transaction's ancestor size is 1 or 3, and each descendant size is 1, 2 or 3. - // However, all of these transactions are in the same cluster. + /* Zig Zag cluster: + * txp0 txp1 txp2 ... txp48 txp49 + * \ / \ / \ \ / + * txc0 txc1 txc2 ... 
txc48 + * Note that each transaction's ancestor size is 1 or 3, and each descendant size is 1, 2 or 3. + * However, all of these transactions are in the same cluster. */ std::vector<uint256> zigzag_txids; for (auto p{0}; p < 50; ++p) { const auto txp = make_tx({COutPoint{GetRandHash(), 0}}, /*num_outputs=*/2); diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 34d7867079..5976aa3713 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -850,7 +850,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) std::chrono::microseconds time_received_dummy{0}; const auto msg_version = - msg_maker.Make(NetMsgType::VERSION, PROTOCOL_VERSION, services, time, services, WithParams(CAddress::V1_NETWORK, peer_us)); + msg_maker.Make(NetMsgType::VERSION, PROTOCOL_VERSION, services, time, services, CAddress::V1_NETWORK(peer_us)); CDataStream msg_version_stream{msg_version.data, SER_NETWORK, PROTOCOL_VERSION}; m_node.peerman->ProcessMessage( @@ -876,7 +876,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) DataStream s{data}; std::vector<CAddress> addresses; - s >> WithParams(CAddress::V1_NETWORK, addresses); + s >> CAddress::V1_NETWORK(addresses); for (const auto& addr : addresses) { if (addr == expected) { @@ -1031,9 +1031,11 @@ class V2TransportTester bool m_test_initiator; //!< Whether m_transport is the initiator (true) or responder (false) std::vector<uint8_t> m_sent_garbage; //!< The garbage we've sent to m_transport. + std::vector<uint8_t> m_recv_garbage; //!< The garbage we've received from m_transport. std::vector<uint8_t> m_to_send; //!< Bytes we have queued up to send to m_transport. std::vector<uint8_t> m_received; //!< Bytes we have received from m_transport. std::deque<CSerializedNetMsg> m_msg_to_send; //!< Messages to be sent *by* m_transport to us. + bool m_sent_aad{false}; public: /** Construct a tester object. test_initiator: whether the tested transport is initiator. 
*/ @@ -1131,8 +1133,7 @@ public: /** Schedule specified garbage to be sent to the transport. */ void SendGarbage(Span<const uint8_t> garbage) { - // Remember the specified garbage (so we can use it for constructing the garbage - // authentication packet). + // Remember the specified garbage (so we can use it as AAD). m_sent_garbage.assign(garbage.begin(), garbage.end()); // Schedule it for sending. Send(m_sent_garbage); @@ -1191,27 +1192,27 @@ public: Send(ciphertext); } - /** Schedule garbage terminator and authentication packet to be sent to the transport (only - * after ReceiveKey). */ - void SendGarbageTermAuth(size_t garb_auth_data_len = 0, bool garb_auth_ignore = false) + /** Schedule garbage terminator to be sent to the transport (only after ReceiveKey). */ + void SendGarbageTerm() { - // Generate random data to include in the garbage authentication packet (ignored by peer). - auto garb_auth_data = g_insecure_rand_ctx.randbytes<uint8_t>(garb_auth_data_len); // Schedule the garbage terminator to be sent. Send(m_cipher.GetSendGarbageTerminator()); - // Schedule the garbage authentication packet to be sent. - SendPacket(/*content=*/garb_auth_data, /*aad=*/m_sent_garbage, /*ignore=*/garb_auth_ignore); } /** Schedule version packet to be sent to the transport (only after ReceiveKey). */ void SendVersion(Span<const uint8_t> version_data = {}, bool vers_ignore = false) { - SendPacket(/*content=*/version_data, /*aad=*/{}, /*ignore=*/vers_ignore); + Span<const std::uint8_t> aad; + // Set AAD to garbage only for first packet. + if (!m_sent_aad) aad = m_sent_garbage; + SendPacket(/*content=*/version_data, /*aad=*/aad, /*ignore=*/vers_ignore); + m_sent_aad = true; } /** Expect a packet to have been received from transport, process it, and return its contents - * (only after ReceiveKey). By default, decoys are skipped. */ - std::vector<uint8_t> ReceivePacket(Span<const std::byte> aad = {}, bool skip_decoy = true) + * (only after ReceiveKey). Decoys are skipped. 
Optional associated authenticated data (AAD) is + * expected in the first received packet, no matter if that is a decoy or not. */ + std::vector<uint8_t> ReceivePacket(Span<const std::byte> aad = {}) { std::vector<uint8_t> contents; // Loop as long as there are ignored packets that are to be skipped. @@ -1232,16 +1233,18 @@ public: /*ignore=*/ignore, /*contents=*/MakeWritableByteSpan(contents)); BOOST_CHECK(ret); + // Don't expect AAD in further packets. + aad = {}; // Strip the processed packet's bytes off the front of the receive buffer. m_received.erase(m_received.begin(), m_received.begin() + size + BIP324Cipher::EXPANSION); - // Stop if the ignore bit is not set on this packet, or if we choose to not honor it. - if (!ignore || !skip_decoy) break; + // Stop if the ignore bit is not set on this packet. + if (!ignore) break; } return contents; } - /** Expect garbage, garbage terminator, and garbage auth packet to have been received, and - * process them (only after ReceiveKey). */ + /** Expect garbage and garbage terminator to have been received, and process them (only after + * ReceiveKey). */ void ReceiveGarbage() { // Figure out the garbage length. @@ -1252,18 +1255,15 @@ public: if (term_span == m_cipher.GetReceiveGarbageTerminator()) break; } // Copy the garbage to a buffer. - std::vector<uint8_t> garbage(m_received.begin(), m_received.begin() + garblen); + m_recv_garbage.assign(m_received.begin(), m_received.begin() + garblen); // Strip garbage + garbage terminator off the front of the receive buffer. m_received.erase(m_received.begin(), m_received.begin() + garblen + BIP324Cipher::GARBAGE_TERMINATOR_LEN); - // Process the expected garbage authentication packet. Such a packet still functions as one - // even when its ignore bit is set to true, so we do not skip decoy packets here. - ReceivePacket(/*aad=*/MakeByteSpan(garbage), /*skip_decoy=*/false); } /** Expect version packet to have been received, and process it (only after ReceiveKey). 
*/ void ReceiveVersion() { - auto contents = ReceivePacket(); + auto contents = ReceivePacket(/*aad=*/MakeByteSpan(m_recv_garbage)); // Version packets from real BIP324 peers are expected to be empty, despite the fact that // this class supports *sending* non-empty version packets (to test that BIP324 peers // correctly ignore version packet contents). @@ -1321,6 +1321,14 @@ public: SendPacket(contents); } + /** Test whether the transport's session ID matches the session ID we expect. */ + void CompareSessionIDs() const + { + auto info = m_transport.GetInfo(); + BOOST_CHECK(info.session_id); + BOOST_CHECK(uint256(MakeUCharSpan(m_cipher.GetSessionID())) == *info.session_id); + } + /** Introduce a bit error in the data scheduled to be sent. */ void Damage() { @@ -1340,12 +1348,13 @@ BOOST_AUTO_TEST_CASE(v2transport_test) tester.SendKey(); tester.SendGarbage(); tester.ReceiveKey(); - tester.SendGarbageTermAuth(); + tester.SendGarbageTerm(); tester.SendVersion(); ret = tester.Interact(); BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); auto msg_data_1 = g_insecure_rand_ctx.randbytes<uint8_t>(InsecureRandRange(100000)); auto msg_data_2 = g_insecure_rand_ctx.randbytes<uint8_t>(InsecureRandRange(1000)); tester.SendMessage(uint8_t(4), msg_data_1); // cmpctblock short id @@ -1357,11 +1366,19 @@ BOOST_AUTO_TEST_CASE(v2transport_test) BOOST_CHECK(!(*ret)[1]); BOOST_CHECK((*ret)[2] && (*ret)[2]->m_type == "tx" && Span{(*ret)[2]->m_recv} == MakeByteSpan(msg_data_2)); - // Then send a message with a bit error, expecting failure. + // Then send a message with a bit error, expecting failure. It's possible this failure does + // not occur immediately (when the length descriptor was modified), but it should come + // eventually, and no messages can be delivered anymore. 
tester.SendMessage("bad", msg_data_1); tester.Damage(); - ret = tester.Interact(); - BOOST_CHECK(!ret); + while (true) { + ret = tester.Interact(); + if (!ret) break; // failure + BOOST_CHECK(ret->size() == 0); // no message can be delivered + // Send another message. + auto msg_data_3 = g_insecure_rand_ctx.randbytes<uint8_t>(InsecureRandRange(10000)); + tester.SendMessage(uint8_t(12), msg_data_3); // getheaders short id + } } // Normal scenario, with a transport in responder node. @@ -1372,12 +1389,13 @@ BOOST_AUTO_TEST_CASE(v2transport_test) auto ret = tester.Interact(); BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveKey(); - tester.SendGarbageTermAuth(); + tester.SendGarbageTerm(); tester.SendVersion(); ret = tester.Interact(); BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); auto msg_data_1 = g_insecure_rand_ctx.randbytes<uint8_t>(InsecureRandRange(100000)); auto msg_data_2 = g_insecure_rand_ctx.randbytes<uint8_t>(InsecureRandRange(1000)); tester.SendMessage(uint8_t(14), msg_data_1); // inv short id @@ -1400,10 +1418,6 @@ BOOST_AUTO_TEST_CASE(v2transport_test) bool initiator = InsecureRandBool(); /** Use either 0 bytes or the maximum possible (4095 bytes) garbage length. */ size_t garb_len = InsecureRandBool() ? 0 : V2Transport::MAX_GARBAGE_LEN; - /** Sometimes, use non-empty contents in the garbage authentication packet (which is to be ignored). */ - size_t garb_auth_data_len = InsecureRandBool() ? 0 : InsecureRandRange(100000); - /** Whether to set the ignore bit on the garbage authentication packet (it still functions as garbage authentication). */ - bool garb_ignore = InsecureRandBool(); /** How many decoy packets to send before the version packet. */ unsigned num_ignore_version = InsecureRandRange(10); /** What data to send in the version packet (ignored by BIP324 peers, but reserved for future extensions). 
*/ @@ -1424,7 +1438,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) tester.SendGarbage(garb_len); } tester.ReceiveKey(); - tester.SendGarbageTermAuth(garb_auth_data_len, garb_ignore); + tester.SendGarbageTerm(); for (unsigned v = 0; v < num_ignore_version; ++v) { size_t ver_ign_data_len = InsecureRandBool() ? 0 : InsecureRandRange(1000); auto ver_ign_data = g_insecure_rand_ctx.randbytes<uint8_t>(ver_ign_data_len); @@ -1435,6 +1449,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); for (unsigned d = 0; d < num_decoys_1; ++d) { auto decoy_data = g_insecure_rand_ctx.randbytes<uint8_t>(InsecureRandRange(1000)); tester.SendPacket(/*content=*/decoy_data, /*aad=*/{}, /*ignore=*/true); @@ -1468,7 +1483,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) tester.SendKey(); tester.SendGarbage(V2Transport::MAX_GARBAGE_LEN + 1); tester.ReceiveKey(); - tester.SendGarbageTermAuth(); + tester.SendGarbageTerm(); ret = tester.Interact(); BOOST_CHECK(!ret); } @@ -1481,7 +1496,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) auto ret = tester.Interact(); BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveKey(); - tester.SendGarbageTermAuth(); + tester.SendGarbageTerm(); ret = tester.Interact(); BOOST_CHECK(!ret); } @@ -1506,12 +1521,13 @@ BOOST_AUTO_TEST_CASE(v2transport_test) // the first 15 of them match. 
garbage[len_before + 15] ^= (uint8_t(1) << InsecureRandRange(8)); tester.SendGarbage(garbage); - tester.SendGarbageTermAuth(); + tester.SendGarbageTerm(); tester.SendVersion(); ret = tester.Interact(); BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); auto msg_data_1 = g_insecure_rand_ctx.randbytes<uint8_t>(4000000); // test that receiving 4M payload works auto msg_data_2 = g_insecure_rand_ctx.randbytes<uint8_t>(4000000); // test that sending 4M payload works tester.SendMessage(uint8_t(InsecureRandRange(223) + 33), {}); // unknown short id diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp index e22bf7e7c0..74ff531cd9 100644 --- a/src/test/netbase_tests.cpp +++ b/src/test/netbase_tests.cpp @@ -561,7 +561,7 @@ BOOST_AUTO_TEST_CASE(caddress_serialize_v1) { DataStream s{}; - s << WithParams(CAddress::V1_NETWORK, fixture_addresses); + s << CAddress::V1_NETWORK(fixture_addresses); BOOST_CHECK_EQUAL(HexStr(s), stream_addrv1_hex); } @@ -570,7 +570,7 @@ BOOST_AUTO_TEST_CASE(caddress_unserialize_v1) DataStream s{ParseHex(stream_addrv1_hex)}; std::vector<CAddress> addresses_unserialized; - s >> WithParams(CAddress::V1_NETWORK, addresses_unserialized); + s >> CAddress::V1_NETWORK(addresses_unserialized); BOOST_CHECK(fixture_addresses == addresses_unserialized); } @@ -578,7 +578,7 @@ BOOST_AUTO_TEST_CASE(caddress_serialize_v2) { DataStream s{}; - s << WithParams(CAddress::V2_NETWORK, fixture_addresses); + s << CAddress::V2_NETWORK(fixture_addresses); BOOST_CHECK_EQUAL(HexStr(s), stream_addrv2_hex); } @@ -587,7 +587,7 @@ BOOST_AUTO_TEST_CASE(caddress_unserialize_v2) DataStream s{ParseHex(stream_addrv2_hex)}; std::vector<CAddress> addresses_unserialized; - s >> WithParams(CAddress::V2_NETWORK, addresses_unserialized); + s >> CAddress::V2_NETWORK(addresses_unserialized); BOOST_CHECK(fixture_addresses == addresses_unserialized); } diff --git a/src/test/script_standard_tests.cpp 
b/src/test/script_standard_tests.cpp index 1a205728d6..58bdb37b7c 100644 --- a/src/test/script_standard_tests.cpp +++ b/src/test/script_standard_tests.cpp @@ -203,8 +203,8 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination) // TxoutType::PUBKEY s.clear(); s << ToByteVector(pubkey) << OP_CHECKSIG; - BOOST_CHECK(ExtractDestination(s, address)); - BOOST_CHECK(std::get<PKHash>(address) == PKHash(pubkey)); + BOOST_CHECK(!ExtractDestination(s, address)); + BOOST_CHECK(std::get<PubKeyDestination>(address) == PubKeyDestination(pubkey)); // TxoutType::PUBKEYHASH s.clear(); @@ -249,10 +249,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination) s.clear(); s << OP_1 << ToByteVector(pubkey); BOOST_CHECK(ExtractDestination(s, address)); - WitnessUnknown unk; - unk.length = 33; - unk.version = 1; - std::copy(pubkey.begin(), pubkey.end(), unk.program); + WitnessUnknown unk{1, ToByteVector(pubkey)}; BOOST_CHECK(std::get<WitnessUnknown>(address) == unk); } diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index d63bfb9603..94656b229e 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -1470,7 +1470,7 @@ BOOST_AUTO_TEST_CASE(script_HasValidOps) static CMutableTransaction TxFromHex(const std::string& str) { CMutableTransaction tx; - SpanReader{SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, ParseHex(str)} >> tx; + SpanReader{SERIALIZE_TRANSACTION_NO_WITNESS, ParseHex(str)} >> tx; return tx; } @@ -1480,7 +1480,7 @@ static std::vector<CTxOut> TxOutsFromJSON(const UniValue& univalue) std::vector<CTxOut> prevouts; for (size_t i = 0; i < univalue.size(); ++i) { CTxOut txout; - SpanReader{SER_DISK, 0, ParseHex(univalue[i].get_str())} >> txout; + SpanReader{0, ParseHex(univalue[i].get_str())} >> txout; prevouts.push_back(std::move(txout)); } return prevouts; @@ -1751,7 +1751,7 @@ BOOST_AUTO_TEST_CASE(bip341_keypath_test_vectors) for (const auto& vec : vectors.getValues()) { auto txhex = ParseHex(vec["given"]["rawUnsignedTx"].get_str()); 
CMutableTransaction tx; - SpanReader{SER_NETWORK, PROTOCOL_VERSION, txhex} >> tx; + SpanReader{PROTOCOL_VERSION, txhex} >> tx; std::vector<CTxOut> utxos; for (const auto& utxo_spent : vec["given"]["utxosSpent"].getValues()) { auto script_bytes = ParseHex(utxo_spent["scriptPubKey"].get_str()); diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index 2f2bb6698c..d18d2623b1 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -176,7 +176,7 @@ BOOST_AUTO_TEST_CASE(vector_bool) std::vector<bool> vec2{1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1}; BOOST_CHECK(vec1 == std::vector<uint8_t>(vec2.begin(), vec2.end())); - BOOST_CHECK(SerializeHash(vec1) == SerializeHash(vec2)); + BOOST_CHECK((HashWriter{} << vec1).GetHash() == (HashWriter{} << vec2).GetHash()); } BOOST_AUTO_TEST_CASE(noncanonical) diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index d1c0e1349e..178b16772b 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -78,7 +78,7 @@ uint256 static SignatureHashOld(CScript scriptCode, const CTransaction& txTo, un } // Serialize and hash - CHashWriter ss(SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); + CHashWriter ss{SERIALIZE_TRANSACTION_NO_WITNESS}; ss << txTmp << nHashType; return ss.GetHash(); } diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 99740ee779..f03f7c1da2 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -74,49 +74,49 @@ BOOST_AUTO_TEST_CASE(streams_vector_writer) // point should yield the same results, even if the first test grew the // vector. 
- CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 0, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 0, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{1, 2}})); vch.clear(); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2}})); vch.clear(); vch.resize(5, 0); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2, 0}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2, 0}})); vch.clear(); vch.resize(4, 0); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 3, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 3, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 1, 2}})); vch.clear(); vch.resize(4, 0); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 4, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 4, a, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}})); vch.clear(); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); + 
CVectorWriter{INIT_PROTO_VERSION, vch, 0, bytes}; BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); + CVectorWriter{INIT_PROTO_VERSION, vch, 0, bytes}; BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); vch.clear(); vch.resize(4, 8); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, bytes, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, bytes, b}; BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); vch.clear(); } @@ -125,7 +125,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader) { std::vector<unsigned char> vch = {1, 255, 3, 4, 5, 6}; - SpanReader reader{SER_NETWORK, INIT_PROTO_VERSION, vch}; + SpanReader reader{INIT_PROTO_VERSION, vch}; BOOST_CHECK_EQUAL(reader.size(), 6U); BOOST_CHECK(!reader.empty()); @@ -155,7 +155,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader) BOOST_CHECK_THROW(reader >> d, std::ios_base::failure); // Read a 4 bytes as a signed int from the beginning of the buffer. 
- SpanReader new_reader{SER_NETWORK, INIT_PROTO_VERSION, vch}; + SpanReader new_reader{INIT_PROTO_VERSION, vch}; new_reader >> d; BOOST_CHECK_EQUAL(d, 67370753); // 1,255,3,4 in little-endian base-256 BOOST_CHECK_EQUAL(new_reader.size(), 2U); @@ -169,7 +169,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader) BOOST_AUTO_TEST_CASE(streams_vector_reader_rvalue) { std::vector<uint8_t> data{0x82, 0xa7, 0x31}; - SpanReader reader{SER_NETWORK, INIT_PROTO_VERSION, data}; + SpanReader reader{INIT_PROTO_VERSION, data}; uint32_t varint = 0; // Deserialize into r-value reader >> VARINT(varint); @@ -249,18 +249,18 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor) BOOST_AUTO_TEST_CASE(streams_buffered_file) { fs::path streams_test_filename = m_args.GetDataDirBase() / "streams_test_tmp"; - FILE* file = fsbridge::fopen(streams_test_filename, "w+b"); + CAutoFile file{fsbridge::fopen(streams_test_filename, "w+b"), 333}; // The value at each offset is the offset. for (uint8_t j = 0; j < 40; ++j) { - fwrite(&j, 1, 1, file); + file << j; } - rewind(file); + std::rewind(file.Get()); // The buffer size (second arg) must be greater than the rewind // amount (third arg). try { - BufferedFile bfbad{file, 25, 25, 333}; + BufferedFile bfbad{file, 25, 25}; BOOST_CHECK(false); } catch (const std::exception& e) { BOOST_CHECK(strstr(e.what(), @@ -268,7 +268,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file) } // The buffer is 25 bytes, allow rewinding 10 bytes. - BufferedFile bf{file, 25, 10, 333}; + BufferedFile bf{file, 25, 10}; BOOST_CHECK(!bf.eof()); // This member has no functional effect. @@ -375,7 +375,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file) BOOST_CHECK(bf.GetPos() <= 30U); // We can explicitly close the file, or the destructor will do it. 
- bf.fclose(); + file.fclose(); fs::remove(streams_test_filename); } @@ -383,15 +383,15 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file) BOOST_AUTO_TEST_CASE(streams_buffered_file_skip) { fs::path streams_test_filename = m_args.GetDataDirBase() / "streams_test_tmp"; - FILE* file = fsbridge::fopen(streams_test_filename, "w+b"); + CAutoFile file{fsbridge::fopen(streams_test_filename, "w+b"), 333}; // The value at each offset is the byte offset (e.g. byte 1 in the file has the value 0x01). for (uint8_t j = 0; j < 40; ++j) { - fwrite(&j, 1, 1, file); + file << j; } - rewind(file); + std::rewind(file.Get()); // The buffer is 25 bytes, allow rewinding 10 bytes. - BufferedFile bf{file, 25, 10, 333}; + BufferedFile bf{file, 25, 10}; uint8_t i; // This is like bf >> (7-byte-variable), in that it will cause data @@ -425,7 +425,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_skip) bf.SkipTo(13); BOOST_CHECK_EQUAL(bf.GetPos(), 13U); - bf.fclose(); + file.fclose(); fs::remove(streams_test_filename); } @@ -436,16 +436,16 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand) fs::path streams_test_filename = m_args.GetDataDirBase() / "streams_test_tmp"; for (int rep = 0; rep < 50; ++rep) { - FILE* file = fsbridge::fopen(streams_test_filename, "w+b"); + CAutoFile file{fsbridge::fopen(streams_test_filename, "w+b"), 333}; size_t fileSize = InsecureRandRange(256); for (uint8_t i = 0; i < fileSize; ++i) { - fwrite(&i, 1, 1, file); + file << i; } - rewind(file); + std::rewind(file.Get()); size_t bufSize = InsecureRandRange(300) + 1; size_t rewindSize = InsecureRandRange(bufSize); - BufferedFile bf{file, bufSize, rewindSize, 333}; + BufferedFile bf{file, bufSize, rewindSize}; size_t currentPos = 0; size_t maxPos = 0; for (int step = 0; step < 100; ++step) { diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index 10ab656d38..571b58156f 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -51,14 +51,14 @@ 
BOOST_FIXTURE_TEST_CASE(package_sanitization_tests, TestChain100Setup) BOOST_CHECK_EQUAL(state_too_many.GetResult(), PackageValidationResult::PCKG_POLICY); BOOST_CHECK_EQUAL(state_too_many.GetRejectReason(), "package-too-many-transactions"); - // Packages can't have a total size of more than 101KvB. + // Packages can't have a total weight of more than 404'000WU. CTransactionRef large_ptx = create_placeholder_tx(150, 150); Package package_too_large; - auto size_large = GetVirtualTransactionSize(*large_ptx); - size_t total_size{0}; - while (total_size <= MAX_PACKAGE_SIZE * 1000) { + auto size_large = GetTransactionWeight(*large_ptx); + size_t total_weight{0}; + while (total_weight <= MAX_PACKAGE_WEIGHT) { package_too_large.push_back(large_ptx); - total_size += size_large; + total_weight += size_large; } BOOST_CHECK(package_too_large.size() <= MAX_PACKAGE_COUNT); PackageValidationState state_too_large; @@ -122,7 +122,7 @@ BOOST_FIXTURE_TEST_CASE(package_validation_tests, TestChain100Setup) // A single, giant transaction submitted through ProcessNewPackage fails on single tx policy. 
CTransactionRef giant_ptx = create_placeholder_tx(999, 999); - BOOST_CHECK(GetVirtualTransactionSize(*giant_ptx) > MAX_PACKAGE_SIZE * 1000); + BOOST_CHECK(GetVirtualTransactionSize(*giant_ptx) > DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1000); auto result_single_large = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, {giant_ptx}, /*test_accept=*/true); BOOST_CHECK(result_single_large.m_state.IsInvalid()); BOOST_CHECK_EQUAL(result_single_large.m_state.GetResult(), PackageValidationResult::PCKG_TX); diff --git a/src/test/util/chainstate.h b/src/test/util/chainstate.h index 7f55916870..e2a88eacdd 100644 --- a/src/test/util/chainstate.h +++ b/src/test/util/chainstate.h @@ -109,7 +109,23 @@ CreateAndActivateUTXOSnapshot( 0 == WITH_LOCK(node.chainman->GetMutex(), return node.chainman->ActiveHeight())); } - return node.chainman->ActivateSnapshot(auto_infile, metadata, in_memory_chainstate); + auto& new_active = node.chainman->ActiveChainstate(); + auto* tip = new_active.m_chain.Tip(); + + // Disconnect a block so that the snapshot chainstate will be ahead, otherwise + // it will refuse to activate. + // + // TODO this is a unittest-specific hack, and we should probably rethink how to + // better generate/activate snapshots in unittests. + if (tip->pprev) { + new_active.m_chain.SetTip(*(tip->pprev)); + } + + bool res = node.chainman->ActivateSnapshot(auto_infile, metadata, in_memory_chainstate); + + // Restore the old tip. 
+ new_active.m_chain.SetTip(*tip); + return res; } diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp index dc64c0b4c1..bf5a653090 100644 --- a/src/test/util/net.cpp +++ b/src/test/util/net.cpp @@ -33,9 +33,9 @@ void ConnmanTestMsg::Handshake(CNode& node, Using<CustomUintFormatter<8>>(remote_services), // int64_t{}, // dummy time int64_t{}, // ignored service bits - WithParams(CNetAddr::V1, CService{}), // dummy + CNetAddr::V1(CService{}), // dummy int64_t{}, // ignored service bits - WithParams(CNetAddr::V1, CService{}), // ignored + CNetAddr::V1(CService{}), // ignored uint64_t{1}, // dummy nonce std::string{}, // dummy subver int32_t{}, // dummy starting_height diff --git a/src/test/util/net.h b/src/test/util/net.h index 1684da777a..0d41cf550e 100644 --- a/src/test/util/net.h +++ b/src/test/util/net.h @@ -65,6 +65,7 @@ constexpr ServiceFlags ALL_SERVICE_FLAGS[]{ NODE_WITNESS, NODE_COMPACT_FILTERS, NODE_NETWORK_LIMITED, + NODE_P2P_V2, }; constexpr NetPermissionFlags ALL_NET_PERMISSION_FLAGS[]{ diff --git a/src/test/util/validation.cpp b/src/test/util/validation.cpp index 2d5562ae66..bcd6a7a7dc 100644 --- a/src/test/util/validation.cpp +++ b/src/test/util/validation.cpp @@ -22,7 +22,11 @@ void TestChainstateManager::JumpOutOfIbd() Assert(!IsInitialBlockDownload()); } -void ValidationInterfaceTest::BlockConnected(CValidationInterface& obj, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) +void ValidationInterfaceTest::BlockConnected( + ChainstateRole role, + CValidationInterface& obj, + const std::shared_ptr<const CBlock>& block, + const CBlockIndex* pindex) { - obj.BlockConnected(block, pindex); + obj.BlockConnected(role, block, pindex); } diff --git a/src/test/util/validation.h b/src/test/util/validation.h index 64654f3fb6..45ef773409 100644 --- a/src/test/util/validation.h +++ b/src/test/util/validation.h @@ -19,7 +19,11 @@ struct TestChainstateManager : public ChainstateManager { class ValidationInterfaceTest { public: - static 
void BlockConnected(CValidationInterface& obj, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex); + static void BlockConnected( + ChainstateRole role, + CValidationInterface& obj, + const std::shared_ptr<const CBlock>& block, + const CBlockIndex* pindex); }; #endif // BITCOIN_TEST_UTIL_VALIDATION_H diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 26677bfa55..67f71bd266 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -1791,4 +1791,29 @@ BOOST_AUTO_TEST_CASE(util_WriteBinaryFile) BOOST_CHECK(valid); BOOST_CHECK_EQUAL(actual_text, expected_text); } + +BOOST_AUTO_TEST_CASE(clearshrink_test) +{ + { + std::vector<uint8_t> v = {1, 2, 3}; + ClearShrink(v); + BOOST_CHECK_EQUAL(v.size(), 0); + BOOST_CHECK_EQUAL(v.capacity(), 0); + } + + { + std::vector<bool> v = {false, true, false, false, true, true}; + ClearShrink(v); + BOOST_CHECK_EQUAL(v.size(), 0); + BOOST_CHECK_EQUAL(v.capacity(), 0); + } + + { + std::deque<int> v = {1, 3, 3, 7}; + ClearShrink(v); + BOOST_CHECK_EQUAL(v.size(), 0); + // std::deque has no capacity() we can observe. 
+ } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index d1463634cc..411371f7c1 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -43,7 +43,7 @@ struct TestSubscriber final : public CValidationInterface { BOOST_CHECK_EQUAL(m_expected_tip, pindexNew->GetBlockHash()); } - void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) override + void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) override { BOOST_CHECK_EQUAL(m_expected_tip, block->hashPrevBlock); BOOST_CHECK_EQUAL(m_expected_tip, pindex->pprev->GetBlockHash()); diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 9e359eeee4..227d7d4633 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -4,6 +4,7 @@ // #include <chainparams.h> #include <consensus/validation.h> +#include <kernel/disconnected_transactions.h> #include <node/kernel_notifications.h> #include <node/utxo_snapshot.h> #include <random.h> @@ -29,30 +30,22 @@ using node::BlockManager; using node::KernelNotifications; using node::SnapshotMetadata; -BOOST_FIXTURE_TEST_SUITE(validation_chainstatemanager_tests, ChainTestingSetup) +BOOST_FIXTURE_TEST_SUITE(validation_chainstatemanager_tests, TestingSetup) //! Basic tests for ChainstateManager. //! //! First create a legacy (IBD) chainstate, then create a snapshot chainstate. -BOOST_AUTO_TEST_CASE(chainstatemanager) +BOOST_FIXTURE_TEST_CASE(chainstatemanager, TestChain100Setup) { ChainstateManager& manager = *m_node.chainman; - CTxMemPool& mempool = *m_node.mempool; - std::vector<Chainstate*> chainstates; BOOST_CHECK(!manager.SnapshotBlockhash().has_value()); // Create a legacy (IBD) chainstate. 
// - Chainstate& c1 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(&mempool)); + Chainstate& c1 = manager.ActiveChainstate(); chainstates.push_back(&c1); - c1.InitCoinsDB( - /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); - WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23)); - c1.LoadGenesisBlock(); - BlockValidationState val_state; - BOOST_CHECK(c1.ActivateBestChain(val_state, nullptr)); BOOST_CHECK(!manager.IsSnapshotActive()); BOOST_CHECK(WITH_LOCK(::cs_main, return !manager.IsSnapshotValidated())); @@ -62,8 +55,9 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) auto& active_chain = WITH_LOCK(manager.GetMutex(), return manager.ActiveChain()); BOOST_CHECK_EQUAL(&active_chain, &c1.m_chain); - BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 0); - + // Get to a valid assumeutxo tip (per chainparams); + mineBlocks(10); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 110); auto active_tip = WITH_LOCK(manager.GetMutex(), return manager.ActiveTip()); auto exp_tip = c1.m_chain.Tip(); BOOST_CHECK_EQUAL(active_tip, exp_tip); @@ -73,19 +67,21 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) // Create a snapshot-based chainstate. 
// const uint256 snapshot_blockhash = active_tip->GetBlockHash(); - Chainstate& c2 = WITH_LOCK(::cs_main, return manager.ActivateExistingSnapshot( - &mempool, snapshot_blockhash)); + Chainstate& c2 = WITH_LOCK(::cs_main, return manager.ActivateExistingSnapshot(snapshot_blockhash)); chainstates.push_back(&c2); - - BOOST_CHECK_EQUAL(manager.SnapshotBlockhash().value(), snapshot_blockhash); - c2.InitCoinsDB( /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); - WITH_LOCK(::cs_main, c2.InitCoinsCache(1 << 23)); - c2.m_chain.SetTip(*active_tip); + { + LOCK(::cs_main); + c2.InitCoinsCache(1 << 23); + c2.CoinsTip().SetBestBlock(active_tip->GetBlockHash()); + c2.setBlockIndexCandidates.insert(manager.m_blockman.LookupBlockIndex(active_tip->GetBlockHash())); + c2.LoadChainTip(); + } BlockValidationState _; BOOST_CHECK(c2.ActivateBestChain(_, nullptr)); + BOOST_CHECK_EQUAL(manager.SnapshotBlockhash().value(), snapshot_blockhash); BOOST_CHECK(manager.IsSnapshotActive()); BOOST_CHECK(WITH_LOCK(::cs_main, return !manager.IsSnapshotValidated())); BOOST_CHECK_EQUAL(&c2, &manager.ActiveChainstate()); @@ -96,13 +92,15 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) auto& active_chain2 = WITH_LOCK(manager.GetMutex(), return manager.ActiveChain()); BOOST_CHECK_EQUAL(&active_chain2, &c2.m_chain); - BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 0); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 110); + mineBlocks(1); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 111); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return c1.m_chain.Height()), 110); auto active_tip2 = WITH_LOCK(manager.GetMutex(), return manager.ActiveTip()); - auto exp_tip2 = c2.m_chain.Tip(); - BOOST_CHECK_EQUAL(active_tip2, exp_tip2); - - BOOST_CHECK_EQUAL(exp_tip, exp_tip2); + BOOST_CHECK_EQUAL(active_tip, active_tip2->pprev); + BOOST_CHECK_EQUAL(active_tip, c1.m_chain.Tip()); + 
BOOST_CHECK_EQUAL(active_tip2, c2.m_chain.Tip()); // Let scheduler events finish running to avoid accessing memory that is going to be unloaded SyncWithValidationInterfaceQueue(); @@ -112,7 +110,6 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) { ChainstateManager& manager = *m_node.chainman; - CTxMemPool& mempool = *m_node.mempool; size_t max_cache = 10000; manager.m_total_coinsdb_cache = max_cache; @@ -124,9 +121,6 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) // Chainstate& c1 = manager.ActiveChainstate(); chainstates.push_back(&c1); - c1.InitCoinsDB( - /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); - { LOCK(::cs_main); c1.InitCoinsCache(1 << 23); @@ -139,7 +133,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) // Create a snapshot-based chainstate. // CBlockIndex* snapshot_base{WITH_LOCK(manager.GetMutex(), return manager.ActiveChain()[manager.ActiveChain().Height() / 2])}; - Chainstate& c2 = WITH_LOCK(cs_main, return manager.ActivateExistingSnapshot(&mempool, *snapshot_base->phashBlock)); + Chainstate& c2 = WITH_LOCK(cs_main, return manager.ActivateExistingSnapshot(*snapshot_base->phashBlock)); chainstates.push_back(&c2); c2.InitCoinsDB( /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); @@ -288,10 +282,10 @@ struct SnapshotTestSetup : TestChain100Setup { BOOST_CHECK(!chainman.ActiveChain().Genesis()->IsAssumedValid()); } - const AssumeutxoData& au_data = *ExpectedAssumeutxo(snapshot_height, ::Params()); + const auto& au_data = ::Params().AssumeutxoForHeight(snapshot_height); const CBlockIndex* tip = WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()); - BOOST_CHECK_EQUAL(tip->nChainTx, au_data.nChainTx); + BOOST_CHECK_EQUAL(tip->nChainTx, au_data->nChainTx); // To be checked against later when we try loading a subsequent snapshot. 
uint256 loaded_snapshot_blockhash{*chainman.SnapshotBlockhash()}; @@ -425,18 +419,24 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, SnapshotTestSetup) BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) { ChainstateManager& chainman = *Assert(m_node.chainman); - CTxMemPool& mempool = *m_node.mempool; Chainstate& cs1 = chainman.ActiveChainstate(); int num_indexes{0}; int num_assumed_valid{0}; + // Blocks in range [assumed_valid_start_idx, last_assumed_valid_idx) will be + // marked as assumed-valid and not having data. const int expected_assumed_valid{20}; - const int last_assumed_valid_idx{40}; + const int last_assumed_valid_idx{111}; const int assumed_valid_start_idx = last_assumed_valid_idx - expected_assumed_valid; + // Mine to height 120, past the hardcoded regtest assumeutxo snapshot at + // height 110 + mineBlocks(20); + CBlockIndex* validated_tip{nullptr}; CBlockIndex* assumed_base{nullptr}; CBlockIndex* assumed_tip{WITH_LOCK(chainman.GetMutex(), return chainman.ActiveChain().Tip())}; + BOOST_CHECK_EQUAL(assumed_tip->nHeight, 120); auto reload_all_block_indexes = [&]() { // For completeness, we also reset the block sequence counters to @@ -462,7 +462,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) LOCK(::cs_main); auto index = cs1.m_chain[i]; - // Blocks with heights in range [20, 40) are marked ASSUMED_VALID + // Blocks with heights in range [91, 110] are marked ASSUMED_VALID if (i < last_assumed_valid_idx && i >= assumed_valid_start_idx) { index->nStatus = BlockStatus::BLOCK_VALID_TREE | BlockStatus::BLOCK_ASSUMED_VALID; } @@ -488,7 +488,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) // Note: cs2's tip is not set when ActivateExistingSnapshot is called. 
Chainstate& cs2 = WITH_LOCK(::cs_main, - return chainman.ActivateExistingSnapshot(&mempool, *assumed_base->phashBlock)); + return chainman.ActivateExistingSnapshot(*assumed_base->phashBlock)); // Set tip of the fully validated chain to be the validated tip cs1.m_chain.SetTip(*validated_tip); @@ -496,10 +496,36 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) // Set tip of the assume-valid-based chain to the assume-valid block cs2.m_chain.SetTip(*assumed_base); + // Sanity check test variables. + BOOST_CHECK_EQUAL(num_indexes, 121); // 121 total blocks, including genesis + BOOST_CHECK_EQUAL(assumed_tip->nHeight, 120); // original chain has height 120 + BOOST_CHECK_EQUAL(validated_tip->nHeight, 90); // current cs1 chain has height 90 + BOOST_CHECK_EQUAL(assumed_base->nHeight, 110); // current cs2 chain has height 110 + + // Regenerate cs1.setBlockIndexCandidates and cs2.setBlockIndexCandidate and + // check contents below. reload_all_block_indexes(); - // The fully validated chain should have the current validated tip - // and the assumed valid base as candidates. + // The fully validated chain should only have the current validated tip and + // the assumed valid base as candidates, blocks 90 and 110. Specifically: + // + // - It does not have blocks 0-89 because they contain less work than the + // chain tip. + // + // - It has block 90 because it has data and equal work to the chain tip, + // (since it is the chain tip). + // + // - It does not have blocks 91-109 because they do not contain data. + // + // - It has block 110 even though it does not have data, because + // LoadBlockIndex has a special case to always add the snapshot block as a + // candidate. The special case is only actually intended to apply to the + // snapshot chainstate cs2, not the background chainstate cs1, but it is + // written broadly and applies to both. 
+ // + // - It does not have any blocks after height 110 because cs1 is a background + // chainstate, and only blocks which are ancestors of the snapshot block + // are added as candidates for the background chainstate. BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.size(), 2); BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.count(validated_tip), 1); BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.count(assumed_base), 1); @@ -507,8 +533,25 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) // The assumed-valid tolerant chain has the assumed valid base as a // candidate, but otherwise has none of the assumed-valid (which do not // HAVE_DATA) blocks as candidates. + // + // Specifically: + // - All blocks below height 110 are not candidates, because cs2 chain tip + // has height 110 and they have less work than it does. + // + // - Block 110 is a candidate even though it does not have data, because it + // is the snapshot block, which is assumed valid. + // + // - Blocks 111-120 are added because they have data. + + // Check that block 90 is absent BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(validated_tip), 0); + // Check that block 109 is absent + BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(assumed_base->pprev), 0); + // Check that block 110 is present + BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(assumed_base), 1); + // Check that block 120 is present BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(assumed_tip), 1); + // Check that 11 blocks total are present. BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.size(), num_indexes - last_assumed_valid_idx + 1); } @@ -536,7 +579,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_snapshot_init, SnapshotTestSetup) // it will initialize instead of attempting to complete validation. // // Note that this is not a realistic use of DisconnectTip().
- DisconnectedBlockTransactions unused_pool; + DisconnectedBlockTransactions unused_pool{MAX_DISCONNECTED_TX_POOL_SIZE * 1000}; BlockValidationState unused_state; { LOCK2(::cs_main, bg_chainstate.MempoolMutex()); diff --git a/src/test/validation_tests.cpp b/src/test/validation_tests.cpp index d00f2ff4d1..d34d98c219 100644 --- a/src/test/validation_tests.cpp +++ b/src/test/validation_tests.cpp @@ -132,17 +132,17 @@ BOOST_AUTO_TEST_CASE(test_assumeutxo) std::vector<int> bad_heights{0, 100, 111, 115, 209, 211}; for (auto empty : bad_heights) { - const auto out = ExpectedAssumeutxo(empty, *params); + const auto out = params->AssumeutxoForHeight(empty); BOOST_CHECK(!out); } - const auto out110 = *ExpectedAssumeutxo(110, *params); + const auto out110 = *params->AssumeutxoForHeight(110); BOOST_CHECK_EQUAL(out110.hash_serialized.ToString(), "1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618"); BOOST_CHECK_EQUAL(out110.nChainTx, 110U); - const auto out210 = *ExpectedAssumeutxo(200, *params); - BOOST_CHECK_EQUAL(out210.hash_serialized.ToString(), "51c8d11d8b5c1de51543c579736e786aa2736206d1e11e627568029ce092cf62"); - BOOST_CHECK_EQUAL(out210.nChainTx, 200U); + const auto out110_2 = *params->AssumeutxoForBlockhash(uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c")); + BOOST_CHECK_EQUAL(out110_2.hash_serialized.ToString(), "1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618"); + BOOST_CHECK_EQUAL(out110_2.nChainTx, 110U); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validationinterface_tests.cpp b/src/test/validationinterface_tests.cpp index fcd0b25b38..5979441057 100644 --- a/src/test/validationinterface_tests.cpp +++ b/src/test/validationinterface_tests.cpp @@ -8,6 +8,7 @@ #include <scheduler.h> #include <test/util/setup_common.h> #include <util/check.h> +#include <kernel/chain.h> #include <validationinterface.h> #include <atomic> diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 92379484e3..e021cfb06e 
100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -197,19 +197,34 @@ util::Result<CTxMemPool::setEntries> CTxMemPool::CalculateAncestorsAndCheckLimit } bool CTxMemPool::CheckPackageLimits(const Package& package, - const Limits& limits, + const int64_t total_vsize, std::string &errString) const { + size_t pack_count = package.size(); + + // Package itself is busting mempool limits; should be rejected even if no staged_ancestors exist + if (pack_count > static_cast<uint64_t>(m_limits.ancestor_count)) { + errString = strprintf("package count %u exceeds ancestor count limit [limit: %u]", pack_count, m_limits.ancestor_count); + return false; + } else if (pack_count > static_cast<uint64_t>(m_limits.descendant_count)) { + errString = strprintf("package count %u exceeds descendant count limit [limit: %u]", pack_count, m_limits.descendant_count); + return false; + } else if (total_vsize > m_limits.ancestor_size_vbytes) { + errString = strprintf("package size %u exceeds ancestor size limit [limit: %u]", total_vsize, m_limits.ancestor_size_vbytes); + return false; + } else if (total_vsize > m_limits.descendant_size_vbytes) { + errString = strprintf("package size %u exceeds descendant size limit [limit: %u]", total_vsize, m_limits.descendant_size_vbytes); + return false; + } + CTxMemPoolEntry::Parents staged_ancestors; - int64_t total_size = 0; for (const auto& tx : package) { - total_size += GetVirtualTransactionSize(*tx); for (const auto& input : tx->vin) { std::optional<txiter> piter = GetIter(input.prevout.hash); if (piter) { staged_ancestors.insert(**piter); - if (staged_ancestors.size() + package.size() > static_cast<uint64_t>(limits.ancestor_count)) { - errString = strprintf("too many unconfirmed parents [limit: %u]", limits.ancestor_count); + if (staged_ancestors.size() + package.size() > static_cast<uint64_t>(m_limits.ancestor_count)) { + errString = strprintf("too many unconfirmed parents [limit: %u]", m_limits.ancestor_count); return false; } } @@ -218,8 
+233,8 @@ bool CTxMemPool::CheckPackageLimits(const Package& package, // When multiple transactions are passed in, the ancestors and descendants of all transactions // considered together must be within limits even if they are not interdependent. This may be // stricter than the limits for each individual transaction. - const auto ancestors{CalculateAncestorsAndCheckLimits(total_size, package.size(), - staged_ancestors, limits)}; + const auto ancestors{CalculateAncestorsAndCheckLimits(total_vsize, package.size(), + staged_ancestors, m_limits)}; // It's possible to overestimate the ancestor/descendant totals. if (!ancestors.has_value()) errString = "possibly " + util::ErrorString(ancestors).original; return ancestors.has_value(); diff --git a/src/txmempool.h b/src/txmempool.h index fcef19e807..cbeabb31fa 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -606,11 +606,11 @@ public: * @param[in] package Transaction package being evaluated for acceptance * to mempool. The transactions need not be direct * ancestors/descendants of each other. - * @param[in] limits Maximum number and size of ancestors and descendants + * @param[in] total_vsize Sum of virtual sizes for all transactions in package. * @param[out] errString Populated with error reason if a limit is hit. */ bool CheckPackageLimits(const Package& package, - const Limits& limits, + int64_t total_vsize, std::string &errString) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** Populate setDescendants with all in-mempool descendants of hash. @@ -848,96 +848,4 @@ public: /** Clear m_temp_added and m_non_base_coins. */ void Reset(); }; - -/** - * DisconnectedBlockTransactions - - * During the reorg, it's desirable to re-add previously confirmed transactions - * to the mempool, so that anything not re-confirmed in the new chain is - * available to be mined. 
However, it's more efficient to wait until the reorg - * is complete and process all still-unconfirmed transactions at that time, - * since we expect most confirmed transactions to (typically) still be - * confirmed in the new chain, and re-accepting to the memory pool is expensive - * (and therefore better to not do in the middle of reorg-processing). - * Instead, store the disconnected transactions (in order!) as we go, remove any - * that are included in blocks in the new chain, and then process the remaining - * still-unconfirmed transactions at the end. - */ - -// multi_index tag names -struct txid_index {}; -struct insertion_order {}; - -struct DisconnectedBlockTransactions { - typedef boost::multi_index_container< - CTransactionRef, - boost::multi_index::indexed_by< - // sorted by txid - boost::multi_index::hashed_unique< - boost::multi_index::tag<txid_index>, - mempoolentry_txid, - SaltedTxidHasher - >, - // sorted by order in the blockchain - boost::multi_index::sequenced< - boost::multi_index::tag<insertion_order> - > - > - > indexed_disconnected_transactions; - - // It's almost certainly a logic bug if we don't clear out queuedTx before - // destruction, as we add to it while disconnecting blocks, and then we - // need to re-process remaining transactions to ensure mempool consistency. - // For now, assert() that we've emptied out this object on destruction. - // This assert() can always be removed if the reorg-processing code were - // to be refactored such that this assumption is no longer true (for - // instance if there was some other way we cleaned up the mempool after a - // reorg, besides draining this object). - ~DisconnectedBlockTransactions() { assert(queuedTx.empty()); } - - indexed_disconnected_transactions queuedTx; - uint64_t cachedInnerUsage = 0; - - // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as - // no exact formula for boost::multi_index_contained is implemented. 
- size_t DynamicMemoryUsage() const { - return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void*)) * queuedTx.size() + cachedInnerUsage; - } - - void addTransaction(const CTransactionRef& tx) - { - queuedTx.insert(tx); - cachedInnerUsage += RecursiveDynamicUsage(tx); - } - - // Remove entries based on txid_index, and update memory usage. - void removeForBlock(const std::vector<CTransactionRef>& vtx) - { - // Short-circuit in the common case of a block being added to the tip - if (queuedTx.empty()) { - return; - } - for (auto const &tx : vtx) { - auto it = queuedTx.find(tx->GetHash()); - if (it != queuedTx.end()) { - cachedInnerUsage -= RecursiveDynamicUsage(*it); - queuedTx.erase(it); - } - } - } - - // Remove an entry by insertion_order index, and update memory usage. - void removeEntry(indexed_disconnected_transactions::index<insertion_order>::type::iterator entry) - { - cachedInnerUsage -= RecursiveDynamicUsage(*entry); - queuedTx.get<insertion_order>().erase(entry); - } - - void clear() - { - cachedInnerUsage = 0; - queuedTx.clear(); - } -}; - #endif // BITCOIN_TXMEMPOOL_H diff --git a/src/util/message.cpp b/src/util/message.cpp index ec845aeffb..1afb28cc10 100644 --- a/src/util/message.cpp +++ b/src/util/message.cpp @@ -47,7 +47,7 @@ MessageVerificationResult MessageVerify( return MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED; } - if (!(CTxDestination(PKHash(pubkey)) == destination)) { + if (!(PKHash(pubkey) == *std::get_if<PKHash>(&destination))) { return MessageVerificationResult::ERR_NOT_SIGNED; } diff --git a/src/util/vector.h b/src/util/vector.h index 9b9218e54f..1513562f1b 100644 --- a/src/util/vector.h +++ b/src/util/vector.h @@ -5,7 +5,9 @@ #ifndef BITCOIN_UTIL_VECTOR_H #define BITCOIN_UTIL_VECTOR_H +#include <functional> #include <initializer_list> +#include <optional> #include <type_traits> #include <utility> #include <vector> @@ -49,4 +51,33 @@ inline V Cat(V v1, const V& v2) return v1; } +/** Clear a vector (or std::deque) and 
release its allocated memory. */ +template<typename V> +inline void ClearShrink(V& v) noexcept +{ + // There are various ways to clear a vector and release its memory: + // + // 1. V{}.swap(v) + // 2. v = V{} + // 3. v = {}; v.shrink_to_fit(); + // 4. v.clear(); v.shrink_to_fit(); + // + // (2) does not appear to release memory in glibc debug mode, even if v.shrink_to_fit() + // follows. (3) and (4) rely on std::vector::shrink_to_fit, which is only a non-binding + // request. Therefore, we use method (1). + + V{}.swap(v); +} + +template<typename V, typename L> +inline std::optional<V> FindFirst(const std::vector<V>& vec, const L fnc) +{ + for (const auto& el : vec) { + if (fnc(el)) { + return el; + } + } + return std::nullopt; +} + #endif // BITCOIN_UTIL_VECTOR_H diff --git a/src/validation.cpp b/src/validation.cpp index 1d4786bb17..30b3dde74f 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5,6 +5,7 @@ #include <validation.h> +#include <kernel/chain.h> #include <kernel/coinstats.h> #include <kernel/mempool_persist.h> @@ -22,6 +23,7 @@ #include <flatfile.h> #include <hash.h> #include <kernel/chainparams.h> +#include <kernel/disconnected_transactions.h> #include <kernel/mempool_entry.h> #include <kernel/messagestartchars.h> #include <kernel/notifications_interface.h> @@ -67,6 +69,7 @@ #include <optional> #include <string> #include <utility> +#include <tuple> using kernel::CCoinsStats; using kernel::CoinStatsHashType; @@ -81,8 +84,6 @@ using node::CBlockIndexWorkComparator; using node::fReindex; using node::SnapshotMetadata; -/** Maximum kilobytes for transactions to store for processing during reorg */ -static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000; /** Time to wait between writing blocks/block index to disk. */ static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1}; /** Time to wait between flushing chainstate to disk. 
*/ @@ -297,28 +298,30 @@ void Chainstate::MaybeUpdateMempoolForReorg( AssertLockHeld(cs_main); AssertLockHeld(m_mempool->cs); std::vector<uint256> vHashUpdate; - // disconnectpool's insertion_order index sorts the entries from - // oldest to newest, but the oldest entry will be the last tx from the - // latest mined block that was disconnected. - // Iterate disconnectpool in reverse, so that we add transactions - // back to the mempool starting with the earliest transaction that had - // been previously seen in a block. - auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin(); - while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) { - // ignore validation errors in resurrected transactions - if (!fAddToMempool || (*it)->IsCoinBase() || - AcceptToMemoryPool(*this, *it, GetTime(), - /*bypass_limits=*/true, /*test_accept=*/false).m_result_type != - MempoolAcceptResult::ResultType::VALID) { - // If the transaction doesn't make it in to the mempool, remove any - // transactions that depend on it (which would now be orphans). - m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG); - } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) { - vHashUpdate.push_back((*it)->GetHash()); - } - ++it; - } - disconnectpool.queuedTx.clear(); + { + // disconnectpool is ordered so that the front is the most recently-confirmed + // transaction (the last tx of the block at the tip) in the disconnected chain. + // Iterate disconnectpool in reverse, so that we add transactions + // back to the mempool starting with the earliest transaction that had + // been previously seen in a block. 
+ const auto queuedTx = disconnectpool.take(); + auto it = queuedTx.rbegin(); + while (it != queuedTx.rend()) { + // ignore validation errors in resurrected transactions + if (!fAddToMempool || (*it)->IsCoinBase() || + AcceptToMemoryPool(*this, *it, GetTime(), + /*bypass_limits=*/true, /*test_accept=*/false).m_result_type != + MempoolAcceptResult::ResultType::VALID) { + // If the transaction doesn't make it in to the mempool, remove any + // transactions that depend on it (which would now be orphans). + m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG); + } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) { + vHashUpdate.push_back((*it)->GetHash()); + } + ++it; + } + } + // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have // no in-mempool children, which is generally not true when adding // previously-confirmed transactions back to the mempool. @@ -433,8 +436,7 @@ public: m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), - m_active_chainstate(active_chainstate), - m_limits{m_pool.m_limits} + m_active_chainstate(active_chainstate) { } @@ -635,6 +637,7 @@ private: // Enforce package mempool ancestor/descendant limits (distinct from individual // ancestor/descendant limits done in PreChecks). bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns, + int64_t total_vsize, PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Run the script checks using our policy flags. As this can be slow, we should @@ -684,8 +687,6 @@ private: Chainstate& m_active_chainstate; - CTxMemPool::Limits m_limits; - /** Whether the transaction(s) would replace any mempool transactions. If so, RBF rules apply. 
*/ bool m_rbf{false}; }; @@ -874,6 +875,11 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) if (!bypass_limits && !args.m_package_feerates && !CheckFeeRate(ws.m_vsize, ws.m_modified_fees, state)) return false; ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts); + + // Note that these modifications are only applicable to single transaction scenarios; + // carve-outs and package RBF are disabled for multi-transaction evaluations. + CTxMemPool::Limits maybe_rbf_limits = m_pool.m_limits; + // Calculate in-mempool ancestors, up to a limit. if (ws.m_conflicts.size() == 1) { // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we @@ -906,11 +912,11 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) assert(ws.m_iters_conflicting.size() == 1); CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin(); - m_limits.descendant_count += 1; - m_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants(); + maybe_rbf_limits.descendant_count += 1; + maybe_rbf_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants(); } - auto ancestors{m_pool.CalculateMemPoolAncestors(*entry, m_limits)}; + auto ancestors{m_pool.CalculateMemPoolAncestors(*entry, maybe_rbf_limits)}; if (!ancestors) { // If CalculateMemPoolAncestors fails second time, we want the original error string. 
// Contracting/payment channels CPFP carve-out: @@ -926,9 +932,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html CTxMemPool::Limits cpfp_carve_out_limits{ .ancestor_count = 2, - .ancestor_size_vbytes = m_limits.ancestor_size_vbytes, - .descendant_count = m_limits.descendant_count + 1, - .descendant_size_vbytes = m_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT, + .ancestor_size_vbytes = maybe_rbf_limits.ancestor_size_vbytes, + .descendant_count = maybe_rbf_limits.descendant_count + 1, + .descendant_size_vbytes = maybe_rbf_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT, }; const auto error_message{util::ErrorString(ancestors).original}; if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT) { @@ -1001,6 +1007,7 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws) } bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txns, + const int64_t total_vsize, PackageValidationState& package_state) { AssertLockHeld(cs_main); @@ -1011,7 +1018,7 @@ bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txn { return !m_pool.exists(GenTxid::Txid(tx->GetHash()));})); std::string err_string; - if (!m_pool.CheckPackageLimits(txns, m_limits, err_string)) { + if (!m_pool.CheckPackageLimits(txns, total_vsize, err_string)) { // This is a package-wide error, separate from an individual transaction error. return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string); } @@ -1166,7 +1173,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the // last calculation done in PreChecks, since package ancestors have already been submitted. 
{ - auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_limits)}; + auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_pool.m_limits)}; if(!ancestors) { results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail. @@ -1296,7 +1303,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: // because it's unnecessary. Also, CPFP carve out can increase the limit for individual // transactions, but this exemption is not extended to packages in CheckPackageLimits(). std::string err_string; - if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) { + if (txns.size() > 1 && !PackageMempoolChecks(txns, m_total_vsize, package_state)) { return PackageMempoolAcceptResult(package_state, std::move(results)); } @@ -2546,11 +2553,14 @@ bool Chainstate::FlushStateToDisk( if (nManualPruneHeight > 0) { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH); - m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height()); + m_blockman.FindFilesToPruneManual( + setFilesToPrune, + std::min(last_prune, nManualPruneHeight), + *this, m_chainman); } else { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH); - m_blockman.FindFilesToPrune(setFilesToPrune, m_chainman.GetParams().PruneAfterHeight(), m_chain.Height(), last_prune, m_chainman.IsInitialBlockDownload()); + m_blockman.FindFilesToPrune(setFilesToPrune, last_prune, *this, m_chainman); m_blockman.m_check_for_pruning = false; } if (!setFilesToPrune.empty()) { @@ -2589,7 +2599,11 @@ bool Chainstate::FlushStateToDisk( LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH); // First make sure all block and undo data is flushed to disk. 
- m_blockman.FlushBlockFile(); + // TODO: Handle return error, or add detailed comment why it is + // safe to not return an error upon failure. + if (!m_blockman.FlushChainstateBlockFile(m_chain.Height())) { + LogPrintLevel(BCLog::VALIDATION, BCLog::Level::Warning, "%s: Failed to flush block file.\n", __func__); + } } // Then update all block file information (which may refer to block and undo files). @@ -2636,7 +2650,7 @@ bool Chainstate::FlushStateToDisk( } if (full_flush_completed) { // Update best block in wallet (so we can detect restored wallets). - GetMainSignals().ChainStateFlushed(m_chain.GetLocator()); + GetMainSignals().ChainStateFlushed(this->GetRole(), m_chain.GetLocator()); } } catch (const std::runtime_error& e) { return FatalError(m_chainman.GetNotifications(), state, std::string("System error while flushing: ") + e.what()); @@ -2791,15 +2805,10 @@ bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra } if (disconnectpool && m_mempool) { - // Save transactions to re-add to mempool at end of reorg - for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) { - disconnectpool->addTransaction(*it); - } - while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) { - // Drop the earliest entry, and remove its children from the mempool. - auto it = disconnectpool->queuedTx.get<insertion_order>().begin(); - m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG); - disconnectpool->removeEntry(it); + // Save transactions to re-add to mempool at end of reorg. If any entries are evicted for + // exceeding memory limits, remove them and their descendants from the mempool. + for (auto&& evicted_tx : disconnectpool->AddTransactionsFromBlock(block.vtx)) { + m_mempool->removeRecursive(*evicted_tx, MemPoolRemovalReason::REORG); } } @@ -3049,7 +3058,7 @@ bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* // Disconnect active blocks which are no longer in the best chain. 
bool fBlocksDisconnected = false; - DisconnectedBlockTransactions disconnectpool; + DisconnectedBlockTransactions disconnectpool{MAX_DISCONNECTED_TX_POOL_SIZE * 1000}; while (m_chain.Tip() && m_chain.Tip() != pindexFork) { if (!DisconnectTip(state, &disconnectpool)) { // This is likely a fatal error, but keep the mempool consistent, @@ -3188,6 +3197,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< CBlockIndex *pindexMostWork = nullptr; CBlockIndex *pindexNewTip = nullptr; + bool exited_ibd{false}; do { // Block until the validation queue drains. This should largely // never happen in normal operation, however may happen during @@ -3201,6 +3211,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< LOCK(cs_main); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed LOCK(MempoolMutex()); + const bool was_in_ibd = m_chainman.IsInitialBlockDownload(); CBlockIndex* starting_tip = m_chain.Tip(); bool blocks_connected = false; do { @@ -3233,7 +3244,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) { assert(trace.pblock && trace.pindex); - GetMainSignals().BlockConnected(trace.pblock, trace.pindex); + GetMainSignals().BlockConnected(this->GetRole(), trace.pblock, trace.pindex); } // This will have been toggled in @@ -3248,16 +3259,21 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< if (!blocks_connected) return true; const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip); - bool fInitialDownload = m_chainman.IsInitialBlockDownload(); + bool still_in_ibd = m_chainman.IsInitialBlockDownload(); + + if (was_in_ibd && !still_in_ibd) { + // Active chainstate has exited IBD. + exited_ibd = true; + } // Notify external listeners about the new tip. 
// Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected - if (pindexFork != pindexNewTip) { + if (this == &m_chainman.ActiveChainstate() && pindexFork != pindexNewTip) { // Notify ValidationInterface subscribers - GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); + GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, still_in_ibd); // Always notify the UI if a new block tip was connected - if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(fInitialDownload), *pindexNewTip))) { + if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(still_in_ibd), *pindexNewTip))) { // Just breaking and returning success for now. This could // be changed to bubble up the kernel::Interrupted value to // the caller so the caller could distinguish between @@ -3268,8 +3284,25 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< } // When we reach this point, we switched to a new tip (stored in pindexNewTip). + if (exited_ibd) { + // If a background chainstate is in use, we may need to rebalance our + // allocation of caches once a chainstate exits initial block download. + LOCK(::cs_main); + m_chainman.MaybeRebalanceCaches(); + } + if (WITH_LOCK(::cs_main, return m_disabled)) { // Background chainstate has reached the snapshot base block, so exit. + + // Restart indexes to resume indexing for all blocks unique to the snapshot + // chain. This resumes indexing "in order" from where the indexing on the + // background validation chain left off. + // + // This cannot be done while holding cs_main (within + // MaybeCompleteSnapshotValidation) or a cs_main deadlock will occur. 
+ if (m_chainman.restart_indexes) { + m_chainman.restart_indexes(); + } break; } @@ -3383,7 +3416,7 @@ bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pinde // ActivateBestChain considers blocks already in m_chain // unconditionally valid already, so force disconnect away from it. - DisconnectedBlockTransactions disconnectpool; + DisconnectedBlockTransactions disconnectpool{MAX_DISCONNECTED_TX_POOL_SIZE * 1000}; bool ret = DisconnectTip(state, &disconnectpool); // DisconnectTip will add transactions to disconnectpool. // Adjust the mempool to be consistent with the new tip, adding @@ -3506,7 +3539,8 @@ void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) { void Chainstate::TryAddBlockIndexCandidate(CBlockIndex* pindex) { AssertLockHeld(cs_main); - // The block only is a candidate for the most-work-chain if it has more work than our current tip. + // The block only is a candidate for the most-work-chain if it has the same + // or more work than our current tip. if (m_chain.Tip() != nullptr && setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) { return; } @@ -4144,6 +4178,12 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& blo return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString()); } + Chainstate* bg_chain{WITH_LOCK(cs_main, return BackgroundSyncInProgress() ? 
m_ibd_chainstate.get() : nullptr)}; + BlockValidationState bg_state; + if (bg_chain && !bg_chain->ActivateBestChain(bg_state, block)) { + return error("%s: [background] ActivateBestChain failed (%s)", __func__, bg_state.ToString()); + } + return true; } @@ -4271,7 +4311,7 @@ VerifyDBResult CVerifyDB::VerifyDB( bool skipped_l3_checks{false}; LogPrintf("Verification progress: 0%%\n"); - const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash}; + const bool is_snapshot_cs{chainstate.m_from_snapshot_blockhash}; for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) { const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))); @@ -4502,7 +4542,7 @@ bool ChainstateManager::LoadBlockIndex() // Load block index from databases bool needs_init = fReindex; if (!fReindex) { - bool ret{m_blockman.LoadBlockIndexDB()}; + bool ret{m_blockman.LoadBlockIndexDB(SnapshotBlockhash())}; if (!ret) return false; m_blockman.ScanAndUnlinkAlreadyPrunedFiles(); @@ -4577,7 +4617,7 @@ bool Chainstate::LoadGenesisBlock() } void ChainstateManager::LoadExternalBlockFile( - FILE* fileIn, + CAutoFile& file_in, FlatFilePos* dbp, std::multimap<uint256, FlatFilePos>* blocks_with_unknown_parent) { @@ -4589,8 +4629,7 @@ void ChainstateManager::LoadExternalBlockFile( int nLoaded = 0; try { - // This takes over fileIn and calls fclose() on it in the BufferedFile destructor - BufferedFile blkdat{fileIn, 2 * MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE + 8, CLIENT_VERSION}; + BufferedFile blkdat{file_in, 2 * MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE + 8}; // nRewind indicates where to resume scanning in case something goes wrong, // such as a block fails to deserialize. 
uint64_t nRewind = blkdat.GetPos(); @@ -4799,6 +4838,10 @@ void ChainstateManager::CheckBlockIndex() CBlockIndex* pindexFirstAssumeValid = nullptr; // Oldest ancestor of pindex which has BLOCK_ASSUMED_VALID while (pindex != nullptr) { nNodes++; + if (pindex->pprev && pindex->nTx > 0) { + // nChainTx should increase monotonically + assert(pindex->pprev->nChainTx <= pindex->nChainTx); + } if (pindexFirstAssumeValid == nullptr && pindex->nStatus & BLOCK_ASSUMED_VALID) pindexFirstAssumeValid = pindex; if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) { @@ -5090,19 +5133,7 @@ Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool) return *m_active_chainstate; } -const AssumeutxoData* ExpectedAssumeutxo( - const int height, const CChainParams& chainparams) -{ - const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo(); - const auto assumeutxo_found = valid_assumeutxos_map.find(height); - - if (assumeutxo_found != valid_assumeutxos_map.end()) { - return &assumeutxo_found->second; - } - return nullptr; -} - -static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot) +[[nodiscard]] static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); @@ -5154,6 +5185,14 @@ bool ChainstateManager::ActivateSnapshot( return false; } + { + LOCK(::cs_main); + if (Assert(m_active_chainstate->GetMempool())->size() > 0) { + LogPrintf("[snapshot] can't activate a snapshot when mempool not empty\n"); + return false; + } + } + int64_t current_coinsdb_cache_size{0}; int64_t current_coinstip_cache_size{0}; @@ -5199,19 +5238,8 @@ bool ChainstateManager::ActivateSnapshot( static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC)); } - bool snapshot_ok = this->PopulateAndValidateSnapshot( - *snapshot_chainstate, coins_file, metadata); 
- - // If not in-memory, persist the base blockhash for use during subsequent - // initialization. - if (!in_memory) { - LOCK(::cs_main); - if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) { - snapshot_ok = false; - } - } - if (!snapshot_ok) { - LOCK(::cs_main); + auto cleanup_bad_snapshot = [&](const char* reason) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + LogPrintf("[snapshot] activation failed - %s\n", reason); this->MaybeRebalanceCaches(); // PopulateAndValidateSnapshot can return (in error) before the leveldb datadir @@ -5228,23 +5256,48 @@ bool ChainstateManager::ActivateSnapshot( } } return false; - } + }; - { + if (!this->PopulateAndValidateSnapshot(*snapshot_chainstate, coins_file, metadata)) { LOCK(::cs_main); - assert(!m_snapshot_chainstate); - m_snapshot_chainstate.swap(snapshot_chainstate); - const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(); - assert(chaintip_loaded); - - m_active_chainstate = m_snapshot_chainstate.get(); + return cleanup_bad_snapshot("population failed"); + } - LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString()); - LogPrintf("[snapshot] (%.2f MB)\n", - m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); + LOCK(::cs_main); // cs_main required for rest of snapshot activation. - this->MaybeRebalanceCaches(); + // Do a final check to ensure that the snapshot chainstate is actually a more + // work chain than the active chainstate; a user could have loaded a snapshot + // very late in the IBD process, and we wouldn't want to load a useless chainstate. + if (!CBlockIndexWorkComparator()(ActiveTip(), snapshot_chainstate->m_chain.Tip())) { + return cleanup_bad_snapshot("work does not exceed active chainstate"); } + // If not in-memory, persist the base blockhash for use during subsequent + // initialization. 
+ if (!in_memory) { + if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) { + return cleanup_bad_snapshot("could not write base blockhash"); + } + } + + assert(!m_snapshot_chainstate); + m_snapshot_chainstate.swap(snapshot_chainstate); + const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(); + assert(chaintip_loaded); + + // Transfer possession of the mempool to the snapshot chainstate. + // Mempool is empty at this point because we're still in IBD. + Assert(m_active_chainstate->m_mempool->size() == 0); + Assert(!m_snapshot_chainstate->m_mempool); + m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; + m_active_chainstate->m_mempool = nullptr; + m_active_chainstate = m_snapshot_chainstate.get(); + m_blockman.m_snapshot_height = this->GetSnapshotBaseHeight(); + + LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString()); + LogPrintf("[snapshot] (%.2f MB)\n", + m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); + + this->MaybeRebalanceCaches(); return true; } @@ -5286,7 +5339,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot( CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash)); if (!snapshot_start_block) { - // Needed for ComputeUTXOStats and ExpectedAssumeutxo to determine the + // Needed for ComputeUTXOStats to determine the // height and to avoid a crash when base_blockhash.IsNull() LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n", base_blockhash.ToString()); @@ -5294,7 +5347,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot( } int base_height = snapshot_start_block->nHeight; - auto maybe_au_data = ExpectedAssumeutxo(base_height, GetParams()); + const auto& maybe_au_data = GetParams().AssumeutxoForHeight(base_height); if (!maybe_au_data) { LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " @@ -5304,6 +5357,14 @@ bool 
ChainstateManager::PopulateAndValidateSnapshot( const AssumeutxoData& au_data = *maybe_au_data; + // This work comparison is a duplicate check with the one performed later in + // ActivateSnapshot(), but is done so that we avoid doing the long work of staging + // a snapshot that isn't actually usable. + if (WITH_LOCK(::cs_main, return !CBlockIndexWorkComparator()(ActiveTip(), snapshot_start_block))) { + LogPrintf("[snapshot] activation failed - height does not exceed active chainstate\n"); + return false; + } + COutPoint outpoint; Coin coin; const uint64_t coins_count = metadata.m_coins_count; @@ -5563,7 +5624,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() CCoinsViewDB& ibd_coins_db = m_ibd_chainstate->CoinsDB(); m_ibd_chainstate->ForceFlushStateToDisk(); - auto maybe_au_data = ExpectedAssumeutxo(curr_height, m_options.chainparams); + const auto& maybe_au_data = m_options.chainparams.AssumeutxoForHeight(curr_height); if (!maybe_au_data) { LogPrintf("[snapshot] assumeutxo data not found for height " "(%d) - refusing to validate snapshot\n", curr_height); @@ -5715,16 +5776,22 @@ bool ChainstateManager::DetectSnapshotChainstate(CTxMemPool* mempool) LogPrintf("[snapshot] detected active snapshot chainstate (%s) - loading\n", fs::PathToString(*path)); - this->ActivateExistingSnapshot(mempool, *base_blockhash); + this->ActivateExistingSnapshot(*base_blockhash); return true; } -Chainstate& ChainstateManager::ActivateExistingSnapshot(CTxMemPool* mempool, uint256 base_blockhash) +Chainstate& ChainstateManager::ActivateExistingSnapshot(uint256 base_blockhash) { assert(!m_snapshot_chainstate); m_snapshot_chainstate = - std::make_unique<Chainstate>(mempool, m_blockman, *this, base_blockhash); + std::make_unique<Chainstate>(nullptr, m_blockman, *this, base_blockhash); LogPrintf("[snapshot] switching active chainstate to %s\n", m_snapshot_chainstate->ToString()); + + // Mempool is empty at this point because we're still in IBD. 
+ Assert(m_active_chainstate->m_mempool->size() == 0); + Assert(!m_snapshot_chainstate->m_mempool); + m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; + m_active_chainstate->m_mempool = nullptr; m_active_chainstate = m_snapshot_chainstate.get(); return *m_snapshot_chainstate; } @@ -5741,15 +5808,20 @@ bool IsBIP30Unspendable(const CBlockIndex& block_index) (block_index.nHeight==91812 && block_index.GetBlockHash() == uint256S("0x00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f")); } -util::Result<void> Chainstate::InvalidateCoinsDBOnDisk() +static fs::path GetSnapshotCoinsDBPath(Chainstate& cs) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); // Should never be called on a non-snapshot chainstate. - assert(m_from_snapshot_blockhash); - auto storage_path_maybe = this->CoinsDB().StoragePath(); + assert(cs.m_from_snapshot_blockhash); + auto storage_path_maybe = cs.CoinsDB().StoragePath(); // Should never be called with a non-existent storage path. assert(storage_path_maybe); - fs::path snapshot_datadir = *storage_path_maybe; + return *storage_path_maybe; +} + +util::Result<void> Chainstate::InvalidateCoinsDBOnDisk() +{ + fs::path snapshot_datadir = GetSnapshotCoinsDBPath(*this); // Coins views no longer usable. m_coins_views.reset(); @@ -5780,6 +5852,33 @@ util::Result<void> Chainstate::InvalidateCoinsDBOnDisk() return {}; } +bool ChainstateManager::DeleteSnapshotChainstate() +{ + AssertLockHeld(::cs_main); + Assert(m_snapshot_chainstate); + Assert(m_ibd_chainstate); + + fs::path snapshot_datadir = GetSnapshotCoinsDBPath(*m_snapshot_chainstate); + if (!DeleteCoinsDBFromDisk(snapshot_datadir, /*is_snapshot=*/ true)) { + LogPrintf("Deletion of %s failed. 
Please remove it manually to continue reindexing.\n", + fs::PathToString(snapshot_datadir)); + return false; + } + m_active_chainstate = m_ibd_chainstate.get(); + m_snapshot_chainstate.reset(); + return true; +} + +ChainstateRole Chainstate::GetRole() const +{ + if (m_chainman.GetAll().size() <= 1) { + return ChainstateRole::NORMAL; + } + return (this != &m_chainman.ActiveChainstate()) ? + ChainstateRole::BACKGROUND : + ChainstateRole::ASSUMEDVALID; +} + const CBlockIndex* ChainstateManager::GetSnapshotBaseBlock() const { return m_active_chainstate ? m_active_chainstate->SnapshotBase() : nullptr; @@ -5877,3 +5976,38 @@ bool ChainstateManager::ValidatedSnapshotCleanup() } return true; } + +Chainstate& ChainstateManager::GetChainstateForIndexing() +{ + // We can't always return `m_ibd_chainstate` because after background validation + // has completed, `m_snapshot_chainstate == m_active_chainstate`, but it can be + // indexed. + return (this->GetAll().size() > 1) ? *m_ibd_chainstate : *m_active_chainstate; +} + +std::pair<int, int> ChainstateManager::GetPruneRange(const Chainstate& chainstate, int last_height_can_prune) +{ + if (chainstate.m_chain.Height() <= 0) { + return {0, 0}; + } + int prune_start{0}; + + if (this->GetAll().size() > 1 && m_snapshot_chainstate.get() == &chainstate) { + // Leave the blocks in the background IBD chain alone if we're pruning + // the snapshot chain. + prune_start = *Assert(GetSnapshotBaseHeight()) + 1; + } + + int max_prune = std::max<int>( + 0, chainstate.m_chain.Height() - static_cast<int>(MIN_BLOCKS_TO_KEEP)); + + // last block to prune is the lesser of (caller-specified height, MIN_BLOCKS_TO_KEEP from the tip) + // + // While you might be tempted to prune the background chainstate more + // aggressively (i.e. fewer MIN_BLOCKS_TO_KEEP), this won't work with index + // building - specifically blockfilterindex requires undo data, and if + // we don't maintain this trailing window, we hit indexing failures. 
+ int prune_end = std::min(last_height_can_prune, max_prune); + + return {prune_start, prune_end}; +} diff --git a/src/validation.h b/src/validation.h index f1ff6bb671..94a00e44a4 100644 --- a/src/validation.h +++ b/src/validation.h @@ -13,6 +13,7 @@ #include <arith_uint256.h> #include <attributes.h> #include <chain.h> +#include <kernel/chain.h> #include <consensus/amount.h> #include <deploymentstatus.h> #include <kernel/chainparams.h> @@ -50,7 +51,7 @@ class Chainstate; class CTxMemPool; class ChainstateManager; struct ChainTxData; -struct DisconnectedBlockTransactions; +class DisconnectedBlockTransactions; struct PrecomputedTransactionData; struct LockPoints; struct AssumeutxoData; @@ -511,6 +512,12 @@ public: ChainstateManager& chainman, std::optional<uint256> from_snapshot_blockhash = std::nullopt); + //! Return the current role of the chainstate. See `ChainstateManager` + //! documentation for a description of the different types of chainstates. + //! + //! @sa ChainstateRole + ChainstateRole GetRole() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + /** * Initialize the CoinsViews UTXO set database management data structures. The in-memory * cache is initialized separately. @@ -607,7 +614,6 @@ public: bool ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - /** * Update the on-disk chain state. * The caches and indexes are flushed depending on the mode we're called with @@ -849,9 +855,6 @@ private: //! Points to either the ibd or snapshot chainstate; indicates our //! most-work chain. //! - //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). - //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into //! that call. @@ -882,13 +885,6 @@ private: /** Most recent headers presync progress update, for rate-limiting. 
*/ std::chrono::time_point<std::chrono::steady_clock> m_last_presync_update GUARDED_BY(::cs_main) {}; - //! Returns nullptr if no snapshot has been loaded. - const CBlockIndex* GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - - //! Return the height of the base block of the snapshot in use, if one exists, else - //! nullopt. - std::optional<int> GetSnapshotBaseHeight() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - std::array<ThresholdConditionCache, VERSIONBITS_NUM_BITS> m_warningcache GUARDED_BY(::cs_main); //! Return true if a chainstate is considered usable. @@ -905,6 +901,10 @@ public: explicit ChainstateManager(const util::SignalInterrupt& interrupt, Options options, node::BlockManager::Options blockman_options); + //! Function to restart active indexes; set dynamically to avoid a circular + //! dependency on `base/index.cpp`. + std::function<void()> restart_indexes = std::function<void()>(); + const CChainParams& GetParams() const { return m_options.chainparams; } const Consensus::Params& GetConsensus() const { return m_options.chainparams.GetConsensus(); } bool ShouldCheckBlockIndex() const { return *Assert(m_options.check_block_index); } @@ -1035,12 +1035,25 @@ public: //! Otherwise, revert to using the ibd chainstate and shutdown. SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! Returns nullptr if no snapshot has been loaded. + const CBlockIndex* GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! The most-work chain. Chainstate& ActiveChainstate() const; CChain& ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChainstate().m_chain; } int ActiveHeight() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChain().Height(); } CBlockIndex* ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChain().Tip(); } + //! 
The state of a background sync (for net processing) + bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { + return IsUsable(m_snapshot_chainstate.get()) && IsUsable(m_ibd_chainstate.get()); + } + + //! The tip of the background sync chain + const CBlockIndex* GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { + return BackgroundSyncInProgress() ? m_ibd_chainstate->m_chain.Tip() : nullptr; + } + node::BlockMap& BlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); @@ -1087,14 +1100,14 @@ public: * -loadblock= option. There's no unknown-parent tracking, so the last two arguments are omitted. * * - * @param[in] fileIn FILE handle to file containing blocks to read + * @param[in] file_in File containing blocks to read * @param[in] dbp (optional) Disk block position (only for reindex) * @param[in,out] blocks_with_unknown_parent (optional) Map of disk positions for blocks with * unknown parent, key is parent block hash * (only used for reindex) * */ void LoadExternalBlockFile( - FILE* fileIn, + CAutoFile& file_in, FlatFilePos* dbp = nullptr, std::multimap<uint256, FlatFilePos>* blocks_with_unknown_parent = nullptr); @@ -1194,10 +1207,13 @@ public: void ResetChainstates() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! Remove the snapshot-based chainstate and all on-disk artifacts. + //! Used when reindex{-chainstate} is called during snapshot use. + [[nodiscard]] bool DeleteSnapshotChainstate() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! Switch the active chainstate to one based on a UTXO snapshot that was loaded //! previously. - Chainstate& ActivateExistingSnapshot(CTxMemPool* mempool, uint256 base_blockhash) - EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + Chainstate& ActivateExistingSnapshot(uint256 base_blockhash) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); //! If we have validated a snapshot chain during this runtime, copy its //! 
chainstate directory over to the main `chainstate` location, completing @@ -1210,6 +1226,26 @@ public: //! @sa node/chainstate:LoadChainstate() bool ValidatedSnapshotCleanup() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! @returns the chainstate that indexes should consult when ensuring that an + //! index is synced with a chain where we can expect block index entries to have + //! BLOCK_HAVE_DATA beneath the tip. + //! + //! In other words, give us the chainstate for which we can reasonably expect + //! that all blocks beneath the tip have been indexed. In practice this means + //! when using an assumed-valid chainstate based upon a snapshot, return only the + //! fully validated chain. + Chainstate& GetChainstateForIndexing() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + + //! Return the [start, end] (inclusive) of block heights we can prune. + //! + //! start > end is possible, meaning no blocks can be pruned. + std::pair<int, int> GetPruneRange( + const Chainstate& chainstate, int last_height_can_prune) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + + //! Return the height of the base block of the snapshot in use, if one exists, else + //! nullopt. + std::optional<int> GetSnapshotBaseHeight() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + ~ChainstateManager(); }; @@ -1232,15 +1268,6 @@ bool DeploymentEnabled(const ChainstateManager& chainman, DEP dep) return DeploymentEnabled(chainman.GetConsensus(), dep); } -/** - * Return the expected assumeutxo value for a given height, if one exists. - * - * @param[in] height Get the assumeutxo value for this height. - * - * @returns empty if no assumeutxo configuration exists for the given height. 
- */ -const AssumeutxoData* ExpectedAssumeutxo(const int height, const CChainParams& params); - /** Identifies blocks that overwrote an existing coinbase output in the UTXO set (see BIP30) */ bool IsBIP30Repeat(const CBlockIndex& block_index); diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp index d344c8bfbd..9241395ad5 100644 --- a/src/validationinterface.cpp +++ b/src/validationinterface.cpp @@ -8,6 +8,7 @@ #include <attributes.h> #include <chain.h> #include <consensus/validation.h> +#include <kernel/chain.h> #include <logging.h> #include <primitives/block.h> #include <primitives/transaction.h> @@ -223,9 +224,9 @@ void CMainSignals::TransactionRemovedFromMempool(const CTransactionRef& tx, MemP RemovalReasonToString(reason)); } -void CMainSignals::BlockConnected(const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) { - auto event = [pblock, pindex, this] { - m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.BlockConnected(pblock, pindex); }); +void CMainSignals::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) { + auto event = [role, pblock, pindex, this] { + m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.BlockConnected(role, pblock, pindex); }); }; ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s block height=%d", __func__, pblock->GetHash().ToString(), @@ -242,9 +243,9 @@ void CMainSignals::BlockDisconnected(const std::shared_ptr<const CBlock>& pblock pindex->nHeight); } -void CMainSignals::ChainStateFlushed(const CBlockLocator &locator) { - auto event = [locator, this] { - m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.ChainStateFlushed(locator); }); +void CMainSignals::ChainStateFlushed(ChainstateRole role, const CBlockLocator &locator) { + auto event = [role, locator, this] { + m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.ChainStateFlushed(role, locator); }); }; 
ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s", __func__, locator.IsNull() ? "null" : locator.vHave.front().ToString()); diff --git a/src/validationinterface.h b/src/validationinterface.h index 8c20cc8ffb..eb15aa4d5f 100644 --- a/src/validationinterface.h +++ b/src/validationinterface.h @@ -7,6 +7,7 @@ #define BITCOIN_VALIDATIONINTERFACE_H #include <kernel/cs_main.h> +#include <kernel/chain.h> #include <primitives/transaction.h> // CTransaction(Ref) #include <sync.h> @@ -87,7 +88,7 @@ protected: * but may not be called on every intermediate tip. If the latter behavior is desired, * subscribe to BlockConnected() instead. * - * Called on a background thread. + * Called on a background thread. Only called for the active chainstate. */ virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {} /** @@ -136,11 +137,12 @@ protected: * * Called on a background thread. */ - virtual void BlockConnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {} + virtual void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {} /** * Notifies listeners of a block being disconnected * - * Called on a background thread. + * Called on a background thread. Only called for the active chainstate, since + * background chainstates should never disconnect blocks. */ virtual void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) {} /** @@ -159,17 +161,18 @@ protected: * * Called on a background thread. */ - virtual void ChainStateFlushed(const CBlockLocator &locator) {} + virtual void ChainStateFlushed(ChainstateRole role, const CBlockLocator &locator) {} /** * Notifies listeners of a block validation result. 
* If the provided BlockValidationState IsValid, the provided block * is guaranteed to be the current best block at the time the - * callback was generated (not necessarily now) + * callback was generated (not necessarily now). */ virtual void BlockChecked(const CBlock&, const BlockValidationState&) {} /** * Notifies listeners that a block which builds directly on our current tip - * has been received and connected to the headers tree, though not validated yet */ + * has been received and connected to the headers tree, though not validated yet. + */ virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& block) {}; friend class CMainSignals; friend class ValidationInterfaceTest; @@ -199,9 +202,9 @@ public: void UpdatedBlockTip(const CBlockIndex *, const CBlockIndex *, bool fInitialDownload); void TransactionAddedToMempool(const CTransactionRef&, uint64_t mempool_sequence); void TransactionRemovedFromMempool(const CTransactionRef&, MemPoolRemovalReason, uint64_t mempool_sequence); - void BlockConnected(const std::shared_ptr<const CBlock> &, const CBlockIndex *pindex); + void BlockConnected(ChainstateRole, const std::shared_ptr<const CBlock> &, const CBlockIndex *pindex); void BlockDisconnected(const std::shared_ptr<const CBlock> &, const CBlockIndex* pindex); - void ChainStateFlushed(const CBlockLocator &); + void ChainStateFlushed(ChainstateRole, const CBlockLocator &); void BlockChecked(const CBlock&, const BlockValidationState&); void NewPoWValidBlock(const CBlockIndex *, const std::shared_ptr<const CBlock>&); }; diff --git a/src/wallet/coinselection.cpp b/src/wallet/coinselection.cpp index d6b9b68e1f..391e120932 100644 --- a/src/wallet/coinselection.cpp +++ b/src/wallet/coinselection.cpp @@ -7,6 +7,7 @@ #include <common/system.h> #include <consensus/amount.h> #include <consensus/consensus.h> +#include <interfaces/chain.h> #include <logging.h> #include <policy/feerate.h> #include <util/check.h> @@ -449,19 +450,19 @@ void 
OutputGroupTypeMap::Push(const OutputGroup& group, OutputType type, bool in } } -CAmount GetSelectionWaste(const std::set<std::shared_ptr<COutput>>& inputs, CAmount change_cost, CAmount target, bool use_effective_value) +CAmount SelectionResult::GetSelectionWaste(CAmount change_cost, CAmount target, bool use_effective_value) { // This function should not be called with empty inputs as that would mean the selection failed - assert(!inputs.empty()); + assert(!m_selected_inputs.empty()); // Always consider the cost of spending an input now vs in the future. CAmount waste = 0; - CAmount selected_effective_value = 0; - for (const auto& coin_ptr : inputs) { + for (const auto& coin_ptr : m_selected_inputs) { const COutput& coin = *coin_ptr; waste += coin.GetFee() - coin.long_term_fee; - selected_effective_value += use_effective_value ? coin.GetEffectiveValue() : coin.txout.nValue; } + // Bump fee of whole selection may diverge from sum of individual bump fees + waste -= bump_fee_group_discount; if (change_cost) { // Consider the cost of making change and spending it in the future @@ -470,6 +471,7 @@ CAmount GetSelectionWaste(const std::set<std::shared_ptr<COutput>>& inputs, CAmo waste += change_cost; } else { // When we are not making change (change_cost == 0), consider the excess we are throwing away to fees + CAmount selected_effective_value = use_effective_value ? 
GetSelectedEffectiveValue() : GetSelectedValue(); assert(selected_effective_value >= target); waste += selected_effective_value - target; } @@ -488,14 +490,22 @@ CAmount GenerateChangeTarget(const CAmount payment_value, const CAmount change_f } } +void SelectionResult::SetBumpFeeDiscount(const CAmount discount) +{ + // Overlapping ancestry can only lower the fees, not increase them + assert (discount >= 0); + bump_fee_group_discount = discount; +} + + void SelectionResult::ComputeAndSetWaste(const CAmount min_viable_change, const CAmount change_cost, const CAmount change_fee) { const CAmount change = GetChange(min_viable_change, change_fee); if (change > 0) { - m_waste = GetSelectionWaste(m_selected_inputs, change_cost, m_target, m_use_effective); + m_waste = GetSelectionWaste(change_cost, m_target, m_use_effective); } else { - m_waste = GetSelectionWaste(m_selected_inputs, 0, m_target, m_use_effective); + m_waste = GetSelectionWaste(0, m_target, m_use_effective); } } @@ -511,7 +521,12 @@ CAmount SelectionResult::GetSelectedValue() const CAmount SelectionResult::GetSelectedEffectiveValue() const { - return std::accumulate(m_selected_inputs.cbegin(), m_selected_inputs.cend(), CAmount{0}, [](CAmount sum, const auto& coin) { return sum + coin->GetEffectiveValue(); }); + return std::accumulate(m_selected_inputs.cbegin(), m_selected_inputs.cend(), CAmount{0}, [](CAmount sum, const auto& coin) { return sum + coin->GetEffectiveValue(); }) + bump_fee_group_discount; +} + +CAmount SelectionResult::GetTotalBumpFees() const +{ + return std::accumulate(m_selected_inputs.cbegin(), m_selected_inputs.cend(), CAmount{0}, [](CAmount sum, const auto& coin) { return sum + coin->ancestor_bump_fees; }) - bump_fee_group_discount; } void SelectionResult::Clear() diff --git a/src/wallet/coinselection.h b/src/wallet/coinselection.h index afd868fc89..20b2461c04 100644 --- a/src/wallet/coinselection.h +++ b/src/wallet/coinselection.h @@ -17,6 +17,7 @@ #include <optional> + namespace wallet { 
//! lower bound for randomly-chosen target change amount static constexpr CAmount CHANGE_LOWER{50000}; @@ -26,10 +27,10 @@ static constexpr CAmount CHANGE_UPPER{1000000}; /** A UTXO under consideration for use in funding a new transaction. */ struct COutput { private: - /** The output's value minus fees required to spend it.*/ + /** The output's value minus fees required to spend it and bump its unconfirmed ancestors to the target feerate. */ std::optional<CAmount> effective_value; - /** The fee required to spend this output at the transaction's target feerate. */ + /** The fee required to spend this output at the transaction's target feerate and to bump its unconfirmed ancestors to the target feerate. */ std::optional<CAmount> fee; public: @@ -71,6 +72,9 @@ public: /** The fee required to spend this output at the consolidation feerate. */ CAmount long_term_fee{0}; + /** The fee necessary to bump this UTXO's ancestor transactions to the target feerate */ + CAmount ancestor_bump_fees{0}; + COutput(const COutPoint& outpoint, const CTxOut& txout, int depth, int input_bytes, bool spendable, bool solvable, bool safe, int64_t time, bool from_me, const std::optional<CFeeRate> feerate = std::nullopt) : outpoint{outpoint}, txout{txout}, @@ -83,6 +87,7 @@ public: from_me{from_me} { if (feerate) { + // base fee without considering potential unconfirmed ancestors fee = input_bytes < 0 ? 
0 : feerate.value().GetFee(input_bytes); effective_value = txout.nValue - fee.value(); } @@ -104,6 +109,16 @@ public: return outpoint < rhs.outpoint; } + void ApplyBumpFee(CAmount bump_fee) + { + assert(bump_fee >= 0); + ancestor_bump_fees = bump_fee; + assert(fee); + *fee += bump_fee; + // Note: assert(effective_value - bump_fee == nValue - fee.value()); + effective_value = txout.nValue - fee.value(); + } + CAmount GetFee() const { assert(fee.has_value()); @@ -275,26 +290,6 @@ struct OutputGroupTypeMap typedef std::map<CoinEligibilityFilter, OutputGroupTypeMap> FilteredOutputGroups; -/** Compute the waste for this result given the cost of change - * and the opportunity cost of spending these inputs now vs in the future. - * If change exists, waste = change_cost + inputs * (effective_feerate - long_term_feerate) - * If no change, waste = excess + inputs * (effective_feerate - long_term_feerate) - * where excess = selected_effective_value - target - * change_cost = effective_feerate * change_output_size + long_term_feerate * change_spend_size - * - * Note this function is separate from SelectionResult for the tests. - * - * @param[in] inputs The selected inputs - * @param[in] change_cost The cost of creating change and spending it in the future. - * Only used if there is change, in which case it must be positive. - * Must be 0 if there is no change. - * @param[in] target The amount targeted by the coin selection algorithm. - * @param[in] use_effective_value Whether to use the input's effective value (when true) or the real value (when false). - * @return The waste - */ -[[nodiscard]] CAmount GetSelectionWaste(const std::set<std::shared_ptr<COutput>>& inputs, CAmount change_cost, CAmount target, bool use_effective_value = true); - - /** Choose a random change target for each transaction to make it harder to fingerprint the Core * wallet based on the change output values of transactions it creates. 
* Change target covers at least change fees and adds a random value on top of it. @@ -336,6 +331,8 @@ private: std::optional<CAmount> m_waste; /** Total weight of the selected inputs */ int m_weight{0}; + /** How much individual inputs overestimated the bump fees for the shared ancestry */ + CAmount bump_fee_group_discount{0}; template<typename T> void InsertInputs(const T& inputs) @@ -348,6 +345,22 @@ private: } } + /** Compute the waste for this result given the cost of change + * and the opportunity cost of spending these inputs now vs in the future. + * If change exists, waste = change_cost + inputs * (effective_feerate - long_term_feerate) + * If no change, waste = excess + inputs * (effective_feerate - long_term_feerate) + * where excess = selected_effective_value - target + * change_cost = effective_feerate * change_output_size + long_term_feerate * change_spend_size + * + * @param[in] change_cost The cost of creating change and spending it in the future. + * Only used if there is change, in which case it must be positive. + * Must be 0 if there is no change. + * @param[in] target The amount targeted by the coin selection algorithm. + * @param[in] use_effective_value Whether to use the input's effective value (when true) or the real value (when false). 
+ * @return The waste + */ + [[nodiscard]] CAmount GetSelectionWaste(CAmount change_cost, CAmount target, bool use_effective_value = true); + public: explicit SelectionResult(const CAmount target, SelectionAlgorithm algo) : m_target(target), m_algo(algo) {} @@ -359,11 +372,16 @@ public: [[nodiscard]] CAmount GetSelectedEffectiveValue() const; + [[nodiscard]] CAmount GetTotalBumpFees() const; + void Clear(); void AddInput(const OutputGroup& group); void AddInputs(const std::set<std::shared_ptr<COutput>>& inputs, bool subtract_fee_outputs); + /** How much individual inputs overestimated the bump fees for shared ancestries */ + void SetBumpFeeDiscount(const CAmount discount); + /** Calculates and stores the waste for this selection via GetSelectionWaste */ void ComputeAndSetWaste(const CAmount min_viable_change, const CAmount change_cost, const CAmount change_fee); [[nodiscard]] CAmount GetWaste() const; diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp index 3720d144eb..f4cb4bbd66 100644 --- a/src/wallet/feebumper.cpp +++ b/src/wallet/feebumper.cpp @@ -63,7 +63,7 @@ static feebumper::Result PreconditionChecks(const CWallet& wallet, const CWallet } //! 
Check if the user provided a valid feeRate -static feebumper::Result CheckFeeRate(const CWallet& wallet, const CFeeRate& newFeerate, const int64_t maxTxSize, CAmount old_fee, std::vector<bilingual_str>& errors) +static feebumper::Result CheckFeeRate(const CWallet& wallet, const CMutableTransaction& mtx, const CFeeRate& newFeerate, const int64_t maxTxSize, CAmount old_fee, std::vector<bilingual_str>& errors) { // check that fee rate is higher than mempool's minimum fee // (no point in bumping fee if we know that the new tx won't be accepted to the mempool) @@ -80,7 +80,17 @@ static feebumper::Result CheckFeeRate(const CWallet& wallet, const CFeeRate& new return feebumper::Result::WALLET_ERROR; } - CAmount new_total_fee = newFeerate.GetFee(maxTxSize); + std::vector<COutPoint> reused_inputs; + reused_inputs.reserve(mtx.vin.size()); + for (const CTxIn& txin : mtx.vin) { + reused_inputs.push_back(txin.prevout); + } + + std::optional<CAmount> combined_bump_fee = wallet.chain().CalculateCombinedBumpFee(reused_inputs, newFeerate); + if (!combined_bump_fee.has_value()) { + errors.push_back(strprintf(Untranslated("Failed to calculate bump fees, because unconfirmed UTXOs depend on enormous cluster of unconfirmed transactions."))); + } + CAmount new_total_fee = newFeerate.GetFee(maxTxSize) + combined_bump_fee.value(); CFeeRate incrementalRelayFee = std::max(wallet.chain().relayIncrementalFee(), CFeeRate(WALLET_INCREMENTAL_RELAY_FEE)); @@ -152,11 +162,11 @@ bool TransactionCanBeBumped(const CWallet& wallet, const uint256& txid) } Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCoinControl& coin_control, std::vector<bilingual_str>& errors, - CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx, bool require_mine, const std::vector<CTxOut>& outputs, std::optional<uint32_t> reduce_output) + CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx, bool require_mine, const std::vector<CTxOut>& outputs, std::optional<uint32_t> 
original_change_index) { - // Cannot both specify new outputs and an output to reduce - if (!outputs.empty() && reduce_output.has_value()) { - errors.push_back(Untranslated("Cannot specify both new outputs to use and an output index to reduce")); + // For now, cannot specify both new outputs to use and an output index to send change + if (!outputs.empty() && original_change_index.has_value()) { + errors.push_back(Untranslated("The options 'outputs' and 'original_change_index' are incompatible. You can only either specify a new set of outputs, or designate a change output to be recycled.")); return Result::INVALID_PARAMETER; } @@ -172,8 +182,8 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo } const CWalletTx& wtx = it->second; - // Make sure that reduce_output is valid - if (reduce_output.has_value() && reduce_output.value() >= wtx.tx->vout.size()) { + // Make sure that original_change_index is valid + if (original_change_index.has_value() && original_change_index.value() >= wtx.tx->vout.size()) { errors.push_back(Untranslated("Change position is out of range")); return Result::INVALID_PARAMETER; } @@ -247,12 +257,12 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo const auto& txouts = outputs.empty() ? wtx.tx->vout : outputs; for (size_t i = 0; i < txouts.size(); ++i) { const CTxOut& output = txouts.at(i); - if (reduce_output.has_value() ? reduce_output.value() == i : OutputIsChange(wallet, output)) { - CTxDestination change_dest; - ExtractDestination(output.scriptPubKey, change_dest); - new_coin_control.destChange = change_dest; + CTxDestination dest; + ExtractDestination(output.scriptPubKey, dest); + if (original_change_index.has_value() ? 
original_change_index.value() == i : OutputIsChange(wallet, output)) { + new_coin_control.destChange = dest; } else { - CRecipient recipient = {output.scriptPubKey, output.nValue, false}; + CRecipient recipient = {dest, output.nValue, false}; recipients.push_back(recipient); } new_outputs_value += output.nValue; @@ -268,7 +278,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo // Add change as recipient with SFFO flag enabled, so fees are deduced from it. // If the output differs from the original tx output (because the user customized it) a new change output will be created. - recipients.emplace_back(CRecipient{GetScriptForDestination(new_coin_control.destChange), new_outputs_value, /*fSubtractFeeFromAmount=*/true}); + recipients.emplace_back(CRecipient{new_coin_control.destChange, new_outputs_value, /*fSubtractFeeFromAmount=*/true}); new_coin_control.destChange = CNoDestination(); } @@ -283,7 +293,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo } temp_mtx.vout = txouts; const int64_t maxTxSize{CalculateMaximumSignedTxSize(CTransaction(temp_mtx), &wallet, &new_coin_control).vsize}; - Result res = CheckFeeRate(wallet, *new_coin_control.m_feerate, maxTxSize, old_fee, errors); + Result res = CheckFeeRate(wallet, temp_mtx, *new_coin_control.m_feerate, maxTxSize, old_fee, errors); if (res != Result::OK) { return res; } diff --git a/src/wallet/feebumper.h b/src/wallet/feebumper.h index f00bf15730..d3d43861ef 100644 --- a/src/wallet/feebumper.h +++ b/src/wallet/feebumper.h @@ -44,7 +44,7 @@ bool TransactionCanBeBumped(const CWallet& wallet, const uint256& txid); * @param[out] mtx The bump transaction itself * @param[in] require_mine Whether the original transaction must consist of inputs that can be spent by the wallet * @param[in] outputs Vector of new outputs to replace the bumped transaction's outputs - * @param[in] reduce_output The position of the change output to deduct the fee from in the 
transaction being bumped + * @param[in] original_change_index The position of the change output to deduct the fee from in the transaction being bumped */ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, @@ -55,7 +55,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, CMutableTransaction& mtx, bool require_mine, const std::vector<CTxOut>& outputs, - std::optional<uint32_t> reduce_output = std::nullopt); + std::optional<uint32_t> original_change_index = std::nullopt); //! Sign the new transaction, //! @return false if the tx couldn't be found or if it was diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp index 5f2aee6923..65285187f4 100644 --- a/src/wallet/interfaces.cpp +++ b/src/wallet/interfaces.cpp @@ -41,6 +41,7 @@ using interfaces::Wallet; using interfaces::WalletAddress; using interfaces::WalletBalances; using interfaces::WalletLoader; +using interfaces::WalletMigrationResult; using interfaces::WalletOrderForm; using interfaces::WalletTx; using interfaces::WalletTxOut; @@ -66,6 +67,7 @@ WalletTx MakeWalletTx(CWallet& wallet, const CWalletTx& wtx) result.txout_address_is_mine.reserve(wtx.tx->vout.size()); for (const auto& txout : wtx.tx->vout) { result.txout_is_mine.emplace_back(wallet.IsMine(txout)); + result.txout_is_change.push_back(OutputIsChange(wallet, txout)); result.txout_address.emplace_back(); result.txout_address_is_mine.emplace_back(ExtractDestination(txout.scriptPubKey, result.txout_address.back()) ? wallet.IsMine(result.txout_address.back()) : @@ -630,6 +632,18 @@ public: return util::Error{error}; } } + util::Result<WalletMigrationResult> migrateWallet(const std::string& name, const SecureString& passphrase) override + { + auto res = wallet::MigrateLegacyToDescriptor(name, passphrase, m_context); + if (!res) return util::Error{util::ErrorString(res)}; + WalletMigrationResult out{ + .wallet = MakeWallet(m_context, res->wallet), + .watchonly_wallet_name = res->watchonly_wallet ? 
std::make_optional(res->watchonly_wallet->GetName()) : std::nullopt, + .solvables_wallet_name = res->solvables_wallet ? std::make_optional(res->solvables_wallet->GetName()) : std::nullopt, + .backup_path = res->backup_path, + }; + return {std::move(out)}; // std::move to work around clang bug + } std::string getWalletDir() override { return fs::PathToString(GetWalletDir()); diff --git a/src/wallet/rpc/addresses.cpp b/src/wallet/rpc/addresses.cpp index c1b99a4f97..e9b93afc30 100644 --- a/src/wallet/rpc/addresses.cpp +++ b/src/wallet/rpc/addresses.cpp @@ -427,6 +427,7 @@ public: explicit DescribeWalletAddressVisitor(const SigningProvider* _provider) : provider(_provider) {} UniValue operator()(const CNoDestination& dest) const { return UniValue(UniValue::VOBJ); } + UniValue operator()(const PubKeyDestination& dest) const { return UniValue(UniValue::VOBJ); } UniValue operator()(const PKHash& pkhash) const { diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp index c4206e9897..6b96fc4e49 100644 --- a/src/wallet/rpc/spend.cpp +++ b/src/wallet/rpc/spend.cpp @@ -39,7 +39,6 @@ static void ParseRecipients(const UniValue& address_amounts, const UniValue& sub } destinations.insert(dest); - CScript script_pub_key = GetScriptForDestination(dest); CAmount amount = AmountFromValue(address_amounts[i++]); bool subtract_fee = false; @@ -50,7 +49,7 @@ static void ParseRecipients(const UniValue& address_amounts, const UniValue& sub } } - CRecipient recipient = {script_pub_key, amount, subtract_fee}; + CRecipient recipient = {dest, amount, subtract_fee}; recipients.push_back(recipient); } } @@ -1017,10 +1016,14 @@ static RPCHelpMan bumpfee_helper(std::string method_name) {"outputs", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "The outputs specified as key-value pairs.\n" "Each key may only appear once, i.e. 
there can only be one 'data' output, and no address may be duplicated.\n" "At least one output of either type must be specified.\n" - "Cannot be provided if 'reduce_output' is specified.", + "Cannot be provided if 'original_change_index' is specified.", OutputsDoc(), RPCArgOptions{.skip_type_check = true}}, - {"reduce_output", RPCArg::Type::NUM, RPCArg::DefaultHint{"not set, detect change automatically"}, "The 0-based index of the output from which the additional fees will be deducted. In general, this should be the position of change output. Cannot be provided if 'outputs' is specified."}, + {"original_change_index", RPCArg::Type::NUM, RPCArg::DefaultHint{"not set, detect change automatically"}, "The 0-based index of the change output on the original transaction. " + "The indicated output will be recycled into the new change output on the bumped transaction. " + "The remainder after paying the recipients and fees will be sent to the output script of the " + "original change output. The change output’s amount can increase if bumping the transaction " + "adds new inputs, otherwise it will decrease. 
Cannot be used in combination with the 'outputs' option."}, }, RPCArgOptions{.oneline_description="options"}}, }, @@ -1059,7 +1062,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name) coin_control.m_signal_bip125_rbf = true; std::vector<CTxOut> outputs; - std::optional<uint32_t> reduce_output; + std::optional<uint32_t> original_change_index; if (!request.params[1].isNull()) { UniValue options = request.params[1]; @@ -1071,7 +1074,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name) {"replaceable", UniValueType(UniValue::VBOOL)}, {"estimate_mode", UniValueType(UniValue::VSTR)}, {"outputs", UniValueType()}, // will be checked by AddOutputs() - {"reduce_output", UniValueType(UniValue::VNUM)}, + {"original_change_index", UniValueType(UniValue::VNUM)}, }, true, true); @@ -1096,8 +1099,8 @@ static RPCHelpMan bumpfee_helper(std::string method_name) outputs = tempTx.vout; } - if (options.exists("reduce_output")) { - reduce_output = options["reduce_output"].getInt<uint32_t>(); + if (options.exists("original_change_index")) { + original_change_index = options["original_change_index"].getInt<uint32_t>(); } } @@ -1116,7 +1119,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name) CMutableTransaction mtx; feebumper::Result res; // Targeting feerate bump. 
- res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx, /*require_mine=*/ !want_psbt, outputs, reduce_output); + res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx, /*require_mine=*/ !want_psbt, outputs, original_change_index); if (res != feebumper::Result::OK) { switch(res) { case feebumper::Result::INVALID_ADDRESS_OR_KEY: diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 1f510d1c58..20f735da12 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -1716,8 +1716,23 @@ std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPub } // All watchonly scripts are raw - spks.insert(setWatchOnly.begin(), setWatchOnly.end()); + for (const CScript& script : setWatchOnly) { + // As the legacy wallet allowed to import any script, we need to verify the validity here. + // LegacyScriptPubKeyMan::IsMine() return 'ISMINE_NO' for invalid or not watched scripts (IsMineResult::INVALID or IsMineResult::NO). + // e.g. a "sh(sh(pkh()))" which legacy wallets allowed to import!. 
+ if (IsMine(script) != ISMINE_NO) spks.insert(script); + } + + return spks; +} +std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetNotMineScriptPubKeys() const +{ + LOCK(cs_KeyStore); + std::unordered_set<CScript, SaltedSipHasher> spks; + for (const CScript& script : setWatchOnly) { + if (IsMine(script) == ISMINE_NO) spks.insert(script); + } return spks; } diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index ec7b017720..7c0eca1475 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -123,20 +123,14 @@ public: template<typename Stream> void Serialize(Stream& s) const { - int nVersion = s.GetVersion(); - if (!(s.GetType() & SER_GETHASH)) { - s << nVersion; - } + s << int{259900}; // Unused field, writes the highest client version ever written s << nTime << vchPubKey << fInternal << m_pre_split; } template<typename Stream> void Unserialize(Stream& s) { - int nVersion = s.GetVersion(); - if (!(s.GetType() & SER_GETHASH)) { - s >> nVersion; - } + s >> int{}; // Discard unused field s >> nTime >> vchPubKey; try { s >> fInternal; @@ -525,6 +519,12 @@ public: std::set<CKeyID> GetKeys() const override; std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override; + /** + * Retrieves scripts that were imported by bugs into the legacy spkm and are + * simply invalid, such as a sh(sh(pkh())) script, or not watched. + */ + std::unordered_set<CScript, SaltedSipHasher> GetNotMineScriptPubKeys() const; + /** Get the DescriptorScriptPubKeyMans (with private keys) that have the same scriptPubKeys as this LegacyScriptPubKeyMan. * Does not modify this ScriptPubKeyMan. 
*/ std::optional<MigrationData> MigrateToDescriptor(); diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp index fd7f279505..7e6fba33aa 100644 --- a/src/wallet/spend.cpp +++ b/src/wallet/spend.cpp @@ -259,6 +259,7 @@ util::Result<PreSelectedInputs> FetchSelectedInputs(const CWallet& wallet, const { PreSelectedInputs result; const bool can_grind_r = wallet.CanGrindR(); + std::map<COutPoint, CAmount> map_of_bump_fees = wallet.chain().CalculateIndividualBumpFees(coin_control.ListSelected(), coin_selection_params.m_effective_feerate); for (const COutPoint& outpoint : coin_control.ListSelected()) { int input_bytes = -1; CTxOut txout; @@ -294,6 +295,7 @@ util::Result<PreSelectedInputs> FetchSelectedInputs(const CWallet& wallet, const /* Set some defaults for depth, spendable, solvable, safe, time, and from_me as these don't matter for preset inputs since no selection is being done. */ COutput output(outpoint, txout, /*depth=*/ 0, input_bytes, /*spendable=*/ true, /*solvable=*/ true, /*safe=*/ true, /*time=*/ 0, /*from_me=*/ false, coin_selection_params.m_effective_feerate); + output.ApplyBumpFee(map_of_bump_fees.at(output.outpoint)); result.Insert(output, coin_selection_params.m_subtract_fee_outputs); } return result; @@ -314,6 +316,7 @@ CoinsResult AvailableCoins(const CWallet& wallet, const int max_depth = {coinControl ? coinControl->m_max_depth : DEFAULT_MAX_DEPTH}; const bool only_safe = {coinControl ? !coinControl->m_include_unsafe_inputs : true}; const bool can_grind_r = wallet.CanGrindR(); + std::vector<COutPoint> outpoints; std::set<uint256> trusted_parents; for (const auto& entry : wallet.mapWallet) @@ -433,6 +436,8 @@ CoinsResult AvailableCoins(const CWallet& wallet, result.Add(GetOutputType(type, is_from_p2sh), COutput(outpoint, output, nDepth, input_bytes, spendable, solvable, safeTx, wtx.GetTxTime(), tx_from_me, feerate)); + outpoints.push_back(outpoint); + // Checks the sum amount of all UTXO's. 
if (params.min_sum_amount != MAX_MONEY) { if (result.GetTotalAmount() >= params.min_sum_amount) { @@ -447,6 +452,16 @@ CoinsResult AvailableCoins(const CWallet& wallet, } } + if (feerate.has_value()) { + std::map<COutPoint, CAmount> map_of_bump_fees = wallet.chain().CalculateIndividualBumpFees(outpoints, feerate.value()); + + for (auto& [_, outputs] : result.coins) { + for (auto& output : outputs) { + output.ApplyBumpFee(map_of_bump_fees.at(output.outpoint)); + } + } + } + return result; } @@ -490,8 +505,15 @@ std::map<CTxDestination, std::vector<COutput>> ListCoins(const CWallet& wallet) coins_params.skip_locked = false; for (const COutput& coin : AvailableCoins(wallet, &coin_control, /*feerate=*/std::nullopt, coins_params).All()) { CTxDestination address; - if ((coin.spendable || (wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && coin.solvable)) && - ExtractDestination(FindNonChangeParentOutput(wallet, coin.outpoint).scriptPubKey, address)) { + if ((coin.spendable || (wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && coin.solvable))) { + if (!ExtractDestination(FindNonChangeParentOutput(wallet, coin.outpoint).scriptPubKey, address)) { + // For backwards compatibility, we convert P2PK output scripts into PKHash destinations + if (auto pk_dest = std::get_if<PubKeyDestination>(&address)) { + address = PKHash(pk_dest->GetPubKey()); + } else { + continue; + } + } result[address].emplace_back(coin); } } @@ -628,13 +650,13 @@ FilteredOutputGroups GroupOutputs(const CWallet& wallet, // Returns true if the result contains an error and the message is not empty static bool HasErrorMsg(const util::Result<SelectionResult>& res) { return !util::ErrorString(res).empty(); } -util::Result<SelectionResult> AttemptSelection(const CAmount& nTargetValue, OutputGroupTypeMap& groups, +util::Result<SelectionResult> AttemptSelection(interfaces::Chain& chain, const CAmount& nTargetValue, OutputGroupTypeMap& groups, const CoinSelectionParams& coin_selection_params, bool 
allow_mixed_output_types) { // Run coin selection on each OutputType and compute the Waste Metric std::vector<SelectionResult> results; for (auto& [type, group] : groups.groups_by_type) { - auto result{ChooseSelectionResult(nTargetValue, group, coin_selection_params)}; + auto result{ChooseSelectionResult(chain, nTargetValue, group, coin_selection_params)}; // If any specific error message appears here, then something particularly wrong happened. if (HasErrorMsg(result)) return result; // So let's return the specific error. // Append the favorable result. @@ -648,14 +670,14 @@ util::Result<SelectionResult> AttemptSelection(const CAmount& nTargetValue, Outp // over all available coins, which would allow mixing. // If TypesCount() <= 1, there is nothing to mix. if (allow_mixed_output_types && groups.TypesCount() > 1) { - return ChooseSelectionResult(nTargetValue, groups.all_groups, coin_selection_params); + return ChooseSelectionResult(chain, nTargetValue, groups.all_groups, coin_selection_params); } // Either mixing is not allowed and we couldn't find a solution from any single OutputType, or mixing was allowed and we still couldn't // find a solution using all available coins return util::Error(); }; -util::Result<SelectionResult> ChooseSelectionResult(const CAmount& nTargetValue, Groups& groups, const CoinSelectionParams& coin_selection_params) +util::Result<SelectionResult> ChooseSelectionResult(interfaces::Chain& chain, const CAmount& nTargetValue, Groups& groups, const CoinSelectionParams& coin_selection_params) { // Vector of results. We will choose the best one based on waste. std::vector<SelectionResult> results; @@ -680,12 +702,10 @@ util::Result<SelectionResult> ChooseSelectionResult(const CAmount& nTargetValue, // The knapsack solver has some legacy behavior where it will spend dust outputs. We retain this behavior, so don't filter for positive only here. 
if (auto knapsack_result{KnapsackSolver(groups.mixed_group, nTargetValue, coin_selection_params.m_min_change_target, coin_selection_params.rng_fast, max_inputs_weight)}) { - knapsack_result->ComputeAndSetWaste(coin_selection_params.min_viable_change, coin_selection_params.m_cost_of_change, coin_selection_params.m_change_fee); results.push_back(*knapsack_result); } else append_error(knapsack_result); if (auto srd_result{SelectCoinsSRD(groups.positive_group, nTargetValue, coin_selection_params.m_change_fee, coin_selection_params.rng_fast, max_inputs_weight)}) { - srd_result->ComputeAndSetWaste(coin_selection_params.min_viable_change, coin_selection_params.m_cost_of_change, coin_selection_params.m_change_fee); results.push_back(*srd_result); } else append_error(srd_result); @@ -695,6 +715,27 @@ util::Result<SelectionResult> ChooseSelectionResult(const CAmount& nTargetValue, return errors.empty() ? util::Error() : errors.front(); } + // If the chosen input set has unconfirmed inputs, check for synergies from overlapping ancestry + for (auto& result : results) { + std::vector<COutPoint> outpoints; + std::set<std::shared_ptr<COutput>> coins = result.GetInputSet(); + CAmount summed_bump_fees = 0; + for (auto& coin : coins) { + if (coin->depth > 0) continue; // Bump fees only exist for unconfirmed inputs + outpoints.push_back(coin->outpoint); + summed_bump_fees += coin->ancestor_bump_fees; + } + std::optional<CAmount> combined_bump_fee = chain.CalculateCombinedBumpFee(outpoints, coin_selection_params.m_effective_feerate); + if (!combined_bump_fee.has_value()) { + return util::Error{_("Failed to calculate bump fees, because unconfirmed UTXOs depend on enormous cluster of unconfirmed transactions.")}; + } + CAmount bump_fee_overestimate = summed_bump_fees - combined_bump_fee.value(); + if (bump_fee_overestimate) { + result.SetBumpFeeDiscount(bump_fee_overestimate); + } + result.ComputeAndSetWaste(coin_selection_params.min_viable_change, 
coin_selection_params.m_cost_of_change, coin_selection_params.m_change_fee); + } + // Choose the result with the least waste // If the waste is the same, choose the one which spends more inputs. return *std::min_element(results.begin(), results.end()); @@ -824,7 +865,7 @@ util::Result<SelectionResult> AutomaticCoinSelection(const CWallet& wallet, Coin for (const auto& select_filter : ordered_filters) { auto it = filtered_groups.find(select_filter.filter); if (it == filtered_groups.end()) continue; - if (auto res{AttemptSelection(value_to_select, it->second, + if (auto res{AttemptSelection(wallet.chain(), value_to_select, it->second, coin_selection_params, select_filter.allow_mixed_output_types)}) { return res; // result found } else { @@ -1037,7 +1078,7 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal( // vouts to the payees for (const auto& recipient : vecSend) { - CTxOut txout(recipient.nAmount, recipient.scriptPubKey); + CTxOut txout(recipient.nAmount, GetScriptForDestination(recipient.dest)); // Include the fee cost for outputs. coin_selection_params.tx_noinputs_size += ::GetSerializeSize(txout, PROTOCOL_VERSION); @@ -1120,7 +1161,7 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal( if (nBytes == -1) { return util::Error{_("Missing solving data for estimating transaction size")}; } - CAmount fee_needed = coin_selection_params.m_effective_feerate.GetFee(nBytes); + CAmount fee_needed = coin_selection_params.m_effective_feerate.GetFee(nBytes) + result.GetTotalBumpFees(); const CAmount output_value = CalculateOutputValue(txNew); Assume(recipients_sum + change_amount == output_value); CAmount current_fee = result.GetSelectedValue() - output_value; @@ -1285,7 +1326,9 @@ bool FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& nFeeRet, // Turn the txout set into a CRecipient vector. 
for (size_t idx = 0; idx < tx.vout.size(); idx++) { const CTxOut& txOut = tx.vout[idx]; - CRecipient recipient = {txOut.scriptPubKey, txOut.nValue, setSubtractFeeFromOutputs.count(idx) == 1}; + CTxDestination dest; + ExtractDestination(txOut.scriptPubKey, dest); + CRecipient recipient = {dest, txOut.nValue, setSubtractFeeFromOutputs.count(idx) == 1}; vecSend.push_back(recipient); } diff --git a/src/wallet/spend.h b/src/wallet/spend.h index cc9ccf3011..407627b5f1 100644 --- a/src/wallet/spend.h +++ b/src/wallet/spend.h @@ -123,6 +123,7 @@ FilteredOutputGroups GroupOutputs(const CWallet& wallet, * the solution (according to the waste metric) will be chosen. If a valid input cannot be found from any * single OutputType, fallback to running `ChooseSelectionResult()` over all available coins. * + * param@[in] chain The chain interface to get information on unconfirmed UTXOs bump fees * param@[in] nTargetValue The target value * param@[in] groups The grouped outputs mapped by coin eligibility filters * param@[in] coin_selection_params Parameters for the coin selection @@ -132,7 +133,7 @@ FilteredOutputGroups GroupOutputs(const CWallet& wallet, * or (2) an specific error message if there was something particularly wrong (e.g. a selection * result that surpassed the tx max weight size). */ -util::Result<SelectionResult> AttemptSelection(const CAmount& nTargetValue, OutputGroupTypeMap& groups, +util::Result<SelectionResult> AttemptSelection(interfaces::Chain& chain, const CAmount& nTargetValue, OutputGroupTypeMap& groups, const CoinSelectionParams& coin_selection_params, bool allow_mixed_output_types); /** @@ -140,6 +141,7 @@ util::Result<SelectionResult> AttemptSelection(const CAmount& nTargetValue, Outp * Multiple coin selection algorithms will be run and the input set that produces the least waste * (according to the waste metric) will be chosen. 
* + * param@[in] chain The chain interface to get information on unconfirmed UTXOs bump fees * param@[in] nTargetValue The target value * param@[in] groups The struct containing the outputs grouped by script and divided by (1) positive only outputs and (2) all outputs (positive + negative). * param@[in] coin_selection_params Parameters for the coin selection @@ -148,7 +150,7 @@ util::Result<SelectionResult> AttemptSelection(const CAmount& nTargetValue, Outp * or (2) an specific error message if there was something particularly wrong (e.g. a selection * result that surpassed the tx max weight size). */ -util::Result<SelectionResult> ChooseSelectionResult(const CAmount& nTargetValue, Groups& groups, const CoinSelectionParams& coin_selection_params); +util::Result<SelectionResult> ChooseSelectionResult(interfaces::Chain& chain, const CAmount& nTargetValue, Groups& groups, const CoinSelectionParams& coin_selection_params); // User manually selected inputs that must be part of the transaction struct PreSelectedInputs diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp index c8283f453a..9569210ba0 100644 --- a/src/wallet/test/coinselector_tests.cpp +++ b/src/wallet/test/coinselector_tests.cpp @@ -58,15 +58,17 @@ static void add_coin(const CAmount& nValue, int nInput, SelectionResult& result) result.AddInput(group); } -static void add_coin(const CAmount& nValue, int nInput, CoinSet& set, CAmount fee = 0, CAmount long_term_fee = 0) +static void add_coin(const CAmount& nValue, int nInput, SelectionResult& result, CAmount fee, CAmount long_term_fee) { CMutableTransaction tx; tx.vout.resize(nInput + 1); tx.vout[nInput].nValue = nValue; tx.nLockTime = nextLockTime++; // so all transactions get different hashes - COutput coin(COutPoint(tx.GetHash(), nInput), tx.vout.at(nInput), /*depth=*/ 1, /*input_bytes=*/ 148, /*spendable=*/ true, /*solvable=*/ true, /*safe=*/ true, /*time=*/ 0, /*from_me=*/ false, fee); - coin.long_term_fee = 
long_term_fee; - set.insert(std::make_shared<COutput>(coin)); + std::shared_ptr<COutput> coin = std::make_shared<COutput>(COutPoint(tx.GetHash(), nInput), tx.vout.at(nInput), /*depth=*/ 1, /*input_bytes=*/ 148, /*spendable=*/ true, /*solvable=*/ true, /*safe=*/ true, /*time=*/ 0, /*from_me=*/ false, fee); + OutputGroup group; + group.Insert(coin, /*ancestors=*/ 0, /*descendants=*/ 0); + coin->long_term_fee = long_term_fee; // group.Insert() will modify long_term_fee, so we need to set it afterwards + result.AddInput(group); } static void add_coin(CoinsResult& available_coins, CWallet& wallet, const CAmount& nValue, CFeeRate feerate = CFeeRate(0), int nAge = 6*24, bool fIsFromMe = false, int nInput =0, bool spendable = false, int custom_size = 0) @@ -827,7 +829,6 @@ BOOST_AUTO_TEST_CASE(SelectCoins_test) BOOST_AUTO_TEST_CASE(waste_test) { - CoinSet selection; const CAmount fee{100}; const CAmount change_cost{125}; const CAmount fee_diff{40}; @@ -835,92 +836,179 @@ BOOST_AUTO_TEST_CASE(waste_test) const CAmount target{2 * COIN}; const CAmount excess{in_amt - fee * 2 - target}; - // Waste with change is the change cost and difference between fee and long term fee - add_coin(1 * COIN, 1, selection, fee, fee - fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee - fee_diff); - const CAmount waste1 = GetSelectionWaste(selection, change_cost, target); - BOOST_CHECK_EQUAL(fee_diff * 2 + change_cost, waste1); - selection.clear(); - - // Waste without change is the excess and difference between fee and long term fee - add_coin(1 * COIN, 1, selection, fee, fee - fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee - fee_diff); - const CAmount waste_nochange1 = GetSelectionWaste(selection, 0, target); - BOOST_CHECK_EQUAL(fee_diff * 2 + excess, waste_nochange1); - selection.clear(); - - // Waste with change and fee == long term fee is just cost of change - add_coin(1 * COIN, 1, selection, fee, fee); - add_coin(2 * COIN, 2, selection, fee, fee); - 
BOOST_CHECK_EQUAL(change_cost, GetSelectionWaste(selection, change_cost, target)); - selection.clear(); - - // Waste without change and fee == long term fee is just the excess - add_coin(1 * COIN, 1, selection, fee, fee); - add_coin(2 * COIN, 2, selection, fee, fee); - BOOST_CHECK_EQUAL(excess, GetSelectionWaste(selection, 0, target)); - selection.clear(); - - // Waste will be greater when fee is greater, but long term fee is the same - add_coin(1 * COIN, 1, selection, fee * 2, fee - fee_diff); - add_coin(2 * COIN, 2, selection, fee * 2, fee - fee_diff); - const CAmount waste2 = GetSelectionWaste(selection, change_cost, target); - BOOST_CHECK_GT(waste2, waste1); - selection.clear(); - - // Waste with change is the change cost and difference between fee and long term fee - // With long term fee greater than fee, waste should be less than when long term fee is less than fee - add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); - const CAmount waste3 = GetSelectionWaste(selection, change_cost, target); - BOOST_CHECK_EQUAL(fee_diff * -2 + change_cost, waste3); - BOOST_CHECK_LT(waste3, waste1); - selection.clear(); - - // Waste without change is the excess and difference between fee and long term fee - // With long term fee greater than fee, waste should be less than when long term fee is less than fee - add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); - const CAmount waste_nochange2 = GetSelectionWaste(selection, 0, target); - BOOST_CHECK_EQUAL(fee_diff * -2 + excess, waste_nochange2); - BOOST_CHECK_LT(waste_nochange2, waste_nochange1); - selection.clear(); - - // No Waste when fee == long_term_fee, no change, and no excess - add_coin(1 * COIN, 1, selection, fee, fee); - add_coin(2 * COIN, 2, selection, fee, fee); - const CAmount exact_target{in_amt - fee * 2}; - BOOST_CHECK_EQUAL(0, GetSelectionWaste(selection, /*change_cost=*/0, exact_target)); - 
selection.clear(); - - // No Waste when (fee - long_term_fee) == (-cost_of_change), and no excess - const CAmount new_change_cost{fee_diff * 2}; - add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); - BOOST_CHECK_EQUAL(0, GetSelectionWaste(selection, new_change_cost, target)); - selection.clear(); - - // No Waste when (fee - long_term_fee) == (-excess), no change cost - const CAmount new_target{in_amt - fee * 2 - fee_diff * 2}; - add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); - BOOST_CHECK_EQUAL(0, GetSelectionWaste(selection, /*change_cost=*/ 0, new_target)); - selection.clear(); - - // Negative waste when the long term fee is greater than the current fee and the selected value == target - const CAmount exact_target1{3 * COIN - 2 * fee}; - const CAmount target_waste1{-2 * fee_diff}; // = (2 * fee) - (2 * (fee + fee_diff)) - add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); - BOOST_CHECK_EQUAL(target_waste1, GetSelectionWaste(selection, /*change_cost=*/ 0, exact_target1)); - selection.clear(); - - // Negative waste when the long term fee is greater than the current fee and change_cost < - (inputs * (fee - long_term_fee)) - const CAmount large_fee_diff{90}; - const CAmount target_waste2{-2 * large_fee_diff + change_cost}; // = (2 * fee) - (2 * (fee + large_fee_diff)) + change_cost - add_coin(1 * COIN, 1, selection, fee, fee + large_fee_diff); - add_coin(2 * COIN, 2, selection, fee, fee + large_fee_diff); - BOOST_CHECK_EQUAL(target_waste2, GetSelectionWaste(selection, change_cost, target)); + // The following tests that the waste is calculated correctly in various scenarios. + // ComputeAndSetWaste will first determine the size of the change output. 
We don't really + // care about the change and just want to use the variant that always includes the change_cost, + // so min_viable_change and change_fee are set to 0 to ensure that. + { + // Waste with change is the change cost and difference between fee and long term fee + SelectionResult selection1{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection1, fee, fee - fee_diff); + add_coin(2 * COIN, 2, selection1, fee, fee - fee_diff); + selection1.ComputeAndSetWaste(/*min_viable_change=*/0, change_cost, /*change_fee=*/0); + BOOST_CHECK_EQUAL(fee_diff * 2 + change_cost, selection1.GetWaste()); + + // Waste will be greater when fee is greater, but long term fee is the same + SelectionResult selection2{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection2, fee * 2, fee - fee_diff); + add_coin(2 * COIN, 2, selection2, fee * 2, fee - fee_diff); + selection2.ComputeAndSetWaste(/*min_viable_change=*/0, change_cost, /*change_fee=*/0); + BOOST_CHECK_GT(selection2.GetWaste(), selection1.GetWaste()); + + // Waste with change is the change cost and difference between fee and long term fee + // With long term fee greater than fee, waste should be less than when long term fee is less than fee + SelectionResult selection3{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection3, fee, fee + fee_diff); + add_coin(2 * COIN, 2, selection3, fee, fee + fee_diff); + selection3.ComputeAndSetWaste(/*min_viable_change=*/0, change_cost, /*change_fee=*/0); + BOOST_CHECK_EQUAL(fee_diff * -2 + change_cost, selection3.GetWaste()); + BOOST_CHECK_LT(selection3.GetWaste(), selection1.GetWaste()); + } + + { + // Waste without change is the excess and difference between fee and long term fee + SelectionResult selection_nochange1{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection_nochange1, fee, fee - fee_diff); + add_coin(2 * COIN, 2, selection_nochange1, fee, fee - fee_diff); + 
selection_nochange1.ComputeAndSetWaste(/*min_viable_change=*/0, /*change_cost=*/0, /*change_fee=*/0); + BOOST_CHECK_EQUAL(fee_diff * 2 + excess, selection_nochange1.GetWaste()); + + // Waste without change is the excess and difference between fee and long term fee + // With long term fee greater than fee, waste should be less than when long term fee is less than fee + SelectionResult selection_nochange2{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection_nochange2, fee, fee + fee_diff); + add_coin(2 * COIN, 2, selection_nochange2, fee, fee + fee_diff); + selection_nochange2.ComputeAndSetWaste(/*min_viable_change=*/0, /*change_cost=*/0, /*change_fee=*/0); + BOOST_CHECK_EQUAL(fee_diff * -2 + excess, selection_nochange2.GetWaste()); + BOOST_CHECK_LT(selection_nochange2.GetWaste(), selection_nochange1.GetWaste()); + } + + { + // Waste with change and fee == long term fee is just cost of change + SelectionResult selection{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection, fee, fee); + add_coin(2 * COIN, 2, selection, fee, fee); + selection.ComputeAndSetWaste(/*min_viable_change=*/0, change_cost, /*change_fee=*/0); + BOOST_CHECK_EQUAL(change_cost, selection.GetWaste()); + } + + { + // Waste without change and fee == long term fee is just the excess + SelectionResult selection{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection, fee, fee); + add_coin(2 * COIN, 2, selection, fee, fee); + selection.ComputeAndSetWaste(/*min_viable_change=*/0, /*change_cost=*/0, /*change_fee=*/0); + BOOST_CHECK_EQUAL(excess, selection.GetWaste()); + } + + { + // No Waste when fee == long_term_fee, no change, and no excess + const CAmount exact_target{in_amt - fee * 2}; + SelectionResult selection{exact_target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection, fee, fee); + add_coin(2 * COIN, 2, selection, fee, fee); + selection.ComputeAndSetWaste(/*min_viable_change=*/0, /*change_cost=*/0, /*change_fee=*/0); + 
BOOST_CHECK_EQUAL(0, selection.GetWaste()); + } + + { + // No Waste when (fee - long_term_fee) == (-cost_of_change), and no excess + SelectionResult selection{target, SelectionAlgorithm::MANUAL}; + const CAmount new_change_cost{fee_diff * 2}; + add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); + add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); + selection.ComputeAndSetWaste(/*min_viable_change=*/0, new_change_cost, /*change_fee=*/0); + BOOST_CHECK_EQUAL(0, selection.GetWaste()); + } + + { + // No Waste when (fee - long_term_fee) == (-excess), no change cost + const CAmount new_target{in_amt - fee * 2 - fee_diff * 2}; + SelectionResult selection{new_target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); + add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); + selection.ComputeAndSetWaste(/*min_viable_change=*/0, /*change_cost=*/0, /*change_fee=*/0); + BOOST_CHECK_EQUAL(0, selection.GetWaste()); + } + + { + // Negative waste when the long term fee is greater than the current fee and the selected value == target + const CAmount exact_target{3 * COIN - 2 * fee}; + SelectionResult selection{exact_target, SelectionAlgorithm::MANUAL}; + const CAmount target_waste1{-2 * fee_diff}; // = (2 * fee) - (2 * (fee + fee_diff)) + add_coin(1 * COIN, 1, selection, fee, fee + fee_diff); + add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); + selection.ComputeAndSetWaste(/*min_viable_change=*/0, /*change_cost=*/0, /*change_fee=*/0); + BOOST_CHECK_EQUAL(target_waste1, selection.GetWaste()); + } + + { + // Negative waste when the long term fee is greater than the current fee and change_cost < - (inputs * (fee - long_term_fee)) + SelectionResult selection{target, SelectionAlgorithm::MANUAL}; + const CAmount large_fee_diff{90}; + const CAmount target_waste2{-2 * large_fee_diff + change_cost}; // = (2 * fee) - (2 * (fee + large_fee_diff)) + change_cost + add_coin(1 * COIN, 1, selection, fee, fee + large_fee_diff); + add_coin(2 * COIN, 
2, selection, fee, fee + large_fee_diff); + selection.ComputeAndSetWaste(/*min_viable_change=*/0, change_cost, /*change_fee=*/0); + BOOST_CHECK_EQUAL(target_waste2, selection.GetWaste()); + } +} + + +BOOST_AUTO_TEST_CASE(bump_fee_test) +{ + const CAmount fee{100}; + const CAmount min_viable_change{200}; + const CAmount change_cost{125}; + const CAmount change_fee{35}; + const CAmount fee_diff{40}; + const CAmount target{2 * COIN}; + + { + SelectionResult selection{target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection, /*fee=*/fee, /*long_term_fee=*/fee + fee_diff); + add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); + const std::vector<std::shared_ptr<COutput>> inputs = selection.GetShuffledInputVector(); + + for (size_t i = 0; i < inputs.size(); ++i) { + inputs[i]->ApplyBumpFee(20*(i+1)); + } + + selection.ComputeAndSetWaste(min_viable_change, change_cost, change_fee); + CAmount expected_waste = fee_diff * -2 + change_cost + /*bump_fees=*/60; + BOOST_CHECK_EQUAL(expected_waste, selection.GetWaste()); + + selection.SetBumpFeeDiscount(30); + selection.ComputeAndSetWaste(min_viable_change, change_cost, change_fee); + expected_waste = fee_diff * -2 + change_cost + /*bump_fees=*/60 - /*group_discount=*/30; + BOOST_CHECK_EQUAL(expected_waste, selection.GetWaste()); + } + + { + // Test with changeless transaction + // + // Bump fees and excess both contribute fully to the waste score, + // therefore, a bump fee group discount will not change the waste + // score as long as we do not create change in both instances. 
+ CAmount changeless_target = 3 * COIN - 2 * fee - 100; + SelectionResult selection{changeless_target, SelectionAlgorithm::MANUAL}; + add_coin(1 * COIN, 1, selection, /*fee=*/fee, /*long_term_fee=*/fee + fee_diff); + add_coin(2 * COIN, 2, selection, fee, fee + fee_diff); + const std::vector<std::shared_ptr<COutput>> inputs = selection.GetShuffledInputVector(); + + for (size_t i = 0; i < inputs.size(); ++i) { + inputs[i]->ApplyBumpFee(20*(i+1)); + } + + selection.ComputeAndSetWaste(min_viable_change, change_cost, change_fee); + CAmount expected_waste = fee_diff * -2 + /*bump_fees=*/60 + /*excess = 100 - bump_fees*/40; + BOOST_CHECK_EQUAL(expected_waste, selection.GetWaste()); + + selection.SetBumpFeeDiscount(30); + selection.ComputeAndSetWaste(min_viable_change, change_cost, change_fee); + expected_waste = fee_diff * -2 + /*bump_fees=*/60 - /*group_discount=*/30 + /*excess = 100 - bump_fees + group_discount*/70; + BOOST_CHECK_EQUAL(expected_waste, selection.GetWaste()); + } } BOOST_AUTO_TEST_CASE(effective_value_test) diff --git a/src/wallet/test/fuzz/notifications.cpp b/src/wallet/test/fuzz/notifications.cpp index 42accafe5b..abd788f96f 100644 --- a/src/wallet/test/fuzz/notifications.cpp +++ b/src/wallet/test/fuzz/notifications.cpp @@ -2,6 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include <kernel/chain.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> @@ -145,8 +146,8 @@ FUZZ_TARGET(wallet_notifications, .init = initialize_setup) // time to the maximum value. This ensures that the wallet's birth time is always // earlier than this maximum time. 
info.chain_time_max = std::numeric_limits<unsigned int>::max(); - a.wallet->blockConnected(info); - b.wallet->blockConnected(info); + a.wallet->blockConnected(ChainstateRole::NORMAL, info); + b.wallet->blockConnected(ChainstateRole::NORMAL, info); // Store the coins for the next block Coins coins_new; for (const auto& tx : block.vtx) { diff --git a/src/wallet/test/spend_tests.cpp b/src/wallet/test/spend_tests.cpp index eca1d74cf6..68c98ae6b9 100644 --- a/src/wallet/test/spend_tests.cpp +++ b/src/wallet/test/spend_tests.cpp @@ -27,7 +27,7 @@ BOOST_FIXTURE_TEST_CASE(SubtractFee, TestChain100Setup) // leftover input amount which would have been change to the recipient // instead of the miner. auto check_tx = [&wallet](CAmount leftover_input_amount) { - CRecipient recipient{GetScriptForRawPubKey({}), 50 * COIN - leftover_input_amount, /*subtract_fee=*/true}; + CRecipient recipient{PubKeyDestination({}), 50 * COIN - leftover_input_amount, /*subtract_fee=*/true}; constexpr int RANDOM_CHANGE_POSITION = -1; CCoinControl coin_control; coin_control.m_feerate.emplace(10000); diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 5c297d76e4..21ed52731a 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -645,7 +645,7 @@ void TestCoinsResult(ListCoinsTest& context, OutputType out_type, CAmount amount { LOCK(context.wallet->cs_wallet); util::Result<CTxDestination> dest = Assert(context.wallet->GetNewDestination(out_type, "")); - CWalletTx& wtx = context.AddTx(CRecipient{{GetScriptForDestination(*dest)}, amount, /*fSubtractFeeFromAmount=*/true}); + CWalletTx& wtx = context.AddTx(CRecipient{*dest, amount, /*fSubtractFeeFromAmount=*/true}); CoinFilterParams filter; filter.skip_locked = false; CoinsResult available_coins = AvailableCoins(*context.wallet, nullptr, std::nullopt, filter); @@ -752,14 +752,14 @@ bool malformed_descriptor(std::ios_base::failure e) BOOST_FIXTURE_TEST_CASE(wallet_descriptor_test, 
BasicTestingSetup) { std::vector<unsigned char> malformed_record; - CVectorWriter vw(0, 0, malformed_record, 0); + CVectorWriter vw{0, malformed_record, 0}; vw << std::string("notadescriptor"); vw << uint64_t{0}; vw << int32_t{0}; vw << int32_t{0}; vw << int32_t{1}; - SpanReader vr{0, 0, malformed_record}; + SpanReader vr{0, malformed_record}; WalletDescriptor w_desc; BOOST_CHECK_EXCEPTION(vr >> w_desc, std::ios_base::failure, malformed_descriptor); } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 6f5248efaf..c240e88531 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -22,6 +22,7 @@ #include <interfaces/chain.h> #include <interfaces/handler.h> #include <interfaces/wallet.h> +#include <kernel/chain.h> #include <kernel/mempool_removal_reason.h> #include <key.h> #include <key_io.h> @@ -626,11 +627,11 @@ bool CWallet::ChangeWalletPassphrase(const SecureString& strOldWalletPassphrase, return false; } -void CWallet::chainStateFlushed(const CBlockLocator& loc) +void CWallet::chainStateFlushed(ChainstateRole role, const CBlockLocator& loc) { // Don't update the best block until the chain is attached so that in case of a shutdown, // the rescan will be restarted at next startup. - if (m_attaching_chain) { + if (m_attaching_chain || role == ChainstateRole::BACKGROUND) { return; } WalletBatch batch(GetDatabase()); @@ -1339,11 +1340,14 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c { LOCK(cs_wallet); - int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1; // If number of conflict confirms cannot be determined, this means // that the block is still unknown or not yet part of the main chain, // for example when loading the wallet during a reindex. Do nothing in that // case. 
+ if (m_last_block_processed_height < 0 || conflicting_height < 0) { + return; + } + int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1; if (conflictconfirms >= 0) return; @@ -1462,8 +1466,11 @@ void CWallet::transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRe } } -void CWallet::blockConnected(const interfaces::BlockInfo& block) +void CWallet::blockConnected(ChainstateRole role, const interfaces::BlockInfo& block) { + if (role == ChainstateRole::BACKGROUND) { + return; + } assert(block.data); LOCK(cs_wallet); @@ -2213,15 +2220,13 @@ OutputType CWallet::TransactionChangeType(const std::optional<OutputType>& chang bool any_pkh{false}; for (const auto& recipient : vecSend) { - std::vector<std::vector<uint8_t>> dummy; - const TxoutType type{Solver(recipient.scriptPubKey, dummy)}; - if (type == TxoutType::WITNESS_V1_TAPROOT) { + if (std::get_if<WitnessV1Taproot>(&recipient.dest)) { any_tr = true; - } else if (type == TxoutType::WITNESS_V0_KEYHASH) { + } else if (std::get_if<WitnessV0KeyHash>(&recipient.dest)) { any_wpkh = true; - } else if (type == TxoutType::SCRIPTHASH) { + } else if (std::get_if<ScriptHash>(&recipient.dest)) { any_sh = true; - } else if (type == TxoutType::PUBKEYHASH) { + } else if (std::get_if<PKHash>(&recipient.dest)) { any_pkh = true; } } @@ -2943,7 +2948,7 @@ std::shared_ptr<CWallet> CWallet::Create(WalletContext& context, const std::stri } if (chain) { - walletInstance->chainStateFlushed(chain->getTipLocator()); + walletInstance->chainStateFlushed(ChainstateRole::NORMAL, chain->getTipLocator()); } } else if (wallet_creation_flags & WALLET_FLAG_DISABLE_PRIVATE_KEYS) { // Make it impossible to disable private keys after creation @@ -3229,7 +3234,7 @@ bool CWallet::AttachChain(const std::shared_ptr<CWallet>& walletInstance, interf } } walletInstance->m_attaching_chain = false; - walletInstance->chainStateFlushed(chain.getTipLocator()); + walletInstance->chainStateFlushed(ChainstateRole::NORMAL, 
chain.getTipLocator()); walletInstance->GetDatabase().IncrementUpdateCounter(); } walletInstance->m_attaching_chain = false; @@ -3862,6 +3867,13 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error) return false; } + // Get all invalid or non-watched scripts that will not be migrated + std::set<CTxDestination> not_migrated_dests; + for (const auto& script : legacy_spkm->GetNotMineScriptPubKeys()) { + CTxDestination dest; + if (ExtractDestination(script, dest)) not_migrated_dests.emplace(dest); + } + for (auto& desc_spkm : data.desc_spkms) { if (m_spk_managers.count(desc_spkm->GetID()) > 0) { error = _("Error: Duplicate descriptors created during migration. Your wallet may be corrupted."); @@ -3968,6 +3980,13 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error) continue; } } + + // Skip invalid/non-watched scripts that will not be migrated + if (not_migrated_dests.count(addr_pair.first) > 0) { + dests_to_delete.push_back(addr_pair.first); + continue; + } + // Not ours, not in watchonly wallet, and not in solvable error = _("Error: Address book data in wallet cannot be identified to belong to migrated wallets"); return false; @@ -4201,7 +4220,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle // Migration successful, unload the wallet locally, then reload it. 
assert(local_wallet.use_count() == 1); local_wallet.reset(); - LoadWallet(context, wallet_name, /*load_on_start=*/std::nullopt, options, status, error, warnings); + res.wallet = LoadWallet(context, wallet_name, /*load_on_start=*/std::nullopt, options, status, error, warnings); res.wallet_name = wallet_name; } else { // Migration failed, cleanup diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 091a573151..9333493a6e 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -288,7 +288,7 @@ inline std::optional<AddressPurpose> PurposeFromString(std::string_view s) struct CRecipient { - CScript scriptPubKey; + CTxDestination dest; CAmount nAmount; bool fSubtractFeeFromAmount; }; @@ -599,7 +599,7 @@ public: CWalletTx* AddToWallet(CTransactionRef tx, const TxState& state, const UpdateWalletTxFn& update_wtx=nullptr, bool fFlushOnClose=true, bool rescanning_old_block = false); bool LoadToWallet(const uint256& hash, const UpdateWalletTxFn& fill_wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); void transactionAddedToMempool(const CTransactionRef& tx) override; - void blockConnected(const interfaces::BlockInfo& block) override; + void blockConnected(ChainstateRole role, const interfaces::BlockInfo& block) override; void blockDisconnected(const interfaces::BlockInfo& block) override; void updatedBlockTip() override; int64_t RescanFromTime(int64_t startTime, const WalletRescanReserver& reserver, bool update); @@ -777,7 +777,7 @@ public: /** should probably be renamed to IsRelevantToMe */ bool IsFromMe(const CTransaction& tx) const; CAmount GetDebit(const CTransaction& tx, const isminefilter& filter) const; - void chainStateFlushed(const CBlockLocator& loc) override; + void chainStateFlushed(ChainstateRole role, const CBlockLocator& loc) override; DBErrors LoadWallet(); DBErrors ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256>& vHashOut) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); @@ -1087,6 +1087,7 @@ bool RemoveWalletSetting(interfaces::Chain& chain, const 
std::string& wallet_nam struct MigrationResult { std::string wallet_name; + std::shared_ptr<CWallet> wallet; std::shared_ptr<CWallet> watchonly_wallet; std::shared_ptr<CWallet> solvables_wallet; fs::path backup_path; diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp index 6755368249..03aae86577 100644 --- a/src/zmq/zmqnotificationinterface.cpp +++ b/src/zmq/zmqnotificationinterface.cpp @@ -5,6 +5,7 @@ #include <zmq/zmqnotificationinterface.h> #include <common/args.h> +#include <kernel/chain.h> #include <logging.h> #include <primitives/block.h> #include <primitives/transaction.h> @@ -170,8 +171,11 @@ void CZMQNotificationInterface::TransactionRemovedFromMempool(const CTransaction }); } -void CZMQNotificationInterface::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) +void CZMQNotificationInterface::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) { + if (role == ChainstateRole::BACKGROUND) { + return; + } for (const CTransactionRef& ptx : pblock->vtx) { const CTransaction& tx = *ptx; TryForEachAndRemoveFailed(notifiers, [&tx](CZMQAbstractNotifier* notifier) { diff --git a/src/zmq/zmqnotificationinterface.h b/src/zmq/zmqnotificationinterface.h index ce67633b30..4246c53bd3 100644 --- a/src/zmq/zmqnotificationinterface.h +++ b/src/zmq/zmqnotificationinterface.h @@ -33,7 +33,7 @@ protected: // CValidationInterface void TransactionAddedToMempool(const CTransactionRef& tx, uint64_t mempool_sequence) override; void TransactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason, uint64_t mempool_sequence) override; - void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override; + void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override; void BlockDisconnected(const 
std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexDisconnected) override; void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override; |