Diffstat (limited to 'src')
-rw-r--r--  src/.clang-tidy                          |   1
-rw-r--r--  src/addrdb.cpp                           |   2
-rw-r--r--  src/addrman.cpp                          |  33
-rw-r--r--  src/addrman.h                            |   5
-rw-r--r--  src/addrman_impl.h                       |   4
-rw-r--r--  src/bench/addrman.cpp                    |   2
-rw-r--r--  src/bench/cluster_linearize.cpp          | 189
-rw-r--r--  src/bench/streams_findbyte.cpp           |   2
-rw-r--r--  src/bitcoin-chainstate.cpp               |   2
-rw-r--r--  src/chain.h                              |   2
-rw-r--r--  src/cluster_linearize.h                  | 304
-rw-r--r--  src/httpserver.cpp                       |   2
-rw-r--r--  src/index/blockfilterindex.cpp           |   6
-rw-r--r--  src/index/txindex.cpp                    |   5
-rw-r--r--  src/init.cpp                             |   8
-rw-r--r--  src/interfaces/mining.h                  |  37
-rw-r--r--  src/net.cpp                              |  10
-rw-r--r--  src/net_processing.cpp                   |  57
-rw-r--r--  src/net_processing.h                     |   5
-rw-r--r--  src/netbase.cpp                          |   2
-rw-r--r--  src/netbase.h                            |   7
-rw-r--r--  src/node/blockstorage.cpp                |   4
-rw-r--r--  src/node/caches.cpp                      |   1
-rw-r--r--  src/node/context.h                       |   2
-rw-r--r--  src/node/interfaces.cpp                  |  51
-rw-r--r--  src/node/mempool_persist.cpp             |   4
-rw-r--r--  src/node/miner.cpp                       |   4
-rw-r--r--  src/node/utxo_snapshot.cpp               |   6
-rw-r--r--  src/pow.cpp                              |  11
-rw-r--r--  src/pow.h                                |   1
-rw-r--r--  src/qt/forms/optionsdialog.ui            |   2
-rw-r--r--  src/qt/locale/bitcoin_am.ts              | 182
-rw-r--r--  src/qt/locale/bitcoin_bn.ts              |   4
-rw-r--r--  src/qt/locale/bitcoin_de.ts              |  77
-rw-r--r--  src/qt/locale/bitcoin_de_CH.ts           |  16
-rw-r--r--  src/qt/locale/bitcoin_gl_ES.ts           |  12
-rw-r--r--  src/qt/locale/bitcoin_ru.ts              |  39
-rw-r--r--  src/qt/locale/bitcoin_sw.ts              | 260
-rw-r--r--  src/qt/locale/bitcoin_th.ts              |   7
-rw-r--r--  src/qt/optionsdialog.cpp                 |   3
-rw-r--r--  src/rest.cpp                             |   7
-rw-r--r--  src/rpc/blockchain.cpp                   |  43
-rw-r--r--  src/rpc/blockchain.h                     |   1
-rw-r--r--  src/rpc/mining.cpp                       |  70
-rw-r--r--  src/rpc/rawtransaction.cpp               |   9
-rw-r--r--  src/rpc/txoutproof.cpp                   |   5
-rw-r--r--  src/streams.cpp                          |  69
-rw-r--r--  src/streams.h                            |  13
-rw-r--r--  src/test/README.md                       |  48
-rw-r--r--  src/test/addrman_tests.cpp               |  44
-rw-r--r--  src/test/fuzz/CMakeLists.txt             |   1
-rw-r--r--  src/test/fuzz/addrman.cpp                |  10
-rw-r--r--  src/test/fuzz/autofile.cpp               |   1
-rw-r--r--  src/test/fuzz/cluster_linearize.cpp      |  88
-rw-r--r--  src/test/fuzz/integer.cpp                |   2
-rw-r--r--  src/test/fuzz/p2p_headers_presync.cpp    | 200
-rw-r--r--  src/test/fuzz/pow.cpp                    |   2
-rw-r--r--  src/test/streams_tests.cpp               |   6
-rw-r--r--  src/test/util/cluster_linearize.h        |  42
-rw-r--r--  src/txdb.h                               |   2
-rw-r--r--  src/util/asmap.cpp                       |   6
-rw-r--r--  src/validation.cpp                       |   2
-rw-r--r--  src/validation.h                         |   2
63 files changed, 1675 insertions, 369 deletions
diff --git a/src/.clang-tidy b/src/.clang-tidy
index 3569dd04b1..1cf270833a 100644
--- a/src/.clang-tidy
+++ b/src/.clang-tidy
@@ -14,6 +14,7 @@ modernize-use-emplace,
modernize-use-equals-default,
modernize-use-noexcept,
modernize-use-nullptr,
+modernize-use-starts-ends-with,
performance-*,
-performance-avoid-endl,
-performance-enum-size,
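The newly enabled modernize-use-starts-ends-with check rewrites prefix/suffix tests spelled with rfind()/compare() into the C++20 member functions. A minimal illustrative sketch (not taken from this change; the function names are hypothetical):

    #include <string>
    // Flagged by modernize-use-starts-ends-with: prefix test spelled with rfind().
    bool IsConfArg(const std::string& arg) { return arg.rfind("-conf=", 0) == 0; }
    // Suggested rewrite using the C++20 member function.
    bool IsConfArgNew(const std::string& arg) { return arg.starts_with("-conf="); }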
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index e9838d7222..b89141c88e 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -73,7 +73,7 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
remove(pathTmp);
return false;
}
- if (!FileCommit(fileout.Get())) {
+ if (!fileout.Commit()) {
fileout.fclose();
remove(pathTmp);
LogError("%s: Failed to flush file %s\n", __func__, fs::PathToString(pathTmp));
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 673d25efcf..7aa97fa1b7 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -710,7 +710,7 @@ void AddrManImpl::Attempt_(const CService& addr, bool fCountFailure, NodeSeconds
}
}
-std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::optional<Network> network) const
+std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, const std::unordered_set<Network>& networks) const
{
AssertLockHeld(cs);
@@ -719,13 +719,18 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::option
size_t new_count = nNew;
size_t tried_count = nTried;
- if (network.has_value()) {
- auto it = m_network_counts.find(*network);
- if (it == m_network_counts.end()) return {};
-
- auto counts = it->second;
- new_count = counts.n_new;
- tried_count = counts.n_tried;
+ if (!networks.empty()) {
+ new_count = 0;
+ tried_count = 0;
+ for (auto& network : networks) {
+ auto it = m_network_counts.find(network);
+ if (it == m_network_counts.end()) {
+ continue;
+ }
+ auto counts = it->second;
+ new_count += counts.n_new;
+ tried_count += counts.n_tried;
+ }
}
if (new_only && new_count == 0) return {};
@@ -758,9 +763,9 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::option
position = (initial_position + i) % ADDRMAN_BUCKET_SIZE;
node_id = GetEntry(search_tried, bucket, position);
if (node_id != -1) {
- if (network.has_value()) {
+ if (!networks.empty()) {
const auto it{mapInfo.find(node_id)};
- if (Assume(it != mapInfo.end()) && it->second.GetNetwork() == *network) break;
+ if (Assume(it != mapInfo.end()) && networks.contains(it->second.GetNetwork())) break;
} else {
break;
}
@@ -1208,11 +1213,11 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::SelectTriedCollision()
return ret;
}
-std::pair<CAddress, NodeSeconds> AddrManImpl::Select(bool new_only, std::optional<Network> network) const
+std::pair<CAddress, NodeSeconds> AddrManImpl::Select(bool new_only, const std::unordered_set<Network>& networks) const
{
LOCK(cs);
Check();
- auto addrRet = Select_(new_only, network);
+ auto addrRet = Select_(new_only, networks);
Check();
return addrRet;
}
@@ -1315,9 +1320,9 @@ std::pair<CAddress, NodeSeconds> AddrMan::SelectTriedCollision()
return m_impl->SelectTriedCollision();
}
-std::pair<CAddress, NodeSeconds> AddrMan::Select(bool new_only, std::optional<Network> network) const
+std::pair<CAddress, NodeSeconds> AddrMan::Select(bool new_only, const std::unordered_set<Network>& networks) const
{
- return m_impl->Select(new_only, network);
+ return m_impl->Select(new_only, networks);
}
std::vector<CAddress> AddrMan::GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered) const
diff --git a/src/addrman.h b/src/addrman.h
index be2ee8c2cb..ba6e13bf97 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -15,6 +15,7 @@
#include <cstdint>
#include <memory>
#include <optional>
+#include <unordered_set>
#include <utility>
#include <vector>
@@ -154,12 +155,12 @@ public:
* an address from the new table or an empty pair. Passing `false` will return an
* empty pair or an address from either the new or tried table (it does not
* guarantee a tried entry).
- * @param[in] network Select only addresses of this network (nullopt = all). Passing a network may
+ * @param[in] networks Select only addresses of these networks (empty = all). Passing networks may
* slow down the search.
* @return CAddress The record for the selected peer.
* seconds The last time we attempted to connect to that peer.
*/
- std::pair<CAddress, NodeSeconds> Select(bool new_only = false, std::optional<Network> network = std::nullopt) const;
+ std::pair<CAddress, NodeSeconds> Select(bool new_only = false, const std::unordered_set<Network>& networks = {}) const;
/**
* Return all or many randomly selected addresses, optionally by network.
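A minimal caller-side sketch of the widened Select() signature (hypothetical call site, not part of this diff); passing an empty set keeps the previous any-network behaviour:

    #include <addrman.h>
    #include <unordered_set>

    void TrySelect(AddrMan& addrman)
    {
        // Restrict selection to two networks; {} (the default) means all networks.
        std::unordered_set<Network> nets{NET_ONION, NET_I2P};
        const auto [addr, last_attempt] = addrman.Select(/*new_only=*/false, nets);
        if (addr.IsValid()) {
            // attempt a connection to addr; last_attempt is when it was last tried
        }
    }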
diff --git a/src/addrman_impl.h b/src/addrman_impl.h
index dd7f7b318f..a581f1f6f9 100644
--- a/src/addrman_impl.h
+++ b/src/addrman_impl.h
@@ -125,7 +125,7 @@ public:
std::pair<CAddress, NodeSeconds> SelectTriedCollision() EXCLUSIVE_LOCKS_REQUIRED(!cs);
- std::pair<CAddress, NodeSeconds> Select(bool new_only, std::optional<Network> network) const
+ std::pair<CAddress, NodeSeconds> Select(bool new_only, const std::unordered_set<Network>& networks) const
EXCLUSIVE_LOCKS_REQUIRED(!cs);
std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered = true) const
@@ -252,7 +252,7 @@ private:
void Attempt_(const CService& addr, bool fCountFailure, NodeSeconds time) EXCLUSIVE_LOCKS_REQUIRED(cs);
- std::pair<CAddress, NodeSeconds> Select_(bool new_only, std::optional<Network> network) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ std::pair<CAddress, NodeSeconds> Select_(bool new_only, const std::unordered_set<Network>& networks) const EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Helper to generalize looking up an addrman entry from either table.
*
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
index c0ef7b2279..ceef6c29ab 100644
--- a/src/bench/addrman.cpp
+++ b/src/bench/addrman.cpp
@@ -133,7 +133,7 @@ static void AddrManSelectByNetwork(benchmark::Bench& bench)
FillAddrMan(addrman);
bench.run([&] {
- (void)addrman.Select(/*new_only=*/false, NET_I2P);
+ (void)addrman.Select(/*new_only=*/false, {NET_I2P});
});
}
diff --git a/src/bench/cluster_linearize.cpp b/src/bench/cluster_linearize.cpp
index de85741909..cf071dda2d 100644
--- a/src/bench/cluster_linearize.cpp
+++ b/src/bench/cluster_linearize.cpp
@@ -4,7 +4,9 @@
#include <bench/bench.h>
#include <cluster_linearize.h>
+#include <test/util/cluster_linearize.h>
#include <util/bitset.h>
+#include <util/strencodings.h>
#include <algorithm>
#include <cassert>
@@ -12,6 +14,7 @@
#include <vector>
using namespace cluster_linearize;
+using namespace util::hex_literals;
namespace {
@@ -45,8 +48,8 @@ DepGraph<SetType> MakeWideGraph(ClusterIndex ntx)
return depgraph;
}
-// Construct a difficult graph. These need at least sqrt(2^(n-1)) iterations in the best
-// known algorithms (purely empirically determined).
+// Construct a difficult graph. These need at least sqrt(2^(n-1)) iterations in the implemented
+// algorithm (purely empirically determined).
template<typename SetType>
DepGraph<SetType> MakeHardGraph(ClusterIndex ntx)
{
@@ -111,17 +114,18 @@ DepGraph<SetType> MakeHardGraph(ClusterIndex ntx)
return depgraph;
}
-/** Benchmark that does search-based candidate finding with 10000 iterations.
+/** Benchmark that does search-based candidate finding with a specified number of iterations.
*
- * Its goal is measuring how much time every additional search iteration in linearization costs.
+ * Its goal is measuring how much time every additional search iteration in linearization costs,
+ * by running with a low and a high count, subtracting the results, and dividing by the
+ * difference in iteration counts.
*/
template<typename SetType>
-void BenchLinearizePerIterWorstCase(ClusterIndex ntx, benchmark::Bench& bench)
+void BenchLinearizeWorstCase(ClusterIndex ntx, benchmark::Bench& bench, uint64_t iter_limit)
{
const auto depgraph = MakeHardGraph<SetType>(ntx);
- const auto iter_limit = std::min<uint64_t>(10000, uint64_t{1} << (ntx / 2 - 1));
uint64_t rng_seed = 0;
- bench.batch(iter_limit).unit("iters").run([&] {
+ bench.run([&] {
SearchCandidateFinder finder(depgraph, rng_seed++);
auto [candidate, iters_performed] = finder.FindCandidateSet(iter_limit, {});
assert(iters_performed == iter_limit);
@@ -132,11 +136,12 @@ void BenchLinearizePerIterWorstCase(ClusterIndex ntx, benchmark::Bench& bench)
*
* Its goal is measuring how much time linearization may take without any search iterations.
*
- * If P is the resulting time of BenchLinearizePerIterWorstCase, and N is the resulting time of
- * BenchLinearizeNoItersWorstCase*, then an invocation of Linearize with max_iterations=m should
- * take no more than roughly N+m*P time. This may however be an overestimate, as the worst cases
- * do not coincide (the ones that are worst for linearization without any search happen to be ones
- * that do not need many search iterations).
+ * If P is the benchmarked per-iteration cost (obtained by running BenchLinearizeWorstCase for a
+ * high and a low iteration count, subtracting the results, and dividing by the difference in
+ * iteration counts), and
+ * N is the resulting time of BenchLinearizeNoItersWorstCase*, then an invocation of Linearize with
+ * max_iterations=m should take no more than roughly N+m*P time. This may however be an
+ * overestimate, as the worst cases do not coincide (the ones that are worst for linearization
+ * without any search happen to be ones that do not need many search iterations).
*
* This benchmark exercises a worst case for AncestorCandidateFinder, but for which improvement is
* cheap.
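A hypothetical worked example of the estimate described above (timings invented purely for illustration):

    // Suppose the 5000- and 15000-iteration worst-case benchmarks measure:
    constexpr double t_lo_ns = 2.0e6;   // e.g. Linearize64TxWorstCase5000Iters
    constexpr double t_hi_ns = 5.0e6;   // e.g. Linearize64TxWorstCase15000Iters
    // Per-iteration cost P, per the subtraction described above:
    constexpr double p_ns = (t_hi_ns - t_lo_ns) / (15000 - 5000);   // = 300 ns
    // With N taken from LinearizeNoIters64TxWorstCase*, a Linearize() call with
    // max_iterations=m is then expected to take roughly N + m * p_ns.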
@@ -205,14 +210,57 @@ void BenchMergeLinearizationsWorstCase(ClusterIndex ntx, benchmark::Bench& bench
});
}
+template<size_t N>
+void BenchLinearizeOptimally(benchmark::Bench& bench, const std::array<uint8_t, N>& serialized)
+{
+ // Determine how many transactions the serialized cluster has.
+ ClusterIndex num_tx{0};
+ {
+ SpanReader reader{serialized};
+ DepGraph<BitSet<128>> depgraph;
+ reader >> Using<DepGraphFormatter>(depgraph);
+ num_tx = depgraph.TxCount();
+ assert(num_tx < 128);
+ }
+
+ SpanReader reader{serialized};
+ auto runner_fn = [&]<typename SetType>() noexcept {
+ DepGraph<SetType> depgraph;
+ reader >> Using<DepGraphFormatter>(depgraph);
+ uint64_t rng_seed = 0;
+ bench.run([&] {
+ auto res = Linearize(depgraph, /*max_iterations=*/10000000, rng_seed++);
+ assert(res.second);
+ });
+ };
+
+ if (num_tx <= 32) {
+ runner_fn.template operator()<BitSet<32>>();
+ } else if (num_tx <= 64) {
+ runner_fn.template operator()<BitSet<64>>();
+ } else if (num_tx <= 96) {
+ runner_fn.template operator()<BitSet<96>>();
+ } else if (num_tx <= 128) {
+ runner_fn.template operator()<BitSet<128>>();
+ } else {
+ assert(false);
+ }
+}
+
} // namespace
-static void LinearizePerIter16TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<16>>(16, bench); }
-static void LinearizePerIter32TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<32>>(32, bench); }
-static void LinearizePerIter48TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<48>>(48, bench); }
-static void LinearizePerIter64TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<64>>(64, bench); }
-static void LinearizePerIter75TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<75>>(75, bench); }
-static void LinearizePerIter99TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<99>>(99, bench); }
+static void Linearize16TxWorstCase20Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<16>>(16, bench, 20); }
+static void Linearize16TxWorstCase120Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<16>>(16, bench, 120); }
+static void Linearize32TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<32>>(32, bench, 5000); }
+static void Linearize32TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<32>>(32, bench, 15000); }
+static void Linearize48TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<48>>(48, bench, 5000); }
+static void Linearize48TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<48>>(48, bench, 15000); }
+static void Linearize64TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<64>>(64, bench, 5000); }
+static void Linearize64TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<64>>(64, bench, 15000); }
+static void Linearize75TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<75>>(75, bench, 5000); }
+static void Linearize75TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<75>>(75, bench, 15000); }
+static void Linearize99TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<99>>(99, bench, 5000); }
+static void Linearize99TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<99>>(99, bench, 15000); }
static void LinearizeNoIters16TxWorstCaseAnc(benchmark::Bench& bench) { BenchLinearizeNoItersWorstCaseAnc<BitSet<16>>(16, bench); }
static void LinearizeNoIters32TxWorstCaseAnc(benchmark::Bench& bench) { BenchLinearizeNoItersWorstCaseAnc<BitSet<32>>(32, bench); }
@@ -242,12 +290,84 @@ static void MergeLinearizations64TxWorstCase(benchmark::Bench& bench) { BenchMer
static void MergeLinearizations75TxWorstCase(benchmark::Bench& bench) { BenchMergeLinearizationsWorstCase<BitSet<75>>(75, bench); }
static void MergeLinearizations99TxWorstCase(benchmark::Bench& bench) { BenchMergeLinearizationsWorstCase<BitSet<99>>(99, bench); }
-BENCHMARK(LinearizePerIter16TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter32TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter48TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter64TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter75TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter99TxWorstCase, benchmark::PriorityLevel::HIGH);
+// The following example clusters were constructed by replaying historical mempool activity, and
+// selecting for ones that take many iterations (after the introduction of some but not all
+// linearization algorithm optimizations).
+
+/* 2023-05-05T23:12:21Z 71, 521780, 543141,*/
+static constexpr auto BENCH_EXAMPLE_00 = "801081a5360092239efc6201810982ab58029b6b98c86803800eed7804800ecb7e058f2f878778068030d43407853e81902a08962a81d176098010b6620a8010b2280b8010da3a0c9f069da9580d800db11e0e9d719ad37a0f967897ed5210990e99fc0e11812c81982012804685823e0f0a893982b6040a10804682c146110a6e80db5c120a8010819806130a8079858f0c140a8054829a120c12803483a1760c116f81843c0d11718189000e11800d81ac2c0f11800d81e50e10117181c77c1111822e87f2601012815983d17211127180f2121212811584a21e1312800e80d1781412813c83e81815126f80ef5016126f80ff6c16126f80f66017126e80fd541812800d81942a1912800e80dd781a12800d81f96c1b12805282e7581b127180fd721c1271a918230b805fc11a220d8118a15a2d036f80e5002011817684d8241e346f80e1181c37805082fc04260024800d81f8621734803382b354270b12805182ca2e162f800e80d52e0d32803dc360201b850e818c400b318c49808a5a290210805181d65823142a800d81a34e0850800e81fb3c0851886994fc0a280b00082c805482d208032e28805e83ba380059801081cd4a0159811884f770002e0015e17280e49024300a0000000000000031803dcb48014200"_hex_u8;
+/* 2023-12-06T09:30:01Z 81, 141675, 647053,*/
+static constexpr auto BENCH_EXAMPLE_01 = "b348f1fc4000f365818a9e2c01b44cf7ca0002b004f0b02003b33ef8ae3004b334f9e87005800d81c85e06b368fae26007b05ef2e14208be1a8093a50409b15cf5ee500a802c80a1420b802dea440c802ce50a0d802cdc320e802cd7220f802dd72210805380f74a118174f370126e96b32812127182c4701312817389d26414128035848c221512800e82bf3816126f81e4341712801082b228181280518af57418128040859a0019127182d0401a12803e858b641b127182c4421c126f82b3481d12811486b6301e12821d89e7281f126e8a8b421f127182d6642012806284c12021126e81d34822126e86a76222126e86d8102212805187b6542312800d82fc002412803d848e0e2512801082d27a26126e8589642612800e83a9602712800e83bd0028126e81ef1a29116e858d7228126f82db5e2912801083843c2a127181c93c2b126e85d0162b127181c5622c126e84f8262c12800f8392202d12800e82b66c2e126e81d0082f12803282d50430126e84f9003012805f84be6c3112846e88df0e2b12804080d44c340a8b31898808350a800ed760350b801083a1182b517182817e2a51800e82b6582951803583cb52420030806284cb6c204f7181d300204f82688ce0303e001d800e82bb200f488010808a182822a3289cd63041000a6fcd100a408a7caaa7024800002f803584e0741e27288f3386dd783b001000802683f27e004b8c44bcd0763f0000000000000000000100000e00"_hex_u8;
+/* 2023-04-04T00:26:50Z 90, 99930, 529375,*/
+static constexpr auto BENCH_EXAMPLE_02 = "815b80b61e00800da63001cd378da70e028010991a03800e9d3e0480109708058010991a068010973a07da738fa72408de7491831009b35b88f0080a9d4485de180b71974e0c71974e0d80108e500eb27988a75a0f719632108061a56c11801087761280108a1413807893441480538c1415a606828806168010893e1780548c40188e4b80bb2c196eab3e1718805ed60e18188051c97a19188010cf781a1871b11e1b1871c5281c1880508080581d186e80b13c1e188035cf421f18805fe0482018804caa661f198035a9001f156e80cb701d1871a2281e1871ad281f18817380a16020186f98642118805ee04821198010b6702219800ea12623196eb67024198035808b0025196fa65c26198054ba1c2719807680bf7c28198053cd782919803d80b80429198051db5a2a198040d3742b19976584bb1c28196efc1c281971b21a29198052bc762a1971a2502b196eb73c2c19976381ab0c2a18806290543409862081c3423b00336fbc70224d80109e7c1c52805ebd5c1942800eb57016468034ba423405158118da28350416927480f4743000159f6a81c9462e00188051ec5e380e00800e9e420775800d9e26007c906c82f754251d0025870480f12c14280023800d9e26027e9e1385ed08102900001a804fac7a018001719856028001800da87e0180039b1a868b60064102246e9f42018005800da87e028005850d81d600026d862381a2200e0008230015831480a5480342000524803eeb32006e873582a4700a0100351300"_hex_u8;
+/* 2023-05-08T15:51:59Z 87, 76869, 505222,*/
+static constexpr auto BENCH_EXAMPLE_03 = "c040b9e15a00b10eac842601805f85931802c104bae17403ae50aaa336049d76a9bf7005c55bbeab6606ae2aa9c72c07805e81992e08af7dab817a096e80a7e4520909803e92bd780a097185c76c0b096e98e7380b09850bb9953c0c09803389f6260d096f859d620e09803f88d3000f0971829c6e1009837690f6481109806285931811097181f56814076ea09b74120980408eb73213096f87853214096f86e2701509803f8c860016098a6fe6c3721709814f92a204180980628a8a441909803285df681a0980348498661b096e8290781c096e978e081c097187da1a1d097186c05c1e097185893c1f09805f8ad9002009800d84e74e21097183a67a22097182e23423097184b53a23096ea393062309840faddd46240980618eb732250980548bee6a2609807986883c2709718298402809815388b6582909805384ec742a097181b9142b096e97b5262b096e85e14e2c0980518abb5c2d09805489e75a2e09803187e3382f097180eb1c34046f87c34a2f098309a5c54430097186911831098054899c083209801083bc1033097081e02a3409805f848f0c35096e80d4343a057180c37040006f80a22438097180a0503f03816f8381444003803f80ef003f05800580a4283f066ef72845016efb91663e09923d808d8216470041803584837c46012f9247dc86684501268267a09610450222862184db68440712803585ea40440113835d97887805800b8723c7a40a4b00022f81529ae2143c0c1f80548b8f381b311980408e955c055e802589dc10037e801083b54602658010848130006700"_hex_u8;
+/* 2023-05-01T19:32:10Z 35, 55747, 504128,*/
+static constexpr auto BENCH_EXAMPLE_04 = "801af95c00801af72801801af95c02873e85f2180202873e85f2180202873e85f21802028018fb2802068018fb2803068018fb2804068018fb2805068018fb2806068018fb2807068018fb2808068018fb2809068018fb280a068018fb280a058018fb280b058018fb280c058018fb280d058018fb280e058018fb280f058018fb2810058018fb2811058018fb2812058018fb2813058018fb2814058018fb2815058018fb2815048018fb2816048018fb2817048018fb2818048018fb2819048018fb281a048018fb281b04810d80d9481f00000100"_hex_u8;
+/* 2023-02-27T17:06:38Z 60, 55680, 502749,*/
+static constexpr auto BENCH_EXAMPLE_05 = "b5108ab56600b26d89f85601b07383b01602b22683c96003b34a83d82e04b12f83b53a05b20e83c75a066e80840a06068040be0007066fb10608066fb2120906800eba320a06842b80b05a0a066eff420b067199300b068124c3140c0680618085180d066faa1c0e068010b4440f068051af541006800da1781106857881946812066eee1613068052b31014068324808d361506806180885c150671b03216066ef11017068052b63218066ef3521806803f80865419066e93441a068035a13e1b0680628085181c06806ec4481d068117e72c1e06719c721f068077c42420068159808d1821066eef0c21058010b90022056f9908230571993024058010b00a25058010b00a260580608087402705803fc10027068032b42828068051b6322906800db11e212a8324808d361933803ff400192f826381a7141a2f8032ac08152a800db54c044e8323808d3630010002018158d84000042d821cea12002807853580d462002d01891181d022002e00"_hex_u8;
+/* 2023-04-20T22:25:49Z 99, 49100, 578622,*/
+static constexpr auto BENCH_EXAMPLE_06 = "bf3c87c14c008010955a01b21d85e07002800d946c036e8e3404b77f86c26605b33c85f55e06bd06879852078010970a08bd4b87cf00098123a7720ab2158687680b8054d4440b0a8062fa4c0c0a71ac400d0a80628081540e0a8010a2580f0a8054b676100a8032b85c110a6e9a40120a6e809012130a817f80c31e140a8175808674150a719d46160a8172d86415098033c1481609800da4181709800ada2e1809803dc85219098034b4041a096ef5501b098052d67c1c098051d3281d09800ebc4a1e098175808c641f098061c55020098078c85021096e8081141f0b6faf1e200b8061da68210b8062f000220b800ebc20230b8035d058240b8053de32250b8050b610250b6fad32260b803dc276270b803d80a610280b6ef812290b8052b6322a0b800eb57e2b0b8052bd062c0b719e522d0b71a3762e0b8010bb1e2f0b80109a78310a80109962320a8051a60c330a6f9f3e320b6e808b24330b719e40340b8117cc50350b803d80971a360b8051b930370b6f9e0a380b719b10390b8052a6003a0b6e808c76390a7195603a0a6f935c3b0a8054a31a3c0a803ce30c3b0b803fa3003c0b800dbe2a3d0b8f3480a84244058005851a44069d1bf824400b83098f284507719c723d4f6f9c1c3449719c722f4f6eb23c304f8061c5502e528061da682b4e8118bb724e022a8054b35028476e941c1d51815be02c4f01148557808e3a4f070e8104af464e001180329d364e010d805f9f6a421b9c3387aa744c0d4d71ac400b800881748098444710338173809b780b80008054d444292c12821dc040550403078b4682b4664517003f00"_hex_u8;
+/* 2023-06-05T19:56:12Z 52, 44896, 540514,*/
+static constexpr auto BENCH_EXAMPLE_07 = "b317998a4000b40098d53e01b45b99814802b7289b940003b3699a9d1204b6619a807a05814682cb78050571d854060571d8540705800e808d7a0805803480c06a09056e8189280a056ffd060b05800d80ea7a0c05803c80b80c0c03803e80d86e0d036ed2280e03811581804a0f036fd34e1003805380eb6811036e81f60e12038010ec101204805f80e83a13048033809534140471e00a15048010f95816046e81fa301704805180a74c1705800d808f1018056fd55c1905800e8091481a056e80a76e1b05805f80e2741c0571809b021c05826382c8401d0571df201e05800e809d2c1f05850083e87c1f05811580af68200571f20a21056ff9042205803e80df1e23056e81956c24056e9f542604805180e83829000e800e8080621325803380b0402a020d6ef8100e2c8c4889a96a2c000f803580ce4c2c000b6e9f54062a803480c96406260500"_hex_u8;
+/* 2023-12-05T23:48:44Z 69, 44283, 586734,*/
+static constexpr auto BENCH_EXAMPLE_08 = "83728ce80000b90befca1001806083b24002b40de6da3203b545e9c35c04b34beede3005b068e8883006d41c80b1e14c07b337e7841208b26beadb2e096e83892e090980518487380a096e82815c0a096e81ce3c0b097181db200c097181d4020d09810084ed600e096e96b0100f0971819a0210086e93da2e0f09803583ee5e1009803583c66c1109800d82bb6e1209800d81d56a1309803c82e622140971819f521509803d84a55c15057181d6161605806283ac5217056e949c5a18056e89e8641806815889e23419067181de321a066e8af2641a076e82a70a1b07803583f2081c076f81e76e1d076e81d33e1e07800d83b8761e086e82a5541f087181de302008805f84ad0021086e81c74022086e81bd3e23086e9288182408806184b3102409803283816025096e91ed662609830a88e70827096e81d14a27097181ce6028096e8cf03829097181883832016f81835c3103806181e0103203804180b8103204863584fe183304800de66434046e9e4c34056e81d6742f429213c0eb602e3d6483b06c283a6e81d73c263d6e82f9581831805485ab360e37805080c62609398b3189880838010603916db1f3583a03000110873199f8623c000000011100"_hex_u8;
+/* 2023-04-14T19:36:52Z 77, 20418, 501117,*/
+static constexpr auto BENCH_EXAMPLE_09 = "bf2989d00400815bca5c01af1e86f97602800d9d6c03800d8a3404b47988866e05b36287f92e0680109f68078010991a08805ecf1208076e80933e09078062d01c0a078054b6760b078053b6760c076f9c1c0d078054b6760e0771af260f0771b17e10078032f57011078035d56812078054e1581307886b83dc301407817480d13013068005a6001406803d80821a15066ef3201606800ea2181706800da628180671ab1219068054db0c1a06719b001b06815b80a11c1c068050b9301d066fac2a1e068033ab481f06719b1020068035ab721e07803dc2761f0771ae3c20078040f60e210771ce282207800ea4322307882a81a66024078035ad4625076efe7e26078162808e1827078118bb7228076eac7428088010bf58290871a04c2a0871bc722b086fa8382c08803d80a0142d088035d6282e088051c30c2f086efc623008800d9f6231086f986432088117bb7237028010a63034068010c84e2740800ea64c2237832c80933e1f3b830880c454390208813c80955c3905068032c73611348010a03c093c837a808a101b278050ac34093a8051ac34291b8f3b8187401d28881a82cb3a3a0a37977b86d20843000028996686a7083f030f8078d3761b27106e995a08499070839b5a1131000b00"_hex_u8;
+/* 2023-11-07T17:59:35Z 48, 4792, 498995,*/
+static constexpr auto BENCH_EXAMPLE_10 = "875f89aa1000b51ec09d7201c55cc7a72e02a11aa1fb3203b233a7f95204800ef56205b33ea9d13006803e80b26e07d90ec9dd4008b45eabbe6c09806080ca000a815984e8680a0a6f80925e0a0a803f80e1660c09937c94b7420d086e82f5640a086e80997e0b086f808d320c08800580a5640d086f8089100e08804080c9060f088115819a1c10086e82961a0f0a805f81bc0a100a6ff826110a6ef53e120a807584c60c110a6e818f32120a803c81c246130a805481d508140a8159838410150a7180a55c160a6f80821c170a6fe6101c066fe6101d06805080f854190a6e81b27c1a0a8155819c701e06805180ae0c21046e8b9a222501805180f53422001680f26880f8a62a220116803580da582007058153838e6e21000c800d80a712033a807681ae1c23000308834a82d36023020205815981e03a051a08001700"_hex_u8;
+/* 2023-11-16T10:47:08Z 77, 473962, 486863,*/
+static constexpr auto BENCH_EXAMPLE_11 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801980c06007801980c06008801980c06009801980c0600a801980c0600b801980c0600c801980c0600d801980c0600e801980c0600f801980c060108019d12c11800f80b1601111800f80b1601111801080b1601111800f80b160100e800f80b160100f801980c060110f800f80b160140d801180b1601111801180b160100d801180b160120c801180b1600f10801180b1600f11801980c0601011800f80b160140e800f80b160110f801980c060170a801180b1601210801980c060140f800f80b1601311801980c0602005801180b1601f07800f80b1601b0c800fca7c1611812081f9601638812081f9601637812081fb001636801080b160142f801980c0600e2a801080b1600f2a801180b1600d25801980c0600e25800f80b1600d27801980c0600e27801980c0600d27801180b1600e26812080b1500c27812081f960201025812081f960200f27812081fc201d101c812081fc201d101d812081fc201d0f1f812081fc201d0f20812081f9601b1016800f80b1600a35800f80b1600a36800f80b1600e32801080b160122f812081f960280040812081fc20121d1b812081f960112713812081f960160d37812081fc20140d2b812081f960130d2d812081fc20130c2c812081fb001b0157812081fb001a0245812081fc20140030812081fc20092747812081fb000b152500"_hex_u8;
+/* 2023-10-06T20:44:09Z 40, 341438, 341438,*/
+static constexpr auto BENCH_EXAMPLE_12 = "80318f4c0080318f4c0180318f4c0280318f4c0380318f4c0480318f4c0580318f4c0680318f4c078033a57807078033a57807078033a57807078033a57807078033a57807078033a57807078033a57807078033a578070780318f4c0e0180318f4c0d0380318f4c0c0580318f4c0b078033a57803128033a57803128033a57803128033a578031280318f4c0412810b9c28140300810c9c281303028033a57802188033a57802188033a5780218810c9c280b01108033a578001c810c9c2807050f8033a578001b810c98040700158033a578001c810c98040301158033a5780019806ca1240101118033a578001300"_hex_u8;
+/* 2023-11-15T21:40:46Z 96, 23608, 138286,*/
+static constexpr auto BENCH_EXAMPLE_13 = "8060829f4000b157bab07a01b27cc2b16802b22fbce54603826480a95804803da81a05bc7bcac93806800de55207800daf0608805bc71809805bc7180a800d9d4a0b805bbc700c8152d7180d805bb9380e850a8886260f800d80d33410bf38d3d55011b41dc4eb6012bd70d2ce2e138d3596af7812137180cd501313805e81f7281413718092001513803d81f90016136e8b916c1713801081861a17106e80cd2a18106f80cc3c19106e80cf161911800d80fe781b107180d87c1c106e80fb081d10803e8286701d11800d81c4781f10804082a6002010801081912e21107180ff0021116e81da4a2310850b8b864023116e89db3224116e84ff7e2610897c95993427106f80bb1a240b803581c272250b8032828c10260b6e80d42a270b804082b35a280b800d80fe3e290b805cc0282312821d8697022b0b6e8add562c0b805281c8063007811883f1082313800d80fe3e24137180c9142513800d8380102613803382c00e2713805eb32228136e8494542913800e8186742913806082b74c2a1380528285782b13800d818f7a2c136e84a5562d1380508286702e136f80a46e3e04803f8191364102805481ad4c3d076e809a5a3e077180fe4032136e838b7233138c4790cf384106853584ab624206805b80932a4801806280966c48028168ef04400b7181bd524903806282db5c375b9316acbf703a599c68c5a454385c6e81d63e364a6f80ff64334e817485a6784f023171819536234e800d81826e1e498053829a12420018834c87cb14291d2e840e8bc94c1d2825800d81b7220368811783fe0e271f1f811783e758380f001ecd55809edf6e56000000003a815984ba76008010d54d80aebb4e2c22000000000000002c807682f150007a00"_hex_u8;
+/* 2023-12-06T09:18:20Z 93, 68130, 122830,*/
+static constexpr auto BENCH_EXAMPLE_14 = "b26beadb2e00800d80ca0a01d41c80b1e14c02b068e8883003800d81af1604b34beede30056e80b14006b151f5d46c07b93e8085b02608b30cf98b1009b14ef6b3040ab176f6ab480bb7078082b8640c800d81c6460d802c80a8080e802c80a8080f802c80a14210802ce50a11802cd722127181ce6012126e81d14a13126e9b8b00141282428dd42c15128051828408150e6e81bd3e150f805f84ad00160f7181de30170f6e81c740180f800d83b876190f6e82a5541a0f6e81d33e1a106e82a70a1b106f81e76e1c10803583f2081d106e82d9401e106e96e4441f107181de321e12815889e2341f127182d60c20126e979d4e21126e8282262410800d82972c25106f838a5822126f82842a23127182d24a2412803e84bc2a2512800d83c81a26126e84f8142712805085a22c27126e889e6a2812801083aa50281280348598102912801082d5522a126e85865c2b127182c7602b1282468c82042c126e84972c2d12805485d93a2d12801083c7322e12815386e1582f126e84fb0c30126f82eb6c3011813a85b47a3111803f869f5c3211805181ed30370d6e84bf0a3411804180e1383809815883aa183a08815a8392203e05807681f140380c6e9e4c4005805485ab363255805183856030406e82f9582c45805185c1001b4f82418df1001a4e803283c50e430026800d83a6201a4b836886be3044010b8b318988084c0101803183a6120776800d828a1e087682338ae050301c33873199f8624d010032813986bc663c1034800d83a5220a6f800d82be52048000805183e364084907800d83cc4a018005815987b41e1832000017884b9dce72035035803284c11e00800885769d9538192f0000000002001000"_hex_u8;
+/* 2023-12-14T02:02:29Z 55, 247754, 247754,*/
+static constexpr auto BENCH_EXAMPLE_15 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801980c06007801980c06008801980c06009801980c0600a801980c0600b801980c0600c801980c0600d801980c0600e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600d07801180b1600f06801180b1600c0a801180b1600f08801180b1600c0c801180b1600c0d801180b1600c0e801180b160100b801180b1601309812081fc200e2a812081fc200e29812081fc200e28812081fc200e0e18812081fc200e0e17801980c060042e812081fc200e0d07812081fc200e0d08812081fc200e0c0a812081fc200e0d0a801980c060081e812081fc200f0c0c812081fc200f0c0d812081fc200f0c0e801180b160083a801180b1600426801980c0600b20801980c0600a22812081fc200f0b30801180b160022b801180b160022b812081fc20062422812081fc2006220b812081fc200c0a1e812081fc2012041a00"_hex_u8;
+/* 2023-12-14T15:17:20Z 76, 102600, 103935,*/
+static constexpr auto BENCH_EXAMPLE_16 = "801980c06000801980c06001801980c06002801980c06003801980c06004801180b1600404801180b1600404801180b1600404801980c0600504801980c0600802801980c0600803801180b1600704801980c0600804801280b1600804812081fc200810812081fc20080f812081fc20080e801180b160080c800f80b160080d801980c060090d801180b160090e801980c0600a0e812181fc200a0c801180b1600a0d812181fd400a0c801980c0600a1c801980c0600916801180b1600719801180b160061b801980c0600d15801980c0600717812081fc200718801980c0600716801180b160072d801180b1600722801180b1600525801980c060091b801980c060071e801080b160071f801280b160061d812081fc20063a812181f960160815801280b1600525801980c0600625801180b1600626801980c0600726801980c0600536801180b160032b801980c060042b801280b160032d801980c060033e801180b160043e812181fc20100c27801080b160042f801980c0600342801180b1600442812081fc20150d25800f80b1600245812081fd40120619812081fc20040243812081fc20120c2c812081fd40120a1d812181fb00100623812081fc20030347812081fc20072126801980c0600236812081fc20040d2b812081fc20120328801980c0600237801180b1600337812081fc20052230801180b1600239812081fc2008242c812081fd4005112d812081fb00070b32812081f96011034700"_hex_u8;
+/* 2023-12-15T07:12:29Z 98, 112693, 112730,*/
+static constexpr auto BENCH_EXAMPLE_17 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801180b1600606801180b1600606801180b1600606801180b1600606801280b1600606801180b1600606801180b1600606801980c0600d00801980c0600b03801980c0600b04801980c0600f01812081fc200a16812081fc200a15812081fc200a14812081fc200a13812081fd400a12812181fc200a11812181fc200a0f801180b1600a10801180b1600a10801980c0600a10801180b1600b10801180b1600b10801980c0600621801980c0600915801980c060041b801180b160051b801980c0600f12801980c0600f13801980c0600d15801980c0600c17801980c060072e800f80b160082e812181fc200d150e801980c0600922801180b1600923801980c0600823801180b1600623801180b1600a20801180b1600e1c801180b1600b20801180b1600b21801980c0600a3e800f80b1600b3e801980c0600931801180b1600a31812181fc20140325801180b1600a30801180b160054c801180b160043b801980c0600336812181fc200253812081f960090944812081fc2007003c801980c0600339801180b1600433801980c0600453801980c0600340801980c060033d801080b160043d812081f960070854801980c060045a801180b160055a801180b1600545801980c0600643801980c0600641801280b1600739801180b1600562812081fc20121f27812181fc20210137812181fc2016112f801980c0600259801980c0600156812181fc20053a31801180b160025c801180b1600257801980c0600357812081fc200d2d1e812181fc20102444812181fc20035a801180b160035b801980c0600751812181fc2007392a812181fc20025f801980c060045e801180b1600350812081fc20070f6f801180b1600263812181fc201b1322812181fc2011283b812081fc2002442100"_hex_u8;
+/* 2023-12-16T02:25:33Z 99, 112399, 112399,*/
+static constexpr auto BENCH_EXAMPLE_18 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801980c06007801180b16008801180b16009801180b1600a801180b1600a0a801180b1600a0a801180b1600a0a801180b1600a0a801980c0600d06801180b1600b09801980c0601005801180b1600c0a801980c0600d0a801980c0601106801180b1600e0a801980c0601207801980c0601207801180b160100a812081e668100a812081e668100a812081e668100a801980c0601407801980c0601606812081fc201226812081fc201225812081fc201224812081fc201223801180b1600e21801980c0600b1e801180b1600c1e801180b1601316801980c060091b801980c0601312801980c0600a1c801180b160190e801180b1601315801180b1600e1b801180b1601713801180b1600f1c801980c0600d34801980c0600d30801980c060102e801980c060122d801980c0600b2a801980c0600b2a801980c0600b2b801180b1601122801180b1600e26801180b1601025801180b1600f26812081fc20280032812081fc20270034812081fc20250034801180b1600d4b801980c0600d457a809a000d46801980c0601044801980c0600e46801180b1600f43801180b160123f801180b160123e801180b1601130801180b1601131801180b1601131812081fc20230a36801980c0600a5a801180b1600a5b801980c0600a5b801180b1600b5b801980c0600b5a801180b1600f57801180b1600d3f801980c0600669801980c0600568801980c0600466801180b1600945801180b1600649801180b1600945812081fc2018234b812081fc20142534812081fc20142532812081fc20142530801180b160074d801180b1600a4b801180b1600a4a812081fc20221662812081fc200c0472812081fc20072e42812081fc20062c23812081fc20100572812081fc200f036c812081fc2001345100"_hex_u8;
+/* 2023-03-31T19:24:02Z 78, 90393, 152832,*/
+static constexpr auto BENCH_EXAMPLE_19 = "800dd042008028b13c018028b13c028028b13c038029b13c048029b13c058029b13c0680299948078029b13c088029b13c09802899480a802899480b8028b13c0c80299e700d802899480e802999480f8029b13c10802999481180299948128028b13c138029b13c1480289e701580289948168028b13c1780289948188028994819802899481a802999481b802999481c802899481d802999481e8028b13c1f8029b13c20802999482180299948228028b13c2380298c242480289948258029b13c2680288c242780298c242880299e70298f5a80ea762a824780aa00292a82038090402429813fcf00152a8203809040142a813ff700112982038090402d002d813ff70028002c8203809040270024824780aa00270025820380904025002882038090401e022a82038090401d042782038090401c01298203809040190029813ff700170028813ff700140128807b9258120128841280f6402c01002e82038090402b00062b820380904027000031813ff70011192d82038090401d000129851981a9403a0000003b82038090400c182e813ff7000b0f2982038090401314141b807b925805192b84568190001121000334807bdd400149824780aa00001f2a813ff700003d0b8203809040050d1915807bdd4001498728828f400b010004050501000a050c851981a9400104050b061a0400"_hex_u8;
+
+static void LinearizeOptimallyExample00(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_00); }
+static void LinearizeOptimallyExample01(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_01); }
+static void LinearizeOptimallyExample02(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_02); }
+static void LinearizeOptimallyExample03(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_03); }
+static void LinearizeOptimallyExample04(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_04); }
+static void LinearizeOptimallyExample05(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_05); }
+static void LinearizeOptimallyExample06(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_06); }
+static void LinearizeOptimallyExample07(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_07); }
+static void LinearizeOptimallyExample08(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_08); }
+static void LinearizeOptimallyExample09(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_09); }
+static void LinearizeOptimallyExample10(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_10); }
+static void LinearizeOptimallyExample11(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_11); }
+static void LinearizeOptimallyExample12(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_12); }
+static void LinearizeOptimallyExample13(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_13); }
+static void LinearizeOptimallyExample14(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_14); }
+static void LinearizeOptimallyExample15(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_15); }
+static void LinearizeOptimallyExample16(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_16); }
+static void LinearizeOptimallyExample17(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_17); }
+static void LinearizeOptimallyExample18(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_18); }
+static void LinearizeOptimallyExample19(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_19); }
+
+BENCHMARK(Linearize16TxWorstCase20Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize16TxWorstCase120Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize32TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize32TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize48TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize48TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize64TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize64TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize75TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize75TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize99TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize99TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
BENCHMARK(LinearizeNoIters16TxWorstCaseAnc, benchmark::PriorityLevel::HIGH);
BENCHMARK(LinearizeNoIters32TxWorstCaseAnc, benchmark::PriorityLevel::HIGH);
@@ -276,3 +396,24 @@ BENCHMARK(MergeLinearizations48TxWorstCase, benchmark::PriorityLevel::HIGH);
BENCHMARK(MergeLinearizations64TxWorstCase, benchmark::PriorityLevel::HIGH);
BENCHMARK(MergeLinearizations75TxWorstCase, benchmark::PriorityLevel::HIGH);
BENCHMARK(MergeLinearizations99TxWorstCase, benchmark::PriorityLevel::HIGH);
+
+BENCHMARK(LinearizeOptimallyExample00, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample01, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample02, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample03, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample04, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample05, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample06, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample07, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample08, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample09, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample10, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample11, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample12, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample13, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample14, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample15, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample16, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample17, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample18, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample19, benchmark::PriorityLevel::HIGH);
diff --git a/src/bench/streams_findbyte.cpp b/src/bench/streams_findbyte.cpp
index 5098262e9a..004bf8ffc9 100644
--- a/src/bench/streams_findbyte.cpp
+++ b/src/bench/streams_findbyte.cpp
@@ -19,7 +19,7 @@ static void FindByte(benchmark::Bench& bench)
uint8_t data[file_size] = {0};
data[file_size-1] = 1;
file << data;
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
BufferedFile bf{file, /*nBufSize=*/file_size + 1, /*nRewindIn=*/file_size};
bench.run([&] {
diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp
index ebe013b638..9cbafa233d 100644
--- a/src/bitcoin-chainstate.cpp
+++ b/src/bitcoin-chainstate.cpp
@@ -283,8 +283,6 @@ int main(int argc, char* argv[])
epilogue:
// Without this precise shutdown sequence, there will be a lot of nullptr
// dereferencing and UB.
- if (chainman.m_thread_load.joinable()) chainman.m_thread_load.join();
-
validation_signals.FlushBackgroundCallbacks();
{
LOCK(cs_main);
diff --git a/src/chain.h b/src/chain.h
index c46392c535..13f7582385 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -178,7 +178,7 @@ public:
//! Verification status of this block. See enum BlockStatus
//!
//! Note: this value is modified to show BLOCK_OPT_WITNESS during UTXO snapshot
- //! load to avoid the block index being spuriously rewound.
+ //! load to avoid a spurious startup failure requiring -reindex.
//! @sa NeedsRedownload
//! @sa ActivateSnapshot
uint32_t nStatus GUARDED_BY(::cs_main){0};
diff --git a/src/cluster_linearize.h b/src/cluster_linearize.h
index 607ae681d2..e964849f22 100644
--- a/src/cluster_linearize.h
+++ b/src/cluster_linearize.h
@@ -118,6 +118,28 @@ public:
}
}
+ /** Construct a DepGraph object given another DepGraph and a mapping from old to new.
+ *
+ * Complexity: O(N^2) where N=depgraph.TxCount().
+ */
+ DepGraph(const DepGraph<SetType>& depgraph, Span<const ClusterIndex> mapping) noexcept : entries(depgraph.TxCount())
+ {
+ Assert(mapping.size() == depgraph.TxCount());
+ // Fill in fee, size, ancestors.
+ for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
+ const auto& input = depgraph.entries[i];
+ auto& output = entries[mapping[i]];
+ output.feerate = input.feerate;
+ for (auto j : input.ancestors) output.ancestors.Set(mapping[j]);
+ }
+ // Fill in descendant information.
+ for (ClusterIndex i = 0; i < entries.size(); ++i) {
+ for (auto j : entries[i].ancestors) {
+ entries[j].descendants.Set(i);
+ }
+ }
+ }
+
/** Get the number of transactions in the graph. Complexity: O(1). */
auto TxCount() const noexcept { return entries.size(); }
/** Get the feerate of a given transaction i. Complexity: O(1). */
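A small hypothetical usage sketch of the remapping constructor added above (names and mapping invented):

    // Renumber a 3-transaction graph: original position 0 moves to 2, 1 to 0, 2 to 1.
    std::vector<ClusterIndex> mapping{2, 0, 1};
    DepGraph<SetType> remapped(depgraph, mapping);
    // remapped.FeeRate(2) equals depgraph.FeeRate(0), and ancestor/descendant sets
    // are carried over under the same renumbering.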
@@ -257,6 +279,14 @@ struct SetInfo
explicit SetInfo(const DepGraph<SetType>& depgraph, const SetType& txn) noexcept :
transactions(txn), feerate(depgraph.FeeRate(txn)) {}
+ /** Add a transaction to this SetInfo (which must not yet be in it). */
+ void Set(const DepGraph<SetType>& depgraph, ClusterIndex pos) noexcept
+ {
+ Assume(!transactions[pos]);
+ transactions.Set(pos);
+ feerate += depgraph.FeeRate(pos);
+ }
+
/** Add the transactions of other to this SetInfo (no overlap allowed). */
SetInfo& operator|=(const SetInfo& other) noexcept
{
@@ -506,6 +536,12 @@ public:
return m_todo.None();
}
+ /** Count the number of remaining unlinearized transactions. */
+ ClusterIndex NumRemaining() const noexcept
+ {
+ return m_todo.Count();
+ }
+
/** Find the best (highest-feerate, smallest among those in case of a tie) ancestor set
* among the remaining transactions. Requires !AllDone().
*
@@ -541,23 +577,60 @@ class SearchCandidateFinder
{
/** Internal RNG. */
InsecureRandomContext m_rng;
- /** Internal dependency graph for the cluster. */
- const DepGraph<SetType>& m_depgraph;
- /** Which transactions are left to do (sorted indices). */
+ /** m_sorted_to_original[i] is the original position that sorted transaction position i had. */
+ std::vector<ClusterIndex> m_sorted_to_original;
+ /** m_original_to_sorted[i] is the sorted position original transaction position i has. */
+ std::vector<ClusterIndex> m_original_to_sorted;
+ /** Internal dependency graph for the cluster (with transactions in decreasing individual
+ * feerate order). */
+ DepGraph<SetType> m_sorted_depgraph;
+ /** Which transactions are left to do (indices in m_sorted_depgraph's order). */
SetType m_todo;
+ /** Given a set of transactions with sorted indices, get their original indices. */
+ SetType SortedToOriginal(const SetType& arg) const noexcept
+ {
+ SetType ret;
+ for (auto pos : arg) ret.Set(m_sorted_to_original[pos]);
+ return ret;
+ }
+
+ /** Given a set of transactions with original indices, get their sorted indices. */
+ SetType OriginalToSorted(const SetType& arg) const noexcept
+ {
+ SetType ret;
+ for (auto pos : arg) ret.Set(m_original_to_sorted[pos]);
+ return ret;
+ }
+
public:
/** Construct a candidate finder for a graph.
*
* @param[in] depgraph Dependency graph for the to-be-linearized cluster.
* @param[in] rng_seed A random seed to control the search order.
*
- * Complexity: O(1).
+ * Complexity: O(N^2) where N=depgraph.TxCount().
*/
- SearchCandidateFinder(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed) noexcept :
+ SearchCandidateFinder(const DepGraph<SetType>& depgraph, uint64_t rng_seed) noexcept :
m_rng(rng_seed),
- m_depgraph(depgraph),
- m_todo(SetType::Fill(depgraph.TxCount())) {}
+ m_sorted_to_original(depgraph.TxCount()),
+ m_original_to_sorted(depgraph.TxCount()),
+ m_todo(SetType::Fill(depgraph.TxCount()))
+ {
+ // Determine reordering mapping, by sorting by decreasing feerate.
+ std::iota(m_sorted_to_original.begin(), m_sorted_to_original.end(), ClusterIndex{0});
+ std::sort(m_sorted_to_original.begin(), m_sorted_to_original.end(), [&](auto a, auto b) {
+ auto feerate_cmp = depgraph.FeeRate(a) <=> depgraph.FeeRate(b);
+ if (feerate_cmp == 0) return a < b;
+ return feerate_cmp > 0;
+ });
+ // Compute reverse mapping.
+ for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
+ m_original_to_sorted[m_sorted_to_original[i]] = i;
+ }
+ // Compute reordered dependency graph.
+ m_sorted_depgraph = DepGraph(depgraph, m_original_to_sorted);
+ }
/** Check whether any unlinearized transactions remain. */
bool AllDone() const noexcept
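A hypothetical worked example of the two mappings built by the constructor above (feerates invented):

    // Individual feerates by original position: tx0 = 3 sat/vB, tx1 = 9, tx2 = 7.
    // Sorted by decreasing feerate the order is tx1, tx2, tx0, so:
    //   m_sorted_to_original = {1, 2, 0}   // sorted position -> original position
    //   m_original_to_sorted = {2, 0, 1}   // original position -> sorted position
    // m_sorted_depgraph is then the input graph renumbered via m_original_to_sorted.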
@@ -580,12 +653,15 @@ public:
* be <= max_iterations. If strictly < max_iterations, the
* returned subset is optimal.
*
- * Complexity: O(N * min(max_iterations, 2^N)) where N=depgraph.TxCount().
+ * Complexity: possibly O(N * min(max_iterations, sqrt(2^N))) where N=depgraph.TxCount().
*/
std::pair<SetInfo<SetType>, uint64_t> FindCandidateSet(uint64_t max_iterations, SetInfo<SetType> best) noexcept
{
Assume(!AllDone());
+ // Convert the provided best to internal sorted indices.
+ best.transactions = OriginalToSorted(best.transactions);
+
/** Type for work queue items. */
struct WorkItem
{
@@ -596,16 +672,27 @@ public:
/** Set of undecided transactions. This must be a subset of m_todo, and have no overlap
* with inc. The set (inc | und) must be topologically valid. */
SetType und;
+ /** (Only when inc is not empty) The best feerate of any superset of inc that is also a
+ * subset of (inc | und), without requiring it to be topologically valid. It forms a
+ * conservative upper bound on how good a set this work item can give rise to.
+ * Transactions whose feerate is below best's are ignored when determining this value,
+ * which means it may technically be an underestimate, but if so, this work item
+ * cannot result in something that beats best anyway. */
+ FeeFrac pot_feerate;
/** Construct a new work item. */
- WorkItem(SetInfo<SetType>&& i, SetType&& u) noexcept :
- inc(std::move(i)), und(std::move(u)) {}
+ WorkItem(SetInfo<SetType>&& i, SetType&& u, FeeFrac&& p_f) noexcept :
+ inc(std::move(i)), und(std::move(u)), pot_feerate(std::move(p_f))
+ {
+ Assume(pot_feerate.IsEmpty() == inc.feerate.IsEmpty());
+ }
/** Swap two WorkItems. */
void Swap(WorkItem& other) noexcept
{
swap(inc, other.inc);
swap(und, other.und);
+ swap(pot_feerate, other.pot_feerate);
}
};
@@ -613,39 +700,111 @@ public:
VecDeque<WorkItem> queue;
queue.reserve(std::max<size_t>(256, 2 * m_todo.Count()));
- // Create an initial entry with m_todo as undecided. Also use it as best if not provided,
- // so that during the work processing loop below, and during the add_fn/split_fn calls, we
- // do not need to deal with the best=empty case.
- if (best.feerate.IsEmpty()) best = SetInfo(m_depgraph, m_todo);
- queue.emplace_back(SetInfo<SetType>{}, SetType{m_todo});
+ // Create initial entries per connected component of m_todo. While clusters themselves are
+ // generally connected, this is not necessarily true after some parts have already been
+ // removed from m_todo. Without this, effort can be wasted on searching "inc" sets that
+ // span multiple components.
+ auto to_cover = m_todo;
+ do {
+ auto component = m_sorted_depgraph.FindConnectedComponent(to_cover);
+ to_cover -= component;
+ // If best is not provided, set it to the first component, so that during the work
+ // processing loop below, and during the add_fn/split_fn calls, we do not need to deal
+ // with the best=empty case.
+ if (best.feerate.IsEmpty()) best = SetInfo(m_sorted_depgraph, component);
+ queue.emplace_back(/*inc=*/SetInfo<SetType>{},
+ /*und=*/std::move(component),
+ /*pot_feerate=*/FeeFrac{});
+ } while (to_cover.Any());
/** Local copy of the iteration limit. */
uint64_t iterations_left = max_iterations;
+ /** The set of transactions in m_todo which have feerate > best's. */
+ SetType imp = m_todo;
+ while (imp.Any()) {
+ ClusterIndex check = imp.Last();
+ if (m_sorted_depgraph.FeeRate(check) >> best.feerate) break;
+ imp.Reset(check);
+ }
+
/** Internal function to add an item to the queue of elements to explore if there are any
- * transactions left to split on, and to update best.
+ * transactions left to split on, possibly improving it before doing so, and to update
+ * best/imp.
*
* - inc: the "inc" value for the new work item (must be topological).
* - und: the "und" value for the new work item ((inc | und) must be topological).
*/
auto add_fn = [&](SetInfo<SetType> inc, SetType und) noexcept {
+ /** SetInfo object with the set whose feerate will become the new work item's
+ * pot_feerate. It starts off equal to inc. */
+ auto pot = inc;
if (!inc.feerate.IsEmpty()) {
+ // Add entries to pot. We iterate over all undecided transactions whose feerate is
+ // higher than best. While undecided transactions of lower feerate may improve pot,
+ // the resulting pot feerate cannot possibly exceed best's (and this item will be
+ // skipped in split_fn anyway).
+ for (auto pos : imp & und) {
+ // Determine if adding transaction pos to pot (ignoring topology) would improve
+ // it. If not, we're done updating pot. This relies on the fact that
+ // m_sorted_depgraph, and thus the transactions iterated over, are in decreasing
+ // individual feerate order.
+ if (!(m_sorted_depgraph.FeeRate(pos) >> pot.feerate)) break;
+ pot.Set(m_sorted_depgraph, pos);
+ }
+
+ // The "jump ahead" optimization: whenever pot has a topologically-valid subset,
+ // that subset can be added to inc. Any subset of (pot - inc) has the property that
+ // its feerate exceeds that of any set compatible with this work item (superset of
+ // inc, subset of (inc | und)). Thus, if T is a topological subset of pot, and B is
+ // the best topologically-valid set compatible with this work item, and (T - B) is
+ // non-empty, then (T | B) is better than B and also topological. This is in
+ // contradiction with the assumption that B is best. Thus, (T - B) must be empty,
+ // or T must be a subset of B.
+ //
+ // See https://delvingbitcoin.org/t/how-to-linearize-your-cluster/303 section 2.4.
+ const auto init_inc = inc.transactions;
+ for (auto pos : pot.transactions - inc.transactions) {
+ // If the transaction's ancestors are a subset of pot, we can add it together
+ // with its ancestors to inc. Just update the transactions here; the feerate
+ // update happens below.
+ auto anc_todo = m_sorted_depgraph.Ancestors(pos) & m_todo;
+ if (anc_todo.IsSubsetOf(pot.transactions)) inc.transactions |= anc_todo;
+ }
+ // Finally update und and inc's feerate to account for the added transactions.
+ und -= inc.transactions;
+ inc.feerate += m_sorted_depgraph.FeeRate(inc.transactions - init_inc);
+
// If inc's feerate is better than best's, remember it as our new best.
if (inc.feerate > best.feerate) {
best = inc;
+ // See if we can remove any entries from imp now.
+ while (imp.Any()) {
+ ClusterIndex check = imp.Last();
+ if (m_sorted_depgraph.FeeRate(check) >> best.feerate) break;
+ imp.Reset(check);
+ }
}
+
+ // If no potential transactions exist beyond the already included ones, no
+ // improvement is possible anymore.
+ if (pot.feerate.size == inc.feerate.size) return;
+ // At this point und must be non-empty. If it were empty then pot would equal inc.
+ Assume(und.Any());
} else {
Assume(inc.transactions.None());
+ // If inc is empty, we just make sure there are undecided transactions left to
+ // split on.
+ if (und.None()) return;
}
- // Make sure there are undecided transactions left to split on.
- if (und.None()) return;
-
// Actually construct a new work item on the queue. Due to the switch to DFS when queue
// space runs out (see below), we know that no reallocation of the queue should ever
// occur.
Assume(queue.size() < queue.capacity());
- queue.emplace_back(std::move(inc), std::move(und));
+ queue.emplace_back(/*inc=*/std::move(inc),
+ /*und=*/std::move(und),
+ /*pot_feerate=*/std::move(pot.feerate));
};
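For readers following the "jump ahead" reasoning above, here is a minimal standalone sketch of that step, written with plain 32-bit masks instead of SetType/DepGraph; the names (anc, pot, inc, todo) and the free-function form are assumptions for illustration only, not the real interfaces. It adds to inc every transaction in pot whose still-undecided ancestors all lie within pot, mirroring the loop over (pot.transactions - inc.transactions); the feerate update is left out, since in the real code it happens afterwards.

#include <cstdint>
#include <vector>

// anc[i] is the ancestor mask of transaction i (including i itself).
uint32_t JumpAhead(const std::vector<uint32_t>& anc, uint32_t pot, uint32_t inc, uint32_t todo)
{
    for (uint32_t pos = 0; pos < anc.size(); ++pos) {
        const uint32_t bit = uint32_t{1} << pos;
        if (!(pot & bit) || (inc & bit)) continue;    // only consider transactions in pot but not yet in inc
        const uint32_t anc_todo = anc[pos] & todo;    // its still-undecided ancestors
        if ((anc_todo & ~pot) == 0) inc |= anc_todo;  // all of them lie in pot: safe to include
    }
    return inc;
}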
/** Internal process function. It takes an existing work item, and splits it in two: one
@@ -659,18 +818,66 @@ public:
Assume(elem.inc.transactions.IsSubsetOf(m_todo) && elem.und.IsSubsetOf(m_todo));
// Included transactions cannot be undecided.
Assume(!elem.inc.transactions.Overlaps(elem.und));
+ // If pot is empty, then so is inc.
+ Assume(elem.inc.feerate.IsEmpty() == elem.pot_feerate.IsEmpty());
+
+ const ClusterIndex first = elem.und.First();
+ if (!elem.inc.feerate.IsEmpty()) {
+ // If no undecided transactions remain with feerate higher than best, this entry
+ // cannot be improved beyond best.
+ if (!elem.und.Overlaps(imp)) return;
+ // We can ignore any queue item whose potential feerate isn't better than the best
+ // seen so far.
+ if (elem.pot_feerate <= best.feerate) return;
+ } else {
+            // In case inc is empty, use a simpler alternative check.
+ if (m_sorted_depgraph.FeeRate(first) <= best.feerate) return;
+ }
- // Pick the first undecided transaction as the one to split on.
- const ClusterIndex split = elem.und.First();
+ // Decide which transaction to split on. Splitting is how new work items are added, and
+ // how progress is made. One split transaction is chosen among the queue item's
+ // undecided ones, and:
+ // - A work item is (potentially) added with that transaction plus its remaining
+ // descendants excluded (removed from the und set).
+ // - A work item is (potentially) added with that transaction plus its remaining
+ // ancestors included (added to the inc set).
+ //
+ // To decide what to split on, consider the undecided ancestors of the highest
+ // individual feerate undecided transaction. Pick the one which reduces the search space
+ // most. Let I(t) be the size of the undecided set after including t, and E(t) the size
+ // of the undecided set after excluding t. Then choose the split transaction t such
+ // that 2^I(t) + 2^E(t) is minimal, tie-breaking by highest individual feerate for t.
+ ClusterIndex split = 0;
+ const auto select = elem.und & m_sorted_depgraph.Ancestors(first);
+ Assume(select.Any());
+ std::optional<std::pair<ClusterIndex, ClusterIndex>> split_counts;
+ for (auto t : select) {
+ // Call max = max(I(t), E(t)) and min = min(I(t), E(t)). Let counts = {max,min}.
+ // Sorting by the tuple counts is equivalent to sorting by 2^I(t) + 2^E(t). This
+ // expression is equal to 2^max + 2^min = 2^max * (1 + 1/2^(max - min)). The second
+ // factor (1 + 1/2^(max - min)) there is in (1,2]. Thus increasing max will always
+ // increase it, even when min decreases. Because of this, we can first sort by max.
+ std::pair<ClusterIndex, ClusterIndex> counts{
+ (elem.und - m_sorted_depgraph.Ancestors(t)).Count(),
+ (elem.und - m_sorted_depgraph.Descendants(t)).Count()};
+ if (counts.first < counts.second) std::swap(counts.first, counts.second);
+ // Remember the t with the lowest counts.
+ if (!split_counts.has_value() || counts < *split_counts) {
+ split = t;
+ split_counts = counts;
+ }
+ }
+ // Since there was at least one transaction in select, we must always find one.
+ Assume(split_counts.has_value());
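As a hedged companion to the split-selection rule described above, the following sketch shows the same computation with 32-bit masks and hypothetical anc[]/desc[] ancestor/descendant tables; std::popcount stands in for Count(). It picks the candidate minimizing the sorted pair {max, min} of the two resulting undecided-set sizes, which, per the comment above, orders candidates identically to 2^I(t) + 2^E(t).

#include <bit>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

uint32_t PickSplit(const std::vector<uint32_t>& anc, const std::vector<uint32_t>& desc,
                   uint32_t und, uint32_t select)
{
    uint32_t split = 0;
    std::optional<std::pair<int, int>> best_counts;
    for (uint32_t t = 0; t < anc.size(); ++t) {
        if (!(select & (uint32_t{1} << t))) continue;
        // Undecided-set size after including t (und minus t's ancestors) and after
        // excluding t (und minus t's descendants).
        std::pair<int, int> counts{std::popcount(und & ~anc[t]), std::popcount(und & ~desc[t])};
        if (counts.first < counts.second) std::swap(counts.first, counts.second);
        if (!best_counts.has_value() || counts < *best_counts) {
            split = t;
            best_counts = counts;
        }
    }
    return split;
}

For example, with candidates whose (I(t), E(t)) pairs are (3, 5) and (4, 4), the sorted pairs are {5, 3} and {4, 4}; the second wins, matching 2^4 + 2^4 = 32 < 2^3 + 2^5 = 40.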
// Add a work item corresponding to exclusion of the split transaction.
- const auto& desc = m_depgraph.Descendants(split);
+ const auto& desc = m_sorted_depgraph.Descendants(split);
add_fn(/*inc=*/elem.inc,
/*und=*/elem.und - desc);
// Add a work item corresponding to inclusion of the split transaction.
- const auto anc = m_depgraph.Ancestors(split) & m_todo;
- add_fn(/*inc=*/elem.inc.Add(m_depgraph, anc),
+ const auto anc = m_sorted_depgraph.Ancestors(split) & m_todo;
+ add_fn(/*inc=*/elem.inc.Add(m_sorted_depgraph, anc),
/*und=*/elem.und - anc);
// Account for the performed split.
@@ -713,7 +920,9 @@ public:
split_fn(std::move(elem));
}
- // Return the found best set and the number of iterations performed.
+ // Return the found best set (converted to the original transaction indices), and the
+ // number of iterations performed.
+ best.transactions = SortedToOriginal(best.transactions);
return {std::move(best), max_iterations - iterations_left};
}
@@ -723,9 +932,10 @@ public:
*/
void MarkDone(const SetType& done) noexcept
{
- Assume(done.Any());
- Assume(done.IsSubsetOf(m_todo));
- m_todo -= done;
+ const auto done_sorted = OriginalToSorted(done);
+ Assume(done_sorted.Any());
+ Assume(done_sorted.IsSubsetOf(m_todo));
+ m_todo -= done_sorted;
}
};
@@ -744,7 +954,7 @@ public:
* - A boolean indicating whether the result is guaranteed to be
* optimal.
*
- * Complexity: O(N * min(max_iterations + N, 2^N)) where N=depgraph.TxCount().
+ * Complexity: possibly O(N * min(max_iterations + N, sqrt(2^N))) where N=depgraph.TxCount().
*/
template<typename SetType>
std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& depgraph, uint64_t max_iterations, uint64_t rng_seed, Span<const ClusterIndex> old_linearization = {}) noexcept
@@ -756,10 +966,20 @@ std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& de
std::vector<ClusterIndex> linearization;
AncestorCandidateFinder anc_finder(depgraph);
- SearchCandidateFinder src_finder(depgraph, rng_seed);
+ std::optional<SearchCandidateFinder<SetType>> src_finder;
linearization.reserve(depgraph.TxCount());
bool optimal = true;
+ // Treat the initialization of SearchCandidateFinder as taking N^2/64 (rounded up) iterations
+ // (largely due to the cost of constructing the internal sorted-by-feerate DepGraph inside
+    // SearchCandidateFinder), a rough approximation based on benchmarks. If we don't have that
+ // many, don't start it.
+ uint64_t start_iterations = (uint64_t{depgraph.TxCount()} * depgraph.TxCount() + 63) / 64;
+ if (iterations_left > start_iterations) {
+ iterations_left -= start_iterations;
+ src_finder.emplace(depgraph, rng_seed);
+ }
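A few worked values of the (N*N + 63) / 64 start-up charge above, as a quick sanity check (illustrative only; the cluster sizes are arbitrary):

#include <cstdint>

static_assert((uint64_t{10} * 10 + 63) / 64 == 2);   // 10-transaction cluster
static_assert((uint64_t{32} * 32 + 63) / 64 == 16);  // 32-transaction cluster
static_assert((uint64_t{64} * 64 + 63) / 64 == 64);  // 64-transaction cluster

So for small clusters the charge is only a handful of iterations, and search is skipped only when the caller's budget is nearly exhausted.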
+
/** Chunking of what remains of the old linearization. */
LinearizationChunking old_chunking(depgraph, old_linearization);
@@ -772,12 +992,22 @@ std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& de
auto best = anc_finder.FindCandidateSet();
if (!best_prefix.feerate.IsEmpty() && best_prefix.feerate >= best.feerate) best = best_prefix;
- // Invoke bounded search to update best, with up to half of our remaining iterations as
- // limit.
- uint64_t max_iterations_now = (iterations_left + 1) / 2;
uint64_t iterations_done_now = 0;
- std::tie(best, iterations_done_now) = src_finder.FindCandidateSet(max_iterations_now, best);
- iterations_left -= iterations_done_now;
+ uint64_t max_iterations_now = 0;
+ if (src_finder) {
+ // Treat the invocation of SearchCandidateFinder::FindCandidateSet() as costing N/4
+ // up-front (rounded up) iterations (largely due to the cost of connected-component
+ // splitting), a rough approximation based on benchmarks.
+ uint64_t base_iterations = (anc_finder.NumRemaining() + 3) / 4;
+ if (iterations_left > base_iterations) {
+ // Invoke bounded search to update best, with up to half of our remaining
+ // iterations as limit.
+ iterations_left -= base_iterations;
+ max_iterations_now = (iterations_left + 1) / 2;
+ std::tie(best, iterations_done_now) = src_finder->FindCandidateSet(max_iterations_now, best);
+ iterations_left -= iterations_done_now;
+ }
+ }
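To make the budget accounting above concrete, a hedged worked example (the numbers are hypothetical): with 20 transactions remaining and 1000 iterations left, the up-front charge is (20 + 3) / 4 = 5, leaving 995, of which (995 + 1) / 2 = 498 may be spent by this FindCandidateSet() call.

#include <cstdint>

static_assert((uint64_t{20} + 3) / 4 == 5);             // base_iterations for 20 remaining transactions
static_assert(((uint64_t{1000} - 5) + 1) / 2 == 498);   // max_iterations_now after deducting the charge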
if (iterations_done_now == max_iterations_now) {
optimal = false;
@@ -795,7 +1025,7 @@ std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& de
// Update state to reflect best is no longer to be linearized.
anc_finder.MarkDone(best.transactions);
if (anc_finder.AllDone()) break;
- src_finder.MarkDone(best.transactions);
+ if (src_finder) src_finder->MarkDone(best.transactions);
if (old_chunking.NumChunksLeft() > 0) {
old_chunking.MarkDone(best.transactions);
}
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 0e5503a17f..ebdab1043e 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -315,7 +315,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
if (i->exactMatch)
match = (strURI == i->prefix);
else
- match = (strURI.substr(0, i->prefix.size()) == i->prefix);
+ match = strURI.starts_with(i->prefix);
if (match) {
path = strURI.substr(i->prefix.size());
break;
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index 41bdca9df5..26de7eee32 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -151,7 +151,7 @@ bool BlockFilterIndex::CustomCommit(CDBBatch& batch)
LogError("%s: Failed to open filter file %d\n", __func__, pos.nFile);
return false;
}
- if (!FileCommit(file.Get())) {
+ if (!file.Commit()) {
LogError("%s: Failed to commit filter file %d\n", __func__, pos.nFile);
return false;
}
@@ -201,11 +201,11 @@ size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos& pos, const BlockFilter&
LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile);
return 0;
}
- if (!TruncateFile(last_file.Get(), pos.nPos)) {
+ if (!last_file.Truncate(pos.nPos)) {
LogPrintf("%s: Failed to truncate filter file %d\n", __func__, pos.nFile);
return 0;
}
- if (!FileCommit(last_file.Get())) {
+ if (!last_file.Commit()) {
LogPrintf("%s: Failed to commit filter file %d\n", __func__, pos.nFile);
return 0;
}
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 80f615ed0e..425a7f00a0 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -87,10 +87,7 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe
CBlockHeader header;
try {
file >> header;
- if (fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) {
- LogError("%s: fseek(...) failed\n", __func__);
- return false;
- }
+ file.seek(postx.nTxOffset, SEEK_CUR);
file >> TX_WITH_WITNESS(tx);
} catch (const std::exception& e) {
LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what());
diff --git a/src/init.cpp b/src/init.cpp
index f67835d7da..d6c80d8f84 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -298,7 +298,7 @@ void Shutdown(NodeContext& node)
StopTorControl();
- if (node.chainman && node.chainman->m_thread_load.joinable()) node.chainman->m_thread_load.join();
+ if (node.background_init_thread.joinable()) node.background_init_thread.join();
// After everything has been shut down, but before things get flushed, stop the
// the scheduler. After this point, SyncWithValidationInterfaceQueue() should not be called anymore
// as this would prevent the shutdown from completing.
@@ -490,7 +490,7 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc)
argsman.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location (only useable from command line, not configuration file) (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (minimum %d, default: %d). Make sure you have enough RAM. In addition, unused memory allocated to the mempool is shared with this cache (see -maxmempool).", nMinDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-allowignoredconf", strprintf("For backwards compatibility, treat an unused %s file in the datadir as a warning, not an error.", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -1356,7 +1356,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
if (!SplitHostPort(socket_addr, port_out, host_out)) {
#ifdef HAVE_SOCKADDR_UN
// Allow unix domain sockets for some options e.g. unix:/some/file/path
- if (!unix || socket_addr.find(ADDR_PREFIX_UNIX) != 0) {
+ if (!unix || !socket_addr.starts_with(ADDR_PREFIX_UNIX)) {
return InitError(InvalidPortErrMsg(arg, socket_addr));
}
#else
@@ -1789,7 +1789,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
vImportFiles.push_back(fs::PathFromString(strFile));
}
- chainman.m_thread_load = std::thread(&util::TraceThread, "initload", [=, &chainman, &args, &node] {
+ node.background_init_thread = std::thread(&util::TraceThread, "initload", [=, &chainman, &args, &node] {
ScheduleBatchPriority();
// Import blocks
ImportBlocks(chainman, vImportFiles);
diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h
index cebe97edb7..c73465bd2a 100644
--- a/src/interfaces/mining.h
+++ b/src/interfaces/mining.h
@@ -5,26 +5,45 @@
#ifndef BITCOIN_INTERFACES_MINING_H
#define BITCOIN_INTERFACES_MINING_H
-#include <node/types.h>
-#include <uint256.h>
+#include <consensus/amount.h> // for CAmount
+#include <node/types.h> // for BlockCreateOptions
+#include <primitives/block.h> // for CBlock, CBlockHeader
+#include <primitives/transaction.h> // for CTransactionRef
+#include <stdint.h> // for int64_t
+#include <uint256.h> // for uint256
-#include <memory>
-#include <optional>
+#include <memory> // for unique_ptr, shared_ptr
+#include <optional> // for optional
+#include <vector> // for vector
namespace node {
-struct CBlockTemplate;
struct NodeContext;
} // namespace node
class BlockValidationState;
-class CBlock;
class CScript;
namespace interfaces {
+//! Block template interface
+class BlockTemplate
+{
+public:
+ virtual ~BlockTemplate() = default;
+
+ virtual CBlockHeader getBlockHeader() = 0;
+ virtual CBlock getBlock() = 0;
+
+ virtual std::vector<CAmount> getTxFees() = 0;
+ virtual std::vector<int64_t> getTxSigops() = 0;
+
+ virtual CTransactionRef getCoinbaseTx() = 0;
+ virtual std::vector<unsigned char> getCoinbaseCommitment() = 0;
+ virtual int getWitnessCommitmentIndex() = 0;
+};
+
//! Interface giving clients (RPC, Stratum v2 Template Provider in the future)
//! ability to create block templates.
-
class Mining
{
public:
@@ -39,14 +58,14 @@ public:
//! Returns the hash for the tip of this chain
virtual std::optional<uint256> getTipHash() = 0;
- /**
+ /**
* Construct a new block template
*
* @param[in] script_pub_key the coinbase output
* @param[in] options options for creating the block
* @returns a block template
*/
- virtual std::unique_ptr<node::CBlockTemplate> createNewBlock(const CScript& script_pub_key, const node::BlockCreateOptions& options={}) = 0;
+ virtual std::unique_ptr<BlockTemplate> createNewBlock(const CScript& script_pub_key, const node::BlockCreateOptions& options = {}) = 0;
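A hedged usage sketch of the interface above (illustrative only; the function name SketchTemplateUse and the way the Mining reference and script_pub_key are obtained are assumptions, not part of this change):

void SketchTemplateUse(interfaces::Mining& mining, const CScript& script_pub_key)
{
    std::unique_ptr<interfaces::BlockTemplate> block_template{mining.createNewBlock(script_pub_key)};
    CBlock block{block_template->getBlock()};
    CTransactionRef coinbase{block_template->getCoinbaseTx()};
    std::vector<CAmount> fees{block_template->getTxFees()};
    (void)block; (void)coinbase; (void)fees; // e.g. hand off to an external miner or a Stratum v2 client
}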
/**
* Processes new block. A valid new block is automatically relayed to peers.
diff --git a/src/net.cpp b/src/net.cpp
index 257f67ed7b..d87adcc031 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -2693,6 +2693,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect, Spa
const auto current_time{NodeClock::now()};
int nTries = 0;
+ const auto reachable_nets{g_reachable_nets.All()};
+
while (!interruptNet)
{
if (anchor && !m_anchors.empty()) {
@@ -2724,7 +2726,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect, Spa
if (!addr.IsValid()) {
// No tried table collisions. Select a new table address
// for our feeler.
- std::tie(addr, addr_last_try) = addrman.Select(true);
+ std::tie(addr, addr_last_try) = addrman.Select(true, reachable_nets);
} else if (AlreadyConnectedToAddress(addr)) {
// If test-before-evict logic would have us connect to a
// peer that we're already connected to, just mark that
@@ -2733,14 +2735,16 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect, Spa
// a currently-connected peer.
addrman.Good(addr);
// Select a new table address for our feeler instead.
- std::tie(addr, addr_last_try) = addrman.Select(true);
+ std::tie(addr, addr_last_try) = addrman.Select(true, reachable_nets);
}
} else {
// Not a feeler
// If preferred_net has a value set, pick an extra outbound
// peer from that network. The eviction logic in net_processing
// ensures that a peer from another network will be evicted.
- std::tie(addr, addr_last_try) = addrman.Select(false, preferred_net);
+ std::tie(addr, addr_last_try) = preferred_net.has_value()
+ ? addrman.Select(false, {*preferred_net})
+ : addrman.Select(false, reachable_nets);
}
// Require outbound IPv4/IPv6 connections, other than feelers, to be to distinct network groups
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 6553393160..b7d0f5360d 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -113,9 +113,6 @@ static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Maximum timeout for stalling block download. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
-/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
- * less than this number, we reached its tip. Changing this value is a protocol upgrade. */
-static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
* when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
@@ -224,6 +221,9 @@ struct Peer {
/** Services this peer offered to us. */
std::atomic<ServiceFlags> m_their_services{NODE_NONE};
+ //! Whether this peer is an inbound connection
+ const bool m_is_inbound;
+
/** Protects misbehavior data members */
Mutex m_misbehavior_mutex;
/** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */
@@ -394,9 +394,10 @@ struct Peer {
* timestamp the peer sent in the version message. */
std::atomic<std::chrono::seconds> m_time_offset{0s};
- explicit Peer(NodeId id, ServiceFlags our_services)
+ explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound)
: m_id{id}
, m_our_services{our_services}
+ , m_is_inbound{is_inbound}
{}
private:
@@ -476,11 +477,6 @@ struct CNodeState {
//! Time of last new block announcement
int64_t m_last_block_announcement{0};
-
- //! Whether this peer is an inbound connection
- const bool m_is_inbound;
-
- CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
class PeerManagerImpl final : public PeerManager
@@ -1015,7 +1011,7 @@ private:
bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Have we requested this block from an outbound peer */
- bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
/** Remove this block from our tracked requested blocks. Called if:
* - the block has been received from a peer
@@ -1099,7 +1095,7 @@ private:
* lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
* removing the first element if necessary.
*/
- void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
/** Stack of nodes which we have set to announce using compact blocks */
std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
@@ -1302,8 +1298,8 @@ bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
{
for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
auto [nodeid, block_it] = range.first->second;
- CNodeState& nodestate = *Assert(State(nodeid));
- if (!nodestate.m_is_inbound) return true;
+ PeerRef peer{GetPeerRef(nodeid)};
+ if (peer && !peer->m_is_inbound) return true;
}
return false;
@@ -1392,6 +1388,7 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
if (m_opts.ignore_incoming_txs) return;
CNodeState* nodestate = State(nodeid);
+ PeerRef peer{GetPeerRef(nodeid)};
if (!nodestate || !nodestate->m_provides_cmpctblocks) {
// Don't request compact blocks if the peer has not signalled support
return;
@@ -1404,15 +1401,15 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
return;
}
- CNodeState *state = State(*it);
- if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers;
+ PeerRef peer_ref{GetPeerRef(*it)};
+ if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers;
}
- if (nodestate->m_is_inbound) {
+ if (peer && peer->m_is_inbound) {
// If we're adding an inbound HB peer, make sure we're not removing
// our last outbound HB peer in the process.
if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
- CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front());
- if (remove_node != nullptr && !remove_node->m_is_inbound) {
+ PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())};
+ if (remove_peer && !remove_peer->m_is_inbound) {
// Put the HB outbound peer in the second slot, so that it
// doesn't get removed.
std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
@@ -1720,7 +1717,7 @@ void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_service
NodeId nodeid = node.GetId();
{
LOCK(cs_main); // For m_node_states
- m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn()));
+ m_node_states.try_emplace(m_node_states.end(), nodeid);
}
{
LOCK(m_tx_download_mutex);
@@ -1731,7 +1728,7 @@ void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_service
our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
}
- PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
+ PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn());
{
LOCK(m_peer_mutex);
m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
@@ -1968,15 +1965,9 @@ void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
break;
case BlockValidationResult::BLOCK_CACHED_INVALID:
{
- LOCK(cs_main);
- CNodeState *node_state = State(nodeid);
- if (node_state == nullptr) {
- break;
- }
-
// Discourage outbound (but not inbound) peers if on an invalid chain.
// Exempt HB compact block peers. Manual connections are always protected from discouragement.
- if (!via_compact_block && !node_state->m_is_inbound) {
+ if (peer && !via_compact_block && !peer->m_is_inbound) {
if (peer) Misbehaving(*peer, message);
return;
}
@@ -2786,7 +2777,7 @@ bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>&
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
if (peer.m_headers_sync) {
- auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
+ auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result);
// If it is a valid continuation, we should treat the existing getheaders request as responded to.
if (result.success) peer.m_last_getheaders_timestamp = {};
if (result.request_more) {
@@ -2880,7 +2871,7 @@ bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlo
// Only try to sync with this peer if their headers message was full;
// otherwise they don't have more headers after this so no point in
// trying to sync their too-little-work chain.
- if (headers.size() == MAX_HEADERS_RESULTS) {
+ if (headers.size() == m_opts.max_headers_result) {
// Note: we could advance to the last header in this set that is
// known to us, rather than starting at the first header (which we
// may already have); however this is unlikely to matter much since
@@ -3192,7 +3183,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
assert(pindexLast);
// Consider fetching more headers if we are not using our headers-sync mechanism.
- if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
+ if (nCount == m_opts.max_headers_result && !have_headers_sync) {
// Headers message had its maximum size; the peer may have more headers.
if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
@@ -3200,7 +3191,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
}
}
- UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+ UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result);
// Consider immediately downloading blocks.
HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
@@ -4518,7 +4509,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
std::vector<CBlock> vHeaders;
- int nLimit = MAX_HEADERS_RESULTS;
+ int nLimit = m_opts.max_headers_result;
LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
{
@@ -5002,7 +4993,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
- if (nCount > MAX_HEADERS_RESULTS) {
+ if (nCount > m_opts.max_headers_result) {
Misbehaving(*peer, strprintf("headers message size = %u", nCount));
return;
}
diff --git a/src/net_processing.h b/src/net_processing.h
index a413db98e8..5f45fb1442 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -31,6 +31,9 @@ static const bool DEFAULT_PEERBLOOMFILTERS = false;
static const bool DEFAULT_PEERBLOCKFILTERS = false;
/** Maximum number of outstanding CMPCTBLOCK requests for the same block. */
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK = 3;
+/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
+ * less than this number, we reached its tip. Changing this value is a protocol upgrade. */
+static const unsigned int MAX_HEADERS_RESULTS = 2000;
struct CNodeStateStats {
int nSyncHeight = -1;
@@ -71,6 +74,8 @@ public:
//! Whether or not the internal RNG behaves deterministically (this is
//! a test-only option).
bool deterministic_rng{false};
+ //! Number of headers sent in one getheaders message result.
+ uint32_t max_headers_result{MAX_HEADERS_RESULTS};
};
static std::unique_ptr<PeerManager> make(CConnman& connman, AddrMan& addrman,
diff --git a/src/netbase.cpp b/src/netbase.cpp
index a6de72090e..76a5f9834e 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -230,7 +230,7 @@ CService LookupNumeric(const std::string& name, uint16_t portDefault, DNSLookupF
bool IsUnixSocketPath(const std::string& name)
{
#ifdef HAVE_SOCKADDR_UN
- if (name.find(ADDR_PREFIX_UNIX) != 0) return false;
+ if (!name.starts_with(ADDR_PREFIX_UNIX)) return false;
// Split off "unix:" prefix
std::string str{name.substr(ADDR_PREFIX_UNIX.length())};
diff --git a/src/netbase.h b/src/netbase.h
index 8ef6c28996..bf4d7ececc 100644
--- a/src/netbase.h
+++ b/src/netbase.h
@@ -134,6 +134,13 @@ public:
return Contains(addr.GetNetwork());
}
+ [[nodiscard]] std::unordered_set<Network> All() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+ {
+ AssertLockNotHeld(m_mutex);
+ LOCK(m_mutex);
+ return m_reachable;
+ }
+
private:
mutable Mutex m_mutex;
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index b40ca0260b..44c2808c3b 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -683,7 +683,7 @@ bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos
fileout << GetParams().MessageStart() << nSize;
// Write undo data
- long fileOutPos = ftell(fileout.Get());
+ long fileOutPos = fileout.tell();
if (fileOutPos < 0) {
LogError("%s: ftell failed\n", __func__);
return false;
@@ -981,7 +981,7 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
fileout << GetParams().MessageStart() << nSize;
// Write block
- long fileOutPos = ftell(fileout.Get());
+ long fileOutPos = fileout.tell();
if (fileOutPos < 0) {
LogError("%s: ftell failed\n", __func__);
return false;
diff --git a/src/node/caches.cpp b/src/node/caches.cpp
index 7403f7ddea..dc4d98f592 100644
--- a/src/node/caches.cpp
+++ b/src/node/caches.cpp
@@ -13,7 +13,6 @@ CacheSizes CalculateCacheSizes(const ArgsManager& args, size_t n_indexes)
{
int64_t nTotalCache = (args.GetIntArg("-dbcache", nDefaultDbCache) << 20);
nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
- nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); // total cache cannot be greater than nMaxDbcache
CacheSizes sizes;
sizes.block_tree_db = std::min(nTotalCache / 8, nMaxBlockDBCache << 20);
nTotalCache -= sizes.block_tree_db;
diff --git a/src/node/context.h b/src/node/context.h
index a664fad80b..398089ff3c 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -9,6 +9,7 @@
#include <cstdlib>
#include <functional>
#include <memory>
+#include <thread>
#include <vector>
class ArgsManager;
@@ -86,6 +87,7 @@ struct NodeContext {
std::atomic<int> exit_status{EXIT_SUCCESS};
//! Manages all the node warnings
std::unique_ptr<node::Warnings> warnings;
+ std::thread background_init_thread;
//! Declare default constructor and destructor that are not inline, so code
//! instantiating the NodeContext struct doesn't need to #include class
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 4a03183643..510541dfda 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -67,6 +67,7 @@
#include <boost/signals2/signal.hpp>
+using interfaces::BlockTemplate;
using interfaces::BlockTip;
using interfaces::Chain;
using interfaces::FoundBlock;
@@ -863,6 +864,52 @@ public:
NodeContext& m_node;
};
+class BlockTemplateImpl : public BlockTemplate
+{
+public:
+ explicit BlockTemplateImpl(std::unique_ptr<CBlockTemplate> block_template) : m_block_template(std::move(block_template))
+ {
+ assert(m_block_template);
+ }
+
+ CBlockHeader getBlockHeader() override
+ {
+ return m_block_template->block;
+ }
+
+ CBlock getBlock() override
+ {
+ return m_block_template->block;
+ }
+
+ std::vector<CAmount> getTxFees() override
+ {
+ return m_block_template->vTxFees;
+ }
+
+ std::vector<int64_t> getTxSigops() override
+ {
+ return m_block_template->vTxSigOpsCost;
+ }
+
+ CTransactionRef getCoinbaseTx() override
+ {
+ return m_block_template->block.vtx[0];
+ }
+
+ std::vector<unsigned char> getCoinbaseCommitment() override
+ {
+ return m_block_template->vchCoinbaseCommitment;
+ }
+
+ int getWitnessCommitmentIndex() override
+ {
+ return GetWitnessCommitmentIndex(m_block_template->block);
+ }
+
+ const std::unique_ptr<CBlockTemplate> m_block_template;
+};
+
class MinerImpl : public Mining
{
public:
@@ -909,11 +956,11 @@ public:
return TestBlockValidity(state, chainman().GetParams(), chainman().ActiveChainstate(), block, tip, /*fCheckPOW=*/false, check_merkle_root);
}
- std::unique_ptr<CBlockTemplate> createNewBlock(const CScript& script_pub_key, const BlockCreateOptions& options) override
+ std::unique_ptr<BlockTemplate> createNewBlock(const CScript& script_pub_key, const BlockCreateOptions& options) override
{
BlockAssembler::Options assemble_options{options};
ApplyArgsManOptions(*Assert(m_node.args), assemble_options);
- return BlockAssembler{chainman().ActiveChainstate(), context()->mempool.get(), assemble_options}.CreateNewBlock(script_pub_key);
+ return std::make_unique<BlockTemplateImpl>(BlockAssembler{chainman().ActiveChainstate(), context()->mempool.get(), assemble_options}.CreateNewBlock(script_pub_key));
}
NodeContext* context() override { return &m_node; }
diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp
index a265c2e12d..ff7de8c64a 100644
--- a/src/node/mempool_persist.cpp
+++ b/src/node/mempool_persist.cpp
@@ -199,8 +199,8 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock
LogInfo("Writing %d unbroadcast transactions to file.\n", unbroadcast_txids.size());
file << unbroadcast_txids;
- if (!skip_file_commit && !FileCommit(file.Get()))
- throw std::runtime_error("FileCommit failed");
+ if (!skip_file_commit && !file.Commit())
+ throw std::runtime_error("Commit failed");
file.fclose();
if (!RenameOver(dump_path + ".new", dump_path)) {
throw std::runtime_error("Rename failed");
diff --git a/src/node/miner.cpp b/src/node/miner.cpp
index 97f6ac346a..181ae2ef05 100644
--- a/src/node/miner.cpp
+++ b/src/node/miner.cpp
@@ -113,10 +113,6 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
resetBlock();
pblocktemplate.reset(new CBlockTemplate());
-
- if (!pblocktemplate.get()) {
- return nullptr;
- }
CBlock* const pblock = &pblocktemplate->block; // pointer for convenience
// Add dummy coinbase tx as first transaction
diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp
index 976421e455..7d589c886b 100644
--- a/src/node/utxo_snapshot.cpp
+++ b/src/node/utxo_snapshot.cpp
@@ -73,9 +73,11 @@ std::optional<uint256> ReadSnapshotBaseBlockhash(fs::path chaindir)
}
afile >> base_blockhash;
- if (std::fgetc(afile.Get()) != EOF) {
+ int64_t position = afile.tell();
+ afile.seek(0, SEEK_END);
+ if (position != afile.tell()) {
LogPrintf("[snapshot] warning: unexpected trailing data in %s\n", read_from_str);
- } else if (std::ferror(afile.Get())) {
+ } else if (afile.IsError()) {
LogPrintf("[snapshot] warning: i/o error reading %s\n", read_from_str);
}
return base_blockhash;
diff --git a/src/pow.cpp b/src/pow.cpp
index 50de8946be..6c8e7e5d98 100644
--- a/src/pow.cpp
+++ b/src/pow.cpp
@@ -134,8 +134,19 @@ bool PermittedDifficultyTransition(const Consensus::Params& params, int64_t heig
return true;
}
+// Bypasses the actual proof of work check during fuzz testing with a simplified validation
+// that passes when the most significant bit of the last byte of the hash is not set.
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params& params)
{
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ return (hash.data()[31] & 0x80) == 0;
+#else
+ return CheckProofOfWorkImpl(hash, nBits, params);
+#endif
+}
+
+bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params& params)
+{
bool fNegative;
bool fOverflow;
arith_uint256 bnTarget;
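As a hedged note on the design choice above: since the top bit of a uniformly random byte is clear half the time, the fuzz-mode shortcut accepts roughly 50% of candidate hashes, so fuzz targets exercise both the accept and reject paths without spending time on real proof of work. A minimal illustration (the function name is hypothetical):

bool FuzzPowAccepts(const unsigned char* hash32) { return (hash32[31] & 0x80) == 0; }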
diff --git a/src/pow.h b/src/pow.h
index ec03f318a4..2b28ade273 100644
--- a/src/pow.h
+++ b/src/pow.h
@@ -19,6 +19,7 @@ unsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nF
/** Check whether a block hash satisfies the proof-of-work requirement specified by nBits */
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params&);
+bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params&);
/**
* Return false if the proof-of-work requirement specified by new_nbits at a
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index 99fb238772..1ce23c72bb 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -105,7 +105,7 @@
<item>
<widget class="QLabel" name="databaseCacheLabel">
<property name="toolTip">
- <string extracomment="Tooltip text for Options window setting that sets the size of the database cache. Explains the corresponding effects of increasing/decreasing this value.">Maximum database cache size. A larger cache can contribute to faster sync, after which the benefit is less pronounced for most use cases. Lowering the cache size will reduce memory usage. Unused mempool memory is shared for this cache.</string>
+ <string extracomment="Tooltip text for Options window setting that sets the size of the database cache. Explains the corresponding effects of increasing/decreasing this value.">Maximum database cache size. Make sure you have enough RAM. A larger cache can contribute to faster sync, after which the benefit is less pronounced for most use cases. Lowering the cache size will reduce memory usage. Unused mempool memory is shared for this cache.</string>
</property>
<property name="text">
<string>Size of &amp;database cache</string>
diff --git a/src/qt/locale/bitcoin_am.ts b/src/qt/locale/bitcoin_am.ts
index 2fcf7a1e63..c02049b984 100644
--- a/src/qt/locale/bitcoin_am.ts
+++ b/src/qt/locale/bitcoin_am.ts
@@ -176,6 +176,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">ቦርሳዎ ምስጢር ተደርጓል</translation>
</message>
<message>
+ <source>Back</source>
+ <translation type="unfinished">ተመለስ</translation>
+ </message>
+ <message>
<source>Wallet to be encrypted</source>
<translation type="unfinished">ለመመስጠር የተዘጋጀ ዋሌት</translation>
</message>
@@ -254,6 +258,18 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">ስህተት፥ %1</translation>
</message>
<message>
+ <source>Embedded "%1"</source>
+ <translation type="unfinished">የተከተተ "%1"</translation>
+ </message>
+ <message>
+ <source>Default system font "%1"</source>
+ <translation type="unfinished">ነባሪ የስርዓት ቅርጸ-ቁምፊ "%1</translation>
+ </message>
+ <message>
+ <source>Custom…</source>
+ <translation type="unfinished">ብጁ…</translation>
+ </message>
+ <message>
<source>Amount</source>
<translation type="unfinished">መጠን</translation>
</message>
@@ -363,6 +379,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">&amp;ተቀበል</translation>
</message>
<message>
+ <source>&amp;Change Passphrase…</source>
+ <translation type="unfinished">&amp;የይለፍ ቃል ቀይር…</translation>
+ </message>
+ <message>
+ <source>Sign messages with your Bitcoin addresses to prove you own them</source>
+ <translation type="unfinished">በእርሶ የተያዙ መሆኑን ለማረጋገጥ በBitcoin አድራሻዎችዎ መልዕክቶችን ይፈርሙ</translation>
+ </message>
+ <message>
<source>&amp;File</source>
<translation type="unfinished">&amp;ፋይል</translation>
</message>
@@ -406,6 +430,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">ዋሌት ዝጋ</translation>
</message>
<message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">ዋሌትዎን ያዛውሩ</translation>
+ </message>
+ <message>
+ <source>Migrate a wallet</source>
+ <translation type="unfinished">ዋሌትዎን ያዛውሩ</translation>
+ </message>
+ <message>
<source>Wallet Name</source>
<extracomment>Label of the input field where the name of the wallet is entered.</extracomment>
<translation type="unfinished">ዋሌት ስም</translation>
@@ -423,6 +455,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</translation>
</message>
<message>
+ <source>Error creating wallet</source>
+ <translation type="unfinished">ዋሌትዎን ለፍጠር ተሳስተዋል </translation>
+ </message>
+ <message>
+ <source>Cannot create new wallet, the software was compiled without sqlite support (required for descriptor wallets)</source>
+ <translation type="unfinished">አዲስ ዋሌት መፍጠር አልተቻለም፣ ሶፍትዌሩ የተቀናበረው ያለ ስኩላይት ድጋፍ ነው (ለገላጭ ዋሌቶች ያስፈልጋል)</translation>
+ </message>
+ <message>
<source>Error: %1</source>
<translation type="unfinished">ስህተት፥ %1</translation>
</message>
@@ -485,6 +525,49 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>MigrateWalletActivity</name>
+ <message>
+ <source>Migrate wallet</source>
+ <translation type="unfinished">ዋሌት ያዛውሩ</translation>
+ </message>
+ <message>
+ <source>Migrating the wallet will convert this wallet to one or more descriptor wallets. A new wallet backup will need to be made.
+If this wallet contains any watchonly scripts, a new wallet will be created which contains those watchonly scripts.
+If this wallet contains any solvable but not watched scripts, a different and new wallet will be created which contains those scripts.
+
+The migration process will create a backup of the wallet before migrating. This backup file will be named &lt;wallet name&gt;-&lt;timestamp&gt;.legacy.bak and can be found in the directory for this wallet. In the event of an incorrect migration, the backup can be restored with the "Restore Wallet" functionality.</source>
+ <translation type="unfinished">ዋሌትን ማዛወር ይህንን ዋሌት አንድ ወይም ከዚያ በላይ ወደሆነ ገላጭ ዋሌቶች ይቀይረዋል።አዲስ ዋሌት ማዘጋጀት ያስፈልጋል ።ይህ ዋሌት ምንም ዓይነት የመመልከት ብቻ ስክሪፕቶችን የያዘ ከሆነ፣እነዚያን የመመልከት ብቻ ስክሪፕቶችን የያዘ አዲስ ዋሌት ይፈጠራል።ይህ ዋሌት ሊፈቱ የሚችሉ ነገር ግን የመመልከት ብቻ ያልሆኑ ስክሪፕቶችን የያዘ ከሆነ ፣ እነዚህን የያዘ አዲስ እና ልዩ የሆነ ዋሌት ይፈጠራል ።የማዛወር ሂደቱ ማዘዋወር ከመፈጸሙ በፊት የነዚህን ዋሌቶች መጠባበቂያ ቅጂ ይይዛል።ይህ መጠባበቂያ ቅጂ 1-2 legacy.bak ተብሎ ተሰይሞ በዋሌቱ ማውጫ ውስጥ ይገኛል።የተሳሳተ ዝውውር በሚከሰትበት ጊዜየመጠባበቂያ ቅጂው በ ዋሌት መመለሻ መተግበሪያ ውስጥ ይከማቻል።</translation>
+ </message>
+ <message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">ዋሌትዎን ያዛውሩ</translation>
+ </message>
+ <message>
+ <source>Migrating Wallet &lt;b&gt;%1&lt;/b&gt;…</source>
+ <translation type="unfinished">ዋሌት ማዘዋወር &lt;b&gt;%1&lt;/b&gt;…</translation>
+ </message>
+ <message>
+ <source>The wallet '%1' was migrated successfully.</source>
+ <translation type="unfinished">ዋሌት '%1' በትክክል ተዛውሯል </translation>
+ </message>
+ <message>
+ <source>Watchonly scripts have been migrated to a new wallet named '%1'.</source>
+ <translation type="unfinished">የመመልከት ብቻ ስክሪፕቶች'%1'.ወደ ተሰኘው ዋሌት ተዛውረዋል </translation>
+ </message>
+ <message>
+ <source>Solvable but not watched scripts have been migrated to a new wallet named '%1'.</source>
+ <translation type="unfinished">ሊፈቱ የሚችሉ ነገር ግን የማይታዩ ስክሪፕቶች ወደ አዲስ ዋሌት ተዛውረዋል '%1'።</translation>
+ </message>
+ <message>
+ <source>Migration failed</source>
+ <translation type="unfinished">ዝውውሩ አልተሳካም </translation>
+ </message>
+ <message>
+ <source>Migration Successful</source>
+ <translation type="unfinished">ዝውውር ተሳክቷል </translation>
+ </message>
+</context>
+<context>
<name>OpenWalletActivity</name>
<message>
<source>Open Wallet</source>
@@ -502,6 +585,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<context>
<name>CreateWalletDialog</name>
<message>
+ <source>You are one step away from creating your new wallet!</source>
+ <translation type="unfinished">አዲሱን ዋሌትዎን ለመፍጠር አንድ እርምጃ ይቀርዎታል</translation>
+ </message>
+ <message>
+ <source>Please provide a name and, if desired, enable any advanced options</source>
+ <translation type="unfinished">እባክዎ ስም ያስገቡ እና ከተፈለገ ማንኛውንም የላቁ አማራጮችን ያንቁ</translation>
+ </message>
+ <message>
<source>Wallet Name</source>
<translation type="unfinished">ዋሌት ስም</translation>
</message>
@@ -590,6 +681,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<context>
<name>OptionsDialog</name>
<message>
+ <source>Font in the Overview tab: </source>
+ <translation type="unfinished">በአጠቃላይ እይታ ትር ውስጥ ያለ ፊደል</translation>
+ </message>
+ <message>
<source>Error</source>
<translation type="unfinished">ስህተት</translation>
</message>
@@ -602,6 +697,13 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>PSBTOperationsDialog</name>
+ <message>
+ <source>Sends %1 to %2</source>
+ <translation type="unfinished">%1 ወደ %2 ይልካል</translation>
+ </message>
+ </context>
+<context>
<name>PeerTableModel</name>
<message>
<source>Address</source>
@@ -610,6 +712,56 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>RPCConsole</name>
+ <message>
+ <source>Local Addresses</source>
+ <translation type="unfinished">የአካባቢ አድራሻዎች</translation>
+ </message>
+ <message>
+ <source>Network addresses that your Bitcoin node is currently using to communicate with other nodes.</source>
+ <translation type="unfinished">የእርስዎ Bitcoin ኖድ ከሌሎች ኖዶች ጋር ለመገናኘት በአሁኑ ጊዜ እየተጠቀመበት ያለው የአውታረ መረብ አድራሻ።</translation>
+ </message>
+ <message>
+ <source>Hide Peers Detail</source>
+ <translation type="unfinished">የአቻዎችን ዝርዝር ደብቅ</translation>
+ </message>
+ <message>
+ <source>The transport layer version: %1</source>
+ <translation type="unfinished">የማጓጓዣ ንብርብር ስሪት፡ %1</translation>
+ </message>
+ <message>
+ <source>Transport</source>
+ <translation type="unfinished">መጓጓዣ</translation>
+ </message>
+ <message>
+ <source>Session ID</source>
+ <translation type="unfinished">የክፍለ ጊዜ መለያ</translation>
+ </message>
+ <message>
+ <source>The BIP324 session ID string in hex.</source>
+ <translation type="unfinished">የBIP324 ክፍለ ጊዜ መለያ ሕብረቁምፊ በሄክስ።</translation>
+ </message>
+ <message>
+ <source>detecting: peer could be v1 or v2</source>
+ <extracomment>Explanatory text for "detecting" transport type.</extracomment>
+ <translation type="unfinished">ማወቅ፡ እኩያ v1 ወይም v2 ሊሆን ይችላል።</translation>
+ </message>
+ <message>
+ <source>v1: unencrypted, plaintext transport protocol</source>
+ <extracomment>Explanatory text for v1 transport type.</extracomment>
+ <translation type="unfinished">v1፡ ያልተመሰጠረ፣ ግልጽ የጽሑፍ ትራንስፖርት ፕሮቶኮል</translation>
+ </message>
+ <message>
+ <source>v2: BIP324 encrypted transport protocol</source>
+ <extracomment>Explanatory text for v2 transport type.</extracomment>
+ <translation type="unfinished">v2፡ BIP324 የተመሰጠረ የትራንስፖርት ፕሮቶኮል</translation>
+ </message>
+ <message>
+ <source>Node window - [%1]</source>
+ <translation type="unfinished">የኖድ መስኮት - [%1]</translation>
+ </message>
+ </context>
+<context>
<name>ReceiveRequestDialog</name>
<message>
<source>Amount:</source>
@@ -661,6 +813,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<source>Copy fee</source>
<translation type="unfinished">ክፍያው ቅዳ</translation>
</message>
+ <message>
+ <source>%1 from wallet '%2'</source>
+ <translation type="unfinished">%1 ከዋሌት %2'</translation>
+ </message>
<message numerus="yes">
<source>Estimated to begin confirmation within %n block(s).</source>
<translation type="unfinished">
@@ -674,6 +830,17 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>SignVerifyMessageDialog</name>
+ <message>
+ <source>You can sign messages/agreements with your legacy (P2PKH) addresses to prove you can receive bitcoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
+ <translation type="unfinished">ወደ እነርሱ የተላኩ ቢትኮይን መቀበል እንደሚችሉ ለማረጋገጥ ከርስዎ (P2PKH) አድራሻዎች ጋር መልዕክቶችን/ስምምነቶችን መፈረም ይችላሉ። የማስገር ጥቃቶች እርስዎን ማንነትዎን በእነሱ ላይ እንዲፈርሙ ሊያታልሉዎት ስለሚችሉ ግልጽ ያልሆነ ወይም በዘፈቀደ ላለመፈረም ይጠንቀቁ። የተስማሙባቸውን ሙሉ ዝርዝር መግለጫዎች ብቻ ይፈርሙ።</translation>
+ </message>
+ <message>
+ <source>The entered address does not refer to a legacy (P2PKH) key. Message signing for SegWit and other non-P2PKH address types is not supported in this version of %1. Please check the address and try again.</source>
+ <translation type="unfinished">የገባው አድራሻ የቅርስ (P2PKH) ቁልፍን አያመለክትም። ለሴግዊት እና ሌሎች P2PKH ላልሆኑ የአድራሻ አይነቶች የመልዕክት መፈረም በዚህ የ%1 ስሪት ውስጥ አይደገፍም። እባክዎ አድራሻውን ያረጋግጡ እና እንደገና ይሞክሩ።</translation>
+ </message>
+ </context>
+<context>
<name>TransactionDesc</name>
<message>
<source>Date</source>
@@ -687,6 +854,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</translation>
</message>
<message>
+ <source>%1 (Certificate was not verified)</source>
+ <translation type="unfinished">%1 (ማረጋገጫው አልተረጋገጠም)</translation>
+ </message>
+ <message>
<source>Amount</source>
<translation type="unfinished">መጠን</translation>
</message>
@@ -742,6 +913,17 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>WalletModel</name>
+ <message>
+ <source>Fee-bump PSBT copied to clipboard</source>
+ <translation type="unfinished">ከክፍያ-ነፃ PSBT ወደ ቅንጥብ ሰሌዳ ተቀድቷል።</translation>
+ </message>
+ <message>
+ <source>Signer error</source>
+ <translation type="unfinished">የፈራሚ ስህተት</translation>
+ </message>
+ </context>
+<context>
<name>WalletView</name>
<message>
<source>&amp;Export</source>
diff --git a/src/qt/locale/bitcoin_bn.ts b/src/qt/locale/bitcoin_bn.ts
index d22624ad85..bfb104a7a3 100644
--- a/src/qt/locale/bitcoin_bn.ts
+++ b/src/qt/locale/bitcoin_bn.ts
@@ -276,6 +276,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">আংশিক স্বাক্ষরিত বিটকয়েন লেনদেন লোড করুন</translation>
</message>
<message>
+ <source>Load PSBT from &amp;clipboard…</source>
+ <translation type="unfinished">&amp;ক্লিপবোর্ড থেকে আংশিক স্বাক্ষরিত বিটকয়েন লেনদেন আনুন</translation>
+ </message>
+ <message>
<source>Load Partially Signed Bitcoin Transaction from clipboard</source>
<translation type="unfinished">ক্লিপবোর্ড থেকে আংশিক স্বাক্ষরিত বিটকয়েন লেনদেন লোড করুন</translation>
</message>
diff --git a/src/qt/locale/bitcoin_de.ts b/src/qt/locale/bitcoin_de.ts
index 474ce2a088..807a1c64ee 100644
--- a/src/qt/locale/bitcoin_de.ts
+++ b/src/qt/locale/bitcoin_de.ts
@@ -615,11 +615,15 @@ Das Signieren ist nur mit Adressen vom Typ 'Legacy' möglich.</translation>
<source>Processing blocks on disk…</source>
<translation type="unfinished">Verarbeite Blöcke auf Datenträger...</translation>
</message>
+ <message>
+ <source>Connecting to peers…</source>
+ <translation type="unfinished">Verbindung zu Peers wird hergestellt…</translation>
+ </message>
<message numerus="yes">
<source>Processed %n block(s) of transaction history.</source>
<translation type="unfinished">
- <numerusform>Processed %n block(s) of transaction history.</numerusform>
- <numerusform>Processed %n block(s) of transaction history.</numerusform>
+ <numerusform>%n Block der Transaktionshistorie prozessiert.</numerusform>
+ <numerusform>%n Block/Blöcke der Transaktionshistorie prozessiert.</numerusform>
</translation>
</message>
<message>
@@ -710,7 +714,7 @@ Das Signieren ist nur mit Adressen vom Typ 'Legacy' möglich.</translation>
<message>
<source>Show Peers tab</source>
<extracomment>A context menu item. The "Peers tab" is an element of the "Node window".</extracomment>
- <translation type="unfinished">Gegenstellen Reiter anzeigen</translation>
+ <translation type="unfinished">Reiter mit Peers anzeigen</translation>
</message>
<message>
<source>Disable network activity</source>
@@ -1420,7 +1424,7 @@ Während des Migrationsprozesses wird vor der Migration ein Backup der Wallet er
</message>
<message>
<source>%1 is currently syncing. It will download headers and blocks from peers and validate them until reaching the tip of the block chain.</source>
- <translation type="unfinished">%1 synchronisiert gerade. Es lädt Header und Blöcke von Gegenstellen und validiert sie bis zum Erreichen der Spitze der Blockchain.</translation>
+ <translation type="unfinished">%1 synchronisiert gerade. Es lädt Header und Blöcke von Peers und validiert sie bis zum Erreichen der Spitze der Blockchain.</translation>
</message>
<message>
<source>Unknown. Syncing Headers (%1, %2%)…</source>
@@ -1639,7 +1643,7 @@ Während des Migrationsprozesses wird vor der Migration ein Backup der Wallet er
</message>
<message>
<source>Used for reaching peers via:</source>
- <translation type="unfinished">Benutzt um Gegenstellen zu erreichen über:</translation>
+ <translation type="unfinished">Benutzt um Peers zu erreichen über:</translation>
</message>
<message>
<source>&amp;Window</source>
@@ -1703,7 +1707,7 @@ Während des Migrationsprozesses wird vor der Migration ein Backup der Wallet er
</message>
<message>
<source>Use separate SOCKS&amp;5 proxy to reach peers via Tor onion services:</source>
- <translation type="unfinished">Nutze separaten SOCKS&amp;5-Proxy um Gegenstellen über Tor-Onion-Dienste zu erreichen:</translation>
+ <translation type="unfinished">Nutze separaten SOCKS&amp;5-Proxy um Peers über Tor-Onion-Dienste zu erreichen:</translation>
</message>
<message>
<source>&amp;Cancel</source>
@@ -2042,11 +2046,6 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">User-Agent</translation>
</message>
<message>
- <source>Peer</source>
- <extracomment>Title of Peers Table column which contains a unique number used to identify a connection.</extracomment>
- <translation type="unfinished">Gegenstelle</translation>
- </message>
- <message>
<source>Age</source>
<extracomment>Title of Peers Table column which indicates the duration (length of time) since the peer connection started.</extracomment>
<translation type="unfinished">Alter</translation>
@@ -2171,6 +2170,14 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Anzahl der Verbindungen</translation>
</message>
<message>
+ <source>Local Addresses</source>
+ <translation type="unfinished">Lokale Adressen</translation>
+ </message>
+ <message>
+ <source>Network addresses that your Bitcoin node is currently using to communicate with other nodes.</source>
+ <translation type="unfinished">Netzwerk-Adressen, die dein Bitcoin-Node aktuell verwendet, um mit anderen Nodes zu kommunizieren.</translation>
+ </message>
+ <message>
<source>Block chain</source>
<translation type="unfinished">Blockchain</translation>
</message>
@@ -2203,16 +2210,20 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Gesendet</translation>
</message>
<message>
- <source>&amp;Peers</source>
- <translation type="unfinished">&amp;Gegenstellen</translation>
- </message>
- <message>
<source>Banned peers</source>
- <translation type="unfinished">Gesperrte Gegenstellen</translation>
+ <translation type="unfinished">Gesperrte Peers</translation>
</message>
<message>
<source>Select a peer to view detailed information.</source>
- <translation type="unfinished">Gegenstelle auswählen, um detaillierte Informationen zu erhalten.</translation>
+ <translation type="unfinished">Peers auswählen, um detaillierte Informationen zu erhalten.</translation>
+ </message>
+ <message>
+ <source>Hide Peers Detail</source>
+ <translation type="unfinished">Reiter mit Peers verstecken</translation>
+ </message>
+ <message>
+ <source>Ctrl+X</source>
+ <translation type="unfinished">Strg+X</translation>
</message>
<message>
<source>The transport layer version: %1</source>
@@ -2220,7 +2231,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>Whether we relay transactions to this peer.</source>
- <translation type="unfinished">Ob wir Adressen an diese Gegenstelle weiterleiten.</translation>
+ <translation type="unfinished">Ob wir Adressen an diesen Peer weiterleiten.</translation>
</message>
<message>
<source>Transaction Relay</source>
@@ -2244,7 +2255,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>The mapped Autonomous System used for diversifying peer selection.</source>
- <translation type="unfinished">Das zugeordnete autonome System zur Diversifizierung der Gegenstellen-Auswahl.</translation>
+ <translation type="unfinished">Das zugeordnete autonome System zur Diversifizierung der Peer-Auswahl.</translation>
</message>
<message>
<source>Mapped AS</source>
@@ -2253,7 +2264,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>Whether we relay addresses to this peer.</source>
<extracomment>Tooltip text for the Address Relay field in the peer details area, which displays whether we relay addresses to this peer (Yes/No).</extracomment>
- <translation type="unfinished">Ob wir Adressen an diese Gegenstelle weiterleiten.</translation>
+ <translation type="unfinished">Ob wir Adressen an diesen Peer weiterleiten.</translation>
</message>
<message>
<source>Address Relay</source>
@@ -2263,12 +2274,12 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>The total number of addresses received from this peer that were processed (excludes addresses that were dropped due to rate-limiting).</source>
<extracomment>Tooltip text for the Addresses Processed field in the peer details area, which displays the total number of addresses received from this peer that were processed (excludes addresses that were dropped due to rate-limiting).</extracomment>
- <translation type="unfinished">Die Gesamtzahl der von dieser Gegenstelle empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
+ <translation type="unfinished">Die Gesamtzahl der von diesem Peer empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
</message>
<message>
<source>The total number of addresses received from this peer that were dropped (not processed) due to rate-limiting.</source>
<extracomment>Tooltip text for the Addresses Rate-Limited field in the peer details area, which displays the total number of addresses received from this peer that were dropped (not processed) due to rate-limiting.</extracomment>
- <translation type="unfinished">Die Gesamtzahl der von dieser Gegenstelle empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
+ <translation type="unfinished">Die Gesamtzahl der von diesem Peer empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
</message>
<message>
<source>Addresses Processed</source>
@@ -2306,7 +2317,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>The direction and type of peer connection: %1</source>
- <translation type="unfinished">Die Richtung und der Typ der Gegenstellen-Verbindung: %1</translation>
+ <translation type="unfinished">Die Richtung und der Typ der Peer-Verbindung: %1</translation>
</message>
<message>
<source>Direction/Type</source>
@@ -2318,7 +2329,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>The network protocol this peer is connected through: IPv4, IPv6, Onion, I2P, or CJDNS.</source>
- <translation type="unfinished">Das Netzwerkprotokoll, über das diese Gegenstelle verbunden ist, ist: IPv4, IPv6, Onion, I2P oder CJDNS.</translation>
+ <translation type="unfinished">Das Netzwerkprotokoll, über das dieser Peer verbunden ist, ist: IPv4, IPv6, Onion, I2P oder CJDNS.</translation>
</message>
<message>
<source>Services</source>
@@ -2338,7 +2349,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>Elapsed time since a novel block passing initial validity checks was received from this peer.</source>
- <translation type="unfinished">Abgelaufene Zeit seitdem ein neuer Block mit erfolgreichen initialen Gültigkeitsprüfungen von dieser Gegenstelle empfangen wurde.</translation>
+ <translation type="unfinished">Verstrichene Zeit, seit ein neuer Block, der initiale Validierungsprüfungen bestanden hat, von diesem Peer empfangen wurde.</translation>
</message>
<message>
<source>Last Block</source>
@@ -2347,7 +2358,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>Elapsed time since a novel transaction accepted into our mempool was received from this peer.</source>
<extracomment>Tooltip text for the Last Transaction field in the peer details area.</extracomment>
- <translation type="unfinished">Abgelaufene Zeit seit eine neue Transaktion, die in unseren Speicherpool hineingelassen wurde, von dieser Gegenstelle empfangen wurde.</translation>
+ <translation type="unfinished">Verstrichene Zeit, seit eine neue Transaktion, die in unseren Mempool aufgenommen wurde, von diesem Peer empfangen wurde.</translation>
</message>
<message>
<source>Last Send</source>
@@ -2416,7 +2427,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>Inbound: initiated by peer</source>
<extracomment>Explanatory text for an inbound peer connection.</extracomment>
- <translation type="unfinished">Eingehend: wurde von Gegenstelle initiiert</translation>
+ <translation type="unfinished">Eingehend: wurde vom Peer initiiert</translation>
</message>
<message>
<source>Outbound Full Relay: default</source>
@@ -2446,7 +2457,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>detecting: peer could be v1 or v2</source>
<extracomment>Explanatory text for "detecting" transport type.</extracomment>
- <translation type="unfinished">Erkennen: Peer könnte v1 oder v2 sein</translation>
+ <translation type="unfinished">Erkenne: Peer könnte v1 oder v2 sein</translation>
</message>
<message>
<source>v1: unencrypted, plaintext transport protocol</source>
@@ -2460,11 +2471,11 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>we selected the peer for high bandwidth relay</source>
- <translation type="unfinished">Wir haben die Gegenstelle zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
+ <translation type="unfinished">Wir haben den Peer zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
</message>
<message>
<source>the peer selected us for high bandwidth relay</source>
- <translation type="unfinished">Die Gegenstelle hat uns zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
+ <translation type="unfinished">Der Peer hat uns zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
</message>
<message>
<source>no high bandwidth relay selected</source>
@@ -2584,7 +2595,7 @@ Für weitere Informationen über diese Konsole, tippe %6.
</message>
<message>
<source>(peer: %1)</source>
- <translation type="unfinished">(Gegenstelle: %1)</translation>
+ <translation type="unfinished">(Peer: %1)</translation>
</message>
<message>
<source>via %1</source>
@@ -3996,7 +4007,7 @@ Gehen Sie zu Datei &gt; Wallet Öffnen, um eine Wallet zu laden.
</message>
<message>
<source>%s request to listen on port %u. This port is considered "bad" and thus it is unlikely that any peer will connect to it. See doc/p2p-bad-ports.md for details and a full list.</source>
- <translation type="unfinished">%s Aufforderung, auf Port %u zu lauschen. Dieser Port wird als "schlecht" eingeschätzt und es ist daher unwahrscheinlich, dass sich Bitcoin Core Gegenstellen mit ihm verbinden. Siehe doc/p2p-bad-ports.md für Details und eine vollständige Liste.</translation>
+ <translation type="unfinished">%s Aufforderung, auf Port %u zu lauschen. Dieser Port wird als "schlecht" eingeschätzt und es ist daher unwahrscheinlich, dass sich Peers mit ihm verbinden. Siehe doc/p2p-bad-ports.md für Details und eine vollständige Liste.</translation>
</message>
<message>
<source>Cannot downgrade wallet from version %i to version %i. Wallet version unchanged.</source>
@@ -4161,7 +4172,7 @@ Bitte nutzen Sie entweder "bdb" oder "sqlite".</translation>
</message>
<message>
<source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source>
- <translation type="unfinished">Warnung: Wir scheinen nicht vollständig mit unseren Gegenstellen übereinzustimmen! Sie oder die anderen Knoten müssen unter Umständen Ihre Client-Software aktualisieren.</translation>
+ <translation type="unfinished">Warnung: Wir scheinen nicht vollständig mit unseren Peers übereinzustimmen! Sie oder die anderen Knoten müssen unter Umständen Ihre Client-Software aktualisieren.</translation>
</message>
<message>
<source>Witness data for blocks after height %d requires validation. Please restart with -reindex.</source>
diff --git a/src/qt/locale/bitcoin_de_CH.ts b/src/qt/locale/bitcoin_de_CH.ts
index a9a15f08ff..ffbc761ac1 100644
--- a/src/qt/locale/bitcoin_de_CH.ts
+++ b/src/qt/locale/bitcoin_de_CH.ts
@@ -2227,6 +2227,14 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Anzahl der Verbindungen</translation>
</message>
<message>
+ <source>Local Addresses</source>
+ <translation type="unfinished">Lokale Adressen</translation>
+ </message>
+ <message>
+ <source>Network addresses that your Bitcoin node is currently using to communicate with other nodes.</source>
+ <translation type="unfinished">Netzwerk-Adressen, die dein Bitcoin-Node aktuell verwendet, um mit anderen Nodes zu kommunizieren.</translation>
+ </message>
+ <message>
<source>Block chain</source>
<translation type="unfinished">Blockchain</translation>
</message>
@@ -2271,6 +2279,14 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Gegenstelle auswählen, um detaillierte Informationen zu erhalten.</translation>
</message>
<message>
+ <source>Hide Peers Detail</source>
+ <translation type="unfinished">Gegenstellen Reiter verstecken</translation>
+ </message>
+ <message>
+ <source>Ctrl+X</source>
+ <translation type="unfinished">Strg+X</translation>
+ </message>
+ <message>
<source>The transport layer version: %1</source>
<translation type="unfinished">Die Transportschicht-Version: %1</translation>
</message>
diff --git a/src/qt/locale/bitcoin_gl_ES.ts b/src/qt/locale/bitcoin_gl_ES.ts
index 77f9f3278c..47951fa6f2 100644
--- a/src/qt/locale/bitcoin_gl_ES.ts
+++ b/src/qt/locale/bitcoin_gl_ES.ts
@@ -2,10 +2,6 @@
<context>
<name>AddressBookPage</name>
<message>
- <source>Right-click to edit address or label</source>
- <translation type="unfinished">cd vcpkg/buildtrees/libvpx/srccd *./configuresed -i 's/CFLAGS+=-I/CFLAGS+=-fPIC -I/g' Makefilesed -i 's/CXXFLAGS+=-I/CXXFLAGS+=-fPIC -I/g' Makefilemakecp libvpx.a $HOME/vcpkg/installed/x64-linux/lib/cd</translation>
- </message>
- <message>
<source>Create a new address</source>
<translation type="unfinished">Crea un novo enderezo</translation>
</message>
@@ -94,6 +90,14 @@ Firmar é posible unicamente con enderezos de tipo 'legacy'.</translation>
<translation type="unfinished">Houbo un erro tentando gardar a lista de enderezos en %1. Por favor proba de novo.</translation>
</message>
<message>
+ <source>Sending addresses - %1</source>
+ <translation type="unfinished">Enviando enderezos - %1</translation>
+ </message>
+ <message>
+ <source>Receiving addresses - %1</source>
+ <translation type="unfinished">Recibindo enderezos - %1</translation>
+ </message>
+ <message>
<source>Exporting Failed</source>
<translation type="unfinished">Exportación Fallida</translation>
</message>
diff --git a/src/qt/locale/bitcoin_ru.ts b/src/qt/locale/bitcoin_ru.ts
index 204b66774e..7a54b45768 100644
--- a/src/qt/locale/bitcoin_ru.ts
+++ b/src/qt/locale/bitcoin_ru.ts
@@ -310,6 +310,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<source>unknown</source>
<translation type="unfinished">неизвестно</translation>
</message>
+ <message>
+ <source>Default system font "%1"</source>
+ <translation type="unfinished">Системный шрифт по умолчанию "%1"</translation>
+ </message>
<message numerus="yes">
<source>%n second(s)</source>
<translation type="unfinished">
@@ -1526,6 +1530,10 @@ The migration process will create a backup of the wallet before migrating. This
<translation type="unfinished">Сворачивать вместо выхода из приложения при закрытии окна. Если данный параметр включён, приложение закроется только после нажатия "Выход" в меню.</translation>
</message>
<message>
+ <source>Font in the Overview tab: </source>
+ <translation type="unfinished">Шрифт на вкладке «Обзор»:</translation>
+ </message>
+ <message>
<source>Options set in this dialog are overridden by the command line:</source>
<translation type="unfinished">Параметры командной строки, которые переопределили параметры из этого окна:</translation>
</message>
@@ -1951,6 +1959,17 @@ The migration process will create a backup of the wallet before migrating. This
</message>
</context>
<context>
+ <name>SignVerifyMessageDialog</name>
+ <message>
+ <source>You can sign messages/agreements with your legacy (P2PKH) addresses to prove you can receive bitcoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
+ <translation type="unfinished">Вы можете подписывать сообщения/соглашения своими устаревшими (P2PKH) адресами, чтобы доказать, что вы можете получать биткоины на них. Будьте осторожны и не подписывайте непонятные или случайные сообщения, так как мошенники могут таким образом пытаться присвоить вашу личность. Подписывайте только такие сообщения, с которыми вы согласны вплоть до мелочей.</translation>
+ </message>
+ <message>
+ <source>The entered address does not refer to a legacy (P2PKH) key. Message signing for SegWit and other non-P2PKH address types is not supported in this version of %1. Please check the address and try again.</source>
+ <translation type="unfinished">Введенный адрес не относится к устаревшему (P2PKH) ключу. Подписывание сообщений для SegWit и других не--P2PKH типов адресов не поддерживается в этой версии %1. Пожалуйста, проверьте адрес и попробуйте ещё раз.</translation>
+ </message>
+ </context>
+<context>
<name>TransactionDesc</name>
<message numerus="yes">
<source>matures in %n more block(s)</source>
@@ -1961,6 +1980,10 @@ The migration process will create a backup of the wallet before migrating. This
</translation>
</message>
<message>
+ <source>%1 (Certificate was not verified)</source>
+ <translation type="unfinished">%1 (Сертификат не был подтверждён)</translation>
+ </message>
+ <message>
<source>Amount</source>
<translation type="unfinished">Сумма</translation>
</message>
@@ -2634,5 +2657,21 @@ Unable to restore backup of wallet.</source>
<source>Error: Unable to read all records in the database</source>
<translation type="unfinished">Ошибка: не удалось прочитать все записи из базе данных</translation>
</message>
+ <message>
+ <source>Failed to disconnect block.</source>
+ <translation type="unfinished">Не удалось отключить блок</translation>
+ </message>
+ <message>
+ <source>Failed to read block.</source>
+ <translation type="unfinished">Не удалось прочитать блок</translation>
+ </message>
+ <message>
+ <source>Failed to write block.</source>
+ <translation type="unfinished">Не удалось записать блок</translation>
+ </message>
+ <message>
+ <source>Wallet file creation failed: %s</source>
+ <translation type="unfinished">Не удалось создать кошелёк 1%s</translation>
+ </message>
</context>
 </TS>
\ No newline at end of file
diff --git a/src/qt/locale/bitcoin_sw.ts b/src/qt/locale/bitcoin_sw.ts
index 199a0552b5..0497444d5e 100644
--- a/src/qt/locale/bitcoin_sw.ts
+++ b/src/qt/locale/bitcoin_sw.ts
@@ -325,7 +325,11 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<numerusform />
</translation>
</message>
- </context>
+ <message>
+ <source>default wallet</source>
+ <translation type="unfinished">mkoba chaguo-msingi</translation>
+ </message>
+</context>
<context>
<name>BitcoinGUI</name>
<message>
@@ -414,10 +418,19 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<translation type="unfinished">&amp;Chaguo...</translation>
</message>
<message>
+ <source>&amp;Encrypt Wallet…</source>
+ <translation type="unfinished">&amp;Simba Mkoba...</translation>
+ </message>
+ <message>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished">Funga funguo za siri zinazomiliki mkoba wako.</translation>
</message>
<message>
+ <source>&amp;Backup Wallet…</source>
+ <translation type="unfinished">&amp;Hifadhi Mkoba...
+</translation>
+ </message>
+ <message>
<source>&amp;Change Passphrase…</source>
<translation type="unfinished">&amp;Badilisha Nenosiri...</translation>
</message>
@@ -430,10 +443,18 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<translation type="unfinished">Saini ujumbe na anwani zako za Bitcoin ili kuthibitisha umiliki wao.</translation>
</message>
<message>
+ <source>&amp;Verify message…</source>
+ <translation type="unfinished">&amp;Thibitisha ujumbe...</translation>
+ </message>
+ <message>
<source>Verify messages to ensure they were signed with specified Bitcoin addresses</source>
<translation type="unfinished">Hakikisha ujumbe umethibitishwa kuwa ulisainiwa na anwani za Bitcoin zilizotajwa</translation>
</message>
<message>
+ <source>&amp;Load PSBT from file…</source>
+ <translation type="unfinished">&amp;Pakia PSBT kutoka faili...</translation>
+ </message>
+ <message>
<source>Open &amp;URI…</source>
<translation type="unfinished">Fungua &amp;URI ...</translation>
</message>
@@ -509,6 +530,14 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</translation>
</message>
<message>
+ <source>%1 behind</source>
+ <translation type="unfinished">%1 nyuma</translation>
+ </message>
+ <message>
+ <source>Catching up…</source>
+ <translation type="unfinished">Inakamata...</translation>
+ </message>
+ <message>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished">Shughuli baada ya hii bado hazitaonekana.</translation>
</message>
@@ -525,22 +554,135 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<translation type="unfinished">Habari</translation>
</message>
<message>
+ <source>Up to date</source>
+ <translation type="unfinished">Imesasishwa</translation>
+ </message>
+ <message>
+ <source>Load Partially Signed Bitcoin Transaction</source>
+ <translation type="unfinished">Pakia Muamala wa Bitcoin Uliosainiwa Kiasi</translation>
+ </message>
+ <message>
+ <source>Load PSBT from &amp;clipboard…</source>
+ <translation type="unfinished">Pakia PSBT kutoka &amp;clipboard...</translation>
+ </message>
+ <message>
+ <source>Load Partially Signed Bitcoin Transaction from clipboard</source>
+ <translation type="unfinished">Pakia Muamala wa Bitcoin Uliosainiwa Kiasi kutoka kwenye ubao wa kunakili</translation>
+ </message>
+ <message>
+ <source>Node window</source>
+ <translation type="unfinished">Dirisha la nodi</translation>
+ </message>
+ <message>
+ <source>Open node debugging and diagnostic console</source>
+ <translation type="unfinished">Fungua utatuzi wa nodi na koni ya uchunguzi</translation>
+ </message>
+ <message>
+ <source>&amp;Sending addresses</source>
+ <translation type="unfinished">&amp;Anwani za kutuma</translation>
+ </message>
+ <message>
+ <source>&amp;Receiving addresses</source>
+ <translation type="unfinished">&amp;Inapokea anwani</translation>
+ </message>
+ <message>
+ <source>Open a bitcoin: URI</source>
+ <translation type="unfinished">Fungua bitcoin: URI</translation>
+ </message>
+ <message>
<source>Open Wallet</source>
<translation type="unfinished">Fungua Pochi</translation>
</message>
<message>
+ <source>Open a wallet</source>
+ <translation type="unfinished">Fungua pochi</translation>
+ </message>
+ <message>
<source>Close wallet</source>
<translation type="unfinished">Funga pochi</translation>
</message>
<message>
+ <source>Restore Wallet…</source>
+ <extracomment>Name of the menu item that restores wallet from a backup file.</extracomment>
+ <translation type="unfinished">Rejesha Pochi...</translation>
+ </message>
+ <message>
+ <source>Restore a wallet from a backup file</source>
+ <extracomment>Status tip for Restore Wallet menu item</extracomment>
+ <translation type="unfinished">Rejesha mkoba kutoka kwa faili ya chelezo</translation>
+ </message>
+ <message>
+ <source>Close all wallets</source>
+ <translation type="unfinished">Funga pochi zote</translation>
+ </message>
+ <message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">Hamisha Pochi</translation>
+ </message>
+ <message>
+ <source>Migrate a wallet</source>
+ <translation type="unfinished">Hamisha mkoba</translation>
+ </message>
+ <message>
+ <source>Show the %1 help message to get a list with possible Bitcoin command-line options</source>
+ <translation type="unfinished">Onyesha %1 ujumbe wa usaidizi ili kupata orodha na chaguo zinazowezekana za mstari wa amri za Bitcoin</translation>
+ </message>
+ <message>
+ <source>&amp;Mask values</source>
+ <translation type="unfinished">&amp;Funga maadili</translation>
+ </message>
+ <message>
+ <source>Mask the values in the Overview tab</source>
+ <translation type="unfinished">Ficha maadili kwenye kichupo cha Muhtasari</translation>
+ </message>
+ <message>
+ <source>No wallets available</source>
+ <translation type="unfinished">Hakuna pochi zinazopatikana</translation>
+ </message>
+ <message>
+ <source>Wallet Data</source>
+ <extracomment>Name of the wallet data file format.</extracomment>
+ <translation type="unfinished">Data ya Pochi</translation>
+ </message>
+ <message>
+ <source>Load Wallet Backup</source>
+ <extracomment>The title for Restore Wallet File Windows</extracomment>
+ <translation type="unfinished">Pakia Hifadhi Nakala ya Wallet</translation>
+ </message>
+ <message>
+ <source>Restore Wallet</source>
+ <extracomment>Title of pop-up window shown when the user is attempting to restore a wallet.</extracomment>
+ <translation type="unfinished">Rejesha Pochi</translation>
+ </message>
+ <message>
<source>Wallet Name</source>
<extracomment>Label of the input field where the name of the wallet is entered.</extracomment>
<translation type="unfinished">Jina la Wallet</translation>
</message>
<message>
+ <source>&amp;Window</source>
+ <translation type="unfinished">&amp;Dirisha</translation>
+ </message>
+ <message>
+ <source>Zoom</source>
+ <translation type="unfinished">Kuza</translation>
+ </message>
+ <message>
+ <source>Main Window</source>
+ <translation type="unfinished">Dirisha Kuu</translation>
+ </message>
+ <message>
+ <source>%1 client</source>
+ <translation type="unfinished">%1 mteja</translation>
+ </message>
+ <message>
<source>&amp;Hide</source>
<translation type="unfinished">&amp;Ficha</translation>
</message>
+ <message>
+ <source>S&amp;how</source>
+ <translation type="unfinished">Jinsi &amp; jinsi</translation>
+ </message>
<message numerus="yes">
<source>%n active connection(s) to Bitcoin network.</source>
<extracomment>A substring of the tooltip.</extracomment>
@@ -550,15 +692,99 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</translation>
</message>
<message>
+ <source>Click for more actions.</source>
+ <extracomment>A substring of the tooltip. "More actions" are available via the context menu.</extracomment>
+ <translation type="unfinished">Bofya kwa vitendo zaidi.</translation>
+ </message>
+ <message>
+ <source>Show Peers tab</source>
+ <extracomment>A context menu item. The "Peers tab" is an element of the "Node window".</extracomment>
+ <translation type="unfinished">Onyesha kichupo cha Marika</translation>
+ </message>
+ <message>
+ <source>Disable network activity</source>
+ <extracomment>A context menu item.</extracomment>
+ <translation type="unfinished">Zima shughuli za mtandao</translation>
+ </message>
+ <message>
+ <source>Enable network activity</source>
+ <extracomment>A context menu item. The network activity was disabled previously.</extracomment>
+ <translation type="unfinished">Washa shughuli za mtandao</translation>
+ </message>
+ <message>
+ <source>Pre-syncing Headers (%1%)…</source>
+ <translation type="unfinished">Kusawazisha Vichwa vya awali (%1%)...</translation>
+ </message>
+ <message>
+ <source>Error creating wallet</source>
+ <translation type="unfinished">Hitilafu unapounda pochi</translation>
+ </message>
+ <message>
+ <source>Cannot create new wallet, the software was compiled without sqlite support (required for descriptor wallets)</source>
+ <translation type="unfinished">Haiwezi kuunda pochi mpya, programu iliundwa bila usaidizi wa sqlite (inahitajika kwa pochi za maelezo)</translation>
+ </message>
+ <message>
<source>Error: %1</source>
<translation type="unfinished">Kosa: %1</translation>
</message>
<message>
+ <source>Warning: %1</source>
+ <translation type="unfinished">Onyo: %1</translation>
+ </message>
+ <message>
+ <source>Date: %1
+</source>
+ <translation type="unfinished">Tarehe: %1</translation>
+ </message>
+ <message>
+ <source>Amount: %1
+</source>
+ <translation type="unfinished">Kiasi: %1
+</translation>
+ </message>
+ <message>
+ <source>Wallet: %1
+</source>
+ <translation type="unfinished">Pochi: %1
+</translation>
+ </message>
+ <message>
+ <source>Type: %1
+</source>
+ <translation type="unfinished">Aina: %1</translation>
+ </message>
+ <message>
<source>Label: %1
</source>
<translation type="unfinished">Chapa: %1
</translation>
</message>
+ <message>
+ <source>Address: %1
+</source>
+ <translation type="unfinished">Anwani: %1
+</translation>
+ </message>
+ <message>
+ <source>Sent transaction</source>
+ <translation type="unfinished">Umetuma muamala</translation>
+ </message>
+ <message>
+ <source>Incoming transaction</source>
+ <translation type="unfinished">Muamala unaoingia</translation>
+ </message>
+ <message>
+ <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source>
+ <translation type="unfinished">Uzalishaji wa ufunguo wa HD ni &lt;b&gt;kuwezeshwa &lt;/b&gt;</translation>
+ </message>
+ <message>
+ <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source>
+ <translation type="unfinished">Uzalishaji wa ufunguo wa HD ni &lt;b&gt;kutowezeshwa&lt;/b&gt;</translation>
+ </message>
+ <message>
+ <source>Private key &lt;b&gt;disabled&lt;/b&gt;</source>
+ <translation type="unfinished">Ufunguo wa kibinafsi &lt;b&gt; umezimwa &lt;/b&gt;</translation>
+ </message>
</context>
<context>
<name>CoinControlDialog</name>
@@ -592,6 +818,13 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</message>
</context>
<context>
+ <name>MigrateWalletActivity</name>
+ <message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">Hamisha Pochi</translation>
+ </message>
+ </context>
+<context>
<name>OpenWalletActivity</name>
<message>
<source>Open Wallet</source>
@@ -602,6 +835,11 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<context>
<name>RestoreWalletActivity</name>
<message>
+ <source>Restore Wallet</source>
+ <extracomment>Title of progress window which is displayed when wallets are being restored.</extracomment>
+ <translation type="unfinished">Rejesha Pochi</translation>
+ </message>
+ <message>
<source>Restore wallet warning</source>
<extracomment>Title of message box which is displayed when the wallet is restored with some warning.</extracomment>
<translation type="unfinished">Rejesha onyo la pochi</translation>
@@ -617,6 +855,10 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<source>Closing the wallet for too long can result in having to resync the entire chain if pruning is enabled.</source>
<translation type="unfinished">Kufunga pochi kwa muda mrefu sana kunaweza kusababisha kusawazisha tena mnyororo mzima ikiwa upogoaji umewezeshwa.</translation>
</message>
+ <message>
+ <source>Close all wallets</source>
+ <translation type="unfinished">Funga pochi zote</translation>
+ </message>
</context>
<context>
<name>CreateWalletDialog</name>
@@ -730,6 +972,10 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<context>
<name>OptionsDialog</name>
<message>
+ <source>&amp;Window</source>
+ <translation type="unfinished">&amp;Dirisha</translation>
+ </message>
+ <message>
<source>Error</source>
<translation type="unfinished">Onyo</translation>
</message>
@@ -750,6 +996,13 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</message>
</context>
<context>
+ <name>RPCConsole</name>
+ <message>
+ <source>Node window</source>
+ <translation type="unfinished">Dirisha la nodi</translation>
+ </message>
+ </context>
+<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>&amp;Label:</source>
@@ -925,6 +1178,11 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished">Toa data katika kichupo cha sasa hadi kwenye faili</translation>
</message>
+ <message>
+ <source>Wallet Data</source>
+ <extracomment>Name of the wallet data file format.</extracomment>
+ <translation type="unfinished">Data ya Pochi</translation>
+ </message>
</context>
<context>
<name>bitcoin-core</name>
diff --git a/src/qt/locale/bitcoin_th.ts b/src/qt/locale/bitcoin_th.ts
index 3706a7bd98..464e39cbac 100644
--- a/src/qt/locale/bitcoin_th.ts
+++ b/src/qt/locale/bitcoin_th.ts
@@ -1,5 +1,12 @@
<TS version="2.1" language="th">
<context>
+ <name>AskPassphraseDialog</name>
+ <message>
+ <source>Back</source>
+ <translation type="unfinished">ย้อนกลับ</translation>
+ </message>
+ </context>
+<context>
<name>QObject</name>
<message>
<source>%1 didn't yet exit safely…</source>
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index 4db2d6016c..4d590c3eed 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -95,8 +95,7 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet)
ui->verticalLayout->setStretchFactor(ui->tabWidget, 1);
/* Main elements init */
- ui->databaseCache->setMinimum(nMinDbCache);
- ui->databaseCache->setMaximum(nMaxDbCache);
+ ui->databaseCache->setRange(nMinDbCache, std::numeric_limits<int>::max());
ui->threadsScriptVerif->setMinimum(-GetNumCores());
ui->threadsScriptVerif->setMaximum(MAX_SCRIPTCHECK_THREADS);
ui->pruneWarning->setVisible(false);
diff --git a/src/rest.cpp b/src/rest.cpp
index c42bc8e40c..4732922a15 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -309,8 +309,11 @@ static bool rest_block(const std::any& context,
if (!pblockindex) {
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
}
- if (chainman.m_blockman.IsBlockPruned(*pblockindex)) {
- return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
+ if (!(pblockindex->nStatus & BLOCK_HAVE_DATA)) {
+ if (chainman.m_blockman.IsBlockPruned(*pblockindex)) {
+ return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
+ }
+ return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (not fully downloaded)");
}
pos = pblockindex->GetBlockPos();
}
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 7ffd03e71a..a9cea44779 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -201,8 +201,10 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn
case TxVerbosity::SHOW_DETAILS_AND_PREVOUT:
CBlockUndo blockUndo;
const bool is_not_pruned{WITH_LOCK(::cs_main, return !blockman.IsBlockPruned(blockindex))};
- const bool have_undo{is_not_pruned && blockman.UndoReadFromDisk(blockUndo, blockindex)};
-
+ bool have_undo{is_not_pruned && WITH_LOCK(::cs_main, return blockindex.nStatus & BLOCK_HAVE_UNDO)};
+ if (have_undo && !blockman.UndoReadFromDisk(blockUndo, blockindex)) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.");
+ }
for (size_t i = 0; i < block.vtx.size(); ++i) {
const CTransactionRef& tx = block.vtx.at(i);
// coinbase transaction (i.e. i == 0) doesn't have undo data
@@ -597,20 +599,32 @@ static RPCHelpMan getblockheader()
};
}
+void CheckBlockDataAvailability(BlockManager& blockman, const CBlockIndex& blockindex, bool check_for_undo)
+{
+ AssertLockHeld(cs_main);
+ uint32_t flag = check_for_undo ? BLOCK_HAVE_UNDO : BLOCK_HAVE_DATA;
+ if (!(blockindex.nStatus & flag)) {
+ if (blockman.IsBlockPruned(blockindex)) {
+ throw JSONRPCError(RPC_MISC_ERROR, strprintf("%s not available (pruned data)", check_for_undo ? "Undo data" : "Block"));
+ }
+ if (check_for_undo) {
+ throw JSONRPCError(RPC_MISC_ERROR, "Undo data not available");
+ }
+ throw JSONRPCError(RPC_MISC_ERROR, "Block not available (not fully downloaded)");
+ }
+}
+
static CBlock GetBlockChecked(BlockManager& blockman, const CBlockIndex& blockindex)
{
CBlock block;
{
LOCK(cs_main);
- if (blockman.IsBlockPruned(blockindex)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
- }
+ CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/false);
}
if (!blockman.ReadBlockFromDisk(block, blockindex)) {
- // Block not found on disk. This could be because we have the block
- // header in our index but not yet have the block or did not accept the
- // block. Or if the block was pruned right after we released the lock above.
+ // Block not found on disk. This shouldn't normally happen unless the block was
+ // pruned right after we released the lock above.
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk");
}
@@ -623,16 +637,13 @@ static std::vector<uint8_t> GetRawBlockChecked(BlockManager& blockman, const CBl
FlatFilePos pos{};
{
LOCK(cs_main);
- if (blockman.IsBlockPruned(blockindex)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
- }
+ CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/false);
pos = blockindex.GetBlockPos();
}
if (!blockman.ReadRawBlockFromDisk(data, pos)) {
- // Block not found on disk. This could be because we have the block
- // header in our index but not yet have the block or did not accept the
- // block. Or if the block was pruned right after we released the lock above.
+ // Block not found on disk. This shouldn't normally happen unless the block was
+ // pruned right after we released the lock above.
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk");
}
@@ -648,9 +659,7 @@ static CBlockUndo GetUndoChecked(BlockManager& blockman, const CBlockIndex& bloc
{
LOCK(cs_main);
- if (blockman.IsBlockPruned(blockindex)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Undo data not available (pruned data)");
- }
+ CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/true);
}
if (!blockman.UndoReadFromDisk(blockUndo, blockindex)) {
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index b5a0382da1..a23f5cf024 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -60,5 +60,6 @@ UniValue CreateUTXOSnapshot(
//! Return height of highest block that has been pruned, or std::nullopt if no blocks have been pruned
std::optional<int> GetPruneHeight(const node::BlockManager& blockman, const CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+void CheckBlockDataAvailability(node::BlockManager& blockman, const CBlockIndex& blockindex, bool check_for_undo) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
#endif // BITCOIN_RPC_BLOCKCHAIN_H
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 375352b18d..b986c26e7a 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -45,9 +45,9 @@
#include <memory>
#include <stdint.h>
-using node::BlockAssembler;
-using node::CBlockTemplate;
+using interfaces::BlockTemplate;
using interfaces::Mining;
+using node::BlockAssembler;
using node::NodeContext;
using node::RegenerateCommitments;
using node::UpdateTime;
@@ -130,7 +130,7 @@ static RPCHelpMan getnetworkhashps()
};
}
-static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
+static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock&& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
{
block_out.reset();
block.hashMerkleRoot = BlockMerkleRoot(block);
@@ -146,7 +146,7 @@ static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock& bl
return true;
}
- block_out = std::make_shared<const CBlock>(block);
+ block_out = std::make_shared<const CBlock>(std::move(block));
if (!process_new_block) return true;
@@ -161,12 +161,11 @@ static UniValue generateBlocks(ChainstateManager& chainman, Mining& miner, const
{
UniValue blockHashes(UniValue::VARR);
while (nGenerate > 0 && !chainman.m_interrupt) {
- std::unique_ptr<CBlockTemplate> pblocktemplate(miner.createNewBlock(coinbase_script));
- if (!pblocktemplate.get())
- throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
+ std::unique_ptr<BlockTemplate> block_template(miner.createNewBlock(coinbase_script));
+ CHECK_NONFATAL(block_template);
std::shared_ptr<const CBlock> block_out;
- if (!GenerateBlock(chainman, miner, pblocktemplate->block, nMaxTries, block_out, /*process_new_block=*/true)) {
+ if (!GenerateBlock(chainman, miner, block_template->getBlock(), nMaxTries, block_out, /*process_new_block=*/true)) {
break;
}
@@ -371,11 +370,10 @@ static RPCHelpMan generateblock()
ChainstateManager& chainman = EnsureChainman(node);
{
- std::unique_ptr<CBlockTemplate> blocktemplate{miner.createNewBlock(coinbase_script, {.use_mempool = false})};
- if (!blocktemplate) {
- throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
- }
- block = blocktemplate->block;
+ std::unique_ptr<BlockTemplate> block_template{miner.createNewBlock(coinbase_script, {.use_mempool = false})};
+ CHECK_NONFATAL(block_template);
+
+ block = block_template->getBlock();
}
CHECK_NONFATAL(block.vtx.size() == 1);
@@ -394,7 +392,7 @@ static RPCHelpMan generateblock()
std::shared_ptr<const CBlock> block_out;
uint64_t max_tries{DEFAULT_MAX_TRIES};
- if (!GenerateBlock(chainman, miner, block, max_tries, block_out, process_new_block) || !block_out) {
+ if (!GenerateBlock(chainman, miner, std::move(block), max_tries, block_out, process_new_block) || !block_out) {
throw JSONRPCError(RPC_MISC_ERROR, "Failed to make block.");
}
@@ -800,7 +798,7 @@ static RPCHelpMan getblocktemplate()
// Update block
static CBlockIndex* pindexPrev;
static int64_t time_start;
- static std::unique_ptr<CBlockTemplate> pblocktemplate;
+ static std::unique_ptr<BlockTemplate> block_template;
if (!pindexPrev || pindexPrev->GetBlockHash() != tip ||
(miner.getTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - time_start > 5))
{
@@ -814,20 +812,19 @@ static RPCHelpMan getblocktemplate()
// Create new block
CScript scriptDummy = CScript() << OP_TRUE;
- pblocktemplate = miner.createNewBlock(scriptDummy);
- if (!pblocktemplate) {
- throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
- }
+ block_template = miner.createNewBlock(scriptDummy);
+ CHECK_NONFATAL(block_template);
+
// Need to update only after we know createNewBlock succeeded
pindexPrev = pindexPrevNew;
}
CHECK_NONFATAL(pindexPrev);
- CBlock* pblock = &pblocktemplate->block; // pointer for convenience
+ CBlock block{block_template->getBlock()};
// Update nTime
- UpdateTime(pblock, consensusParams, pindexPrev);
- pblock->nNonce = 0;
+ UpdateTime(&block, consensusParams, pindexPrev);
+ block.nNonce = 0;
// NOTE: If at some point we support pre-segwit miners post-segwit-activation, this needs to take segwit support into consideration
const bool fPreSegWit = !DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_SEGWIT);
@@ -836,8 +833,11 @@ static RPCHelpMan getblocktemplate()
UniValue transactions(UniValue::VARR);
std::map<uint256, int64_t> setTxIndex;
+ std::vector<CAmount> tx_fees{block_template->getTxFees()};
+ std::vector<CAmount> tx_sigops{block_template->getTxSigops()};
+
int i = 0;
- for (const auto& it : pblock->vtx) {
+ for (const auto& it : block.vtx) {
const CTransaction& tx = *it;
uint256 txHash = tx.GetHash();
setTxIndex[txHash] = i++;
@@ -860,8 +860,8 @@ static RPCHelpMan getblocktemplate()
entry.pushKV("depends", std::move(deps));
int index_in_template = i - 1;
- entry.pushKV("fee", pblocktemplate->vTxFees[index_in_template]);
- int64_t nTxSigOps = pblocktemplate->vTxSigOpsCost[index_in_template];
+ entry.pushKV("fee", tx_fees.at(index_in_template));
+ int64_t nTxSigOps{tx_sigops.at(index_in_template)};
if (fPreSegWit) {
CHECK_NONFATAL(nTxSigOps % WITNESS_SCALE_FACTOR == 0);
nTxSigOps /= WITNESS_SCALE_FACTOR;
@@ -874,7 +874,7 @@ static RPCHelpMan getblocktemplate()
UniValue aux(UniValue::VOBJ);
- arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits);
+ arith_uint256 hashTarget = arith_uint256().SetCompact(block.nBits);
UniValue aMutable(UniValue::VARR);
aMutable.push_back("time");
@@ -904,7 +904,7 @@ static RPCHelpMan getblocktemplate()
break;
case ThresholdState::LOCKED_IN:
// Ensure bit is set in block version
- pblock->nVersion |= chainman.m_versionbitscache.Mask(consensusParams, pos);
+ block.nVersion |= chainman.m_versionbitscache.Mask(consensusParams, pos);
[[fallthrough]];
case ThresholdState::STARTED:
{
@@ -913,7 +913,7 @@ static RPCHelpMan getblocktemplate()
if (setClientRules.find(vbinfo.name) == setClientRules.end()) {
if (!vbinfo.gbt_force) {
// If the client doesn't support this, don't indicate it in the [default] version
- pblock->nVersion &= ~chainman.m_versionbitscache.Mask(consensusParams, pos);
+ block.nVersion &= ~chainman.m_versionbitscache.Mask(consensusParams, pos);
}
}
break;
@@ -933,15 +933,15 @@ static RPCHelpMan getblocktemplate()
}
}
}
- result.pushKV("version", pblock->nVersion);
+ result.pushKV("version", block.nVersion);
result.pushKV("rules", std::move(aRules));
result.pushKV("vbavailable", std::move(vbavailable));
result.pushKV("vbrequired", int(0));
- result.pushKV("previousblockhash", pblock->hashPrevBlock.GetHex());
+ result.pushKV("previousblockhash", block.hashPrevBlock.GetHex());
result.pushKV("transactions", std::move(transactions));
result.pushKV("coinbaseaux", std::move(aux));
- result.pushKV("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue);
+ result.pushKV("coinbasevalue", (int64_t)block.vtx[0]->vout[0].nValue);
result.pushKV("longpollid", tip.GetHex() + ToString(nTransactionsUpdatedLast));
result.pushKV("target", hashTarget.GetHex());
result.pushKV("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1);
@@ -960,16 +960,16 @@ static RPCHelpMan getblocktemplate()
if (!fPreSegWit) {
result.pushKV("weightlimit", (int64_t)MAX_BLOCK_WEIGHT);
}
- result.pushKV("curtime", pblock->GetBlockTime());
- result.pushKV("bits", strprintf("%08x", pblock->nBits));
+ result.pushKV("curtime", block.GetBlockTime());
+ result.pushKV("bits", strprintf("%08x", block.nBits));
result.pushKV("height", (int64_t)(pindexPrev->nHeight+1));
if (consensusParams.signet_blocks) {
result.pushKV("signet_challenge", HexStr(consensusParams.signet_challenge));
}
- if (!pblocktemplate->vchCoinbaseCommitment.empty()) {
- result.pushKV("default_witness_commitment", HexStr(pblocktemplate->vchCoinbaseCommitment));
+ if (!block_template->getCoinbaseCommitment().empty()) {
+ result.pushKV("default_witness_commitment", HexStr(block_template->getCoinbaseCommitment()));
}
return result;
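
A minimal sketch (not part of the diff) of the call pattern the hunks above switch to: callers receive an interfaces::BlockTemplate from Mining::createNewBlock and copy what they need through its accessors instead of reaching into node::CBlockTemplate members. BuildCandidate is a hypothetical helper name; the accessors and return types are taken from the hunks above.

```cpp
// Sketch only: mirrors the accessor usage in the getblocktemplate hunk above.
#include <consensus/amount.h>
#include <interfaces/mining.h>
#include <primitives/block.h>
#include <script/script.h>
#include <util/check.h>

#include <memory>
#include <vector>

CBlock BuildCandidate(interfaces::Mining& miner, const CScript& coinbase_script)
{
    std::unique_ptr<interfaces::BlockTemplate> tmpl{miner.createNewBlock(coinbase_script)};
    CHECK_NONFATAL(tmpl); // template creation is expected to succeed

    CBlock block{tmpl->getBlock()};                   // copy of the template block
    std::vector<CAmount> fees{tmpl->getTxFees()};     // per-transaction fees
    std::vector<CAmount> sigops{tmpl->getTxSigops()}; // per-transaction sigop cost
    (void)fees;
    (void)sigops;
    return block;
}
```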
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 21bc0e52f1..bb072370ce 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -405,11 +405,16 @@ static RPCHelpMan getrawtransaction()
CBlockUndo blockUndo;
CBlock block;
- if (tx->IsCoinBase() || !blockindex || WITH_LOCK(::cs_main, return chainman.m_blockman.IsBlockPruned(*blockindex)) ||
- !(chainman.m_blockman.UndoReadFromDisk(blockUndo, *blockindex) && chainman.m_blockman.ReadBlockFromDisk(block, *blockindex))) {
+ if (tx->IsCoinBase() || !blockindex || WITH_LOCK(::cs_main, return !(blockindex->nStatus & BLOCK_HAVE_MASK))) {
TxToJSON(*tx, hash_block, result, chainman.ActiveChainstate());
return result;
}
+ if (!chainman.m_blockman.UndoReadFromDisk(blockUndo, *blockindex)) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.");
+ }
+ if (!chainman.m_blockman.ReadBlockFromDisk(block, *blockindex)) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Block data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.");
+ }
CTxUndo* undoTX {nullptr};
auto it = std::find_if(block.vtx.begin(), block.vtx.end(), [tx](CTransactionRef t){ return *t == *tx; });
diff --git a/src/rpc/txoutproof.cpp b/src/rpc/txoutproof.cpp
index 7958deb677..40294fda06 100644
--- a/src/rpc/txoutproof.cpp
+++ b/src/rpc/txoutproof.cpp
@@ -10,6 +10,7 @@
#include <merkleblock.h>
#include <node/blockstorage.h>
#include <primitives/transaction.h>
+#include <rpc/blockchain.h>
#include <rpc/server.h>
#include <rpc/server_util.h>
#include <rpc/util.h>
@@ -96,6 +97,10 @@ static RPCHelpMan gettxoutproof()
}
}
+ {
+ LOCK(cs_main);
+ CheckBlockDataAvailability(chainman.m_blockman, *pblockindex, /*check_for_undo=*/false);
+ }
CBlock block;
if (!chainman.m_blockman.ReadBlockFromDisk(block, *pblockindex)) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk");
diff --git a/src/streams.cpp b/src/streams.cpp
index cdd36a86fe..5f7baf92b9 100644
--- a/src/streams.cpp
+++ b/src/streams.cpp
@@ -4,21 +4,29 @@
#include <span.h>
#include <streams.h>
+#include <util/fs_helpers.h>
#include <array>
+AutoFile::AutoFile(std::FILE* file, std::vector<std::byte> data_xor)
+ : m_file{file}, m_xor{std::move(data_xor)}
+{
+ if (!IsNull()) {
+ auto pos{std::ftell(m_file)};
+ if (pos >= 0) m_position = pos;
+ }
+}
+
std::size_t AutoFile::detail_fread(Span<std::byte> dst)
{
if (!m_file) throw std::ios_base::failure("AutoFile::read: file handle is nullptr");
- if (m_xor.empty()) {
- return std::fread(dst.data(), 1, dst.size(), m_file);
- } else {
- const auto init_pos{std::ftell(m_file)};
- if (init_pos < 0) throw std::ios_base::failure("AutoFile::read: ftell failed");
- std::size_t ret{std::fread(dst.data(), 1, dst.size(), m_file)};
- util::Xor(dst.subspan(0, ret), m_xor, init_pos);
- return ret;
+ size_t ret = std::fread(dst.data(), 1, dst.size(), m_file);
+ if (!m_xor.empty()) {
+ if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::read: position unknown");
+ util::Xor(dst.subspan(0, ret), m_xor, *m_position);
}
+ if (m_position.has_value()) *m_position += ret;
+ return ret;
}
void AutoFile::seek(int64_t offset, int origin)
@@ -29,18 +37,23 @@ void AutoFile::seek(int64_t offset, int origin)
if (std::fseek(m_file, offset, origin) != 0) {
throw std::ios_base::failure(feof() ? "AutoFile::seek: end of file" : "AutoFile::seek: fseek failed");
}
+ if (origin == SEEK_SET) {
+ m_position = offset;
+ } else if (origin == SEEK_CUR && m_position.has_value()) {
+ *m_position += offset;
+ } else {
+ int64_t r{std::ftell(m_file)};
+ if (r < 0) {
+ throw std::ios_base::failure("AutoFile::seek: ftell failed");
+ }
+ m_position = r;
+ }
}
int64_t AutoFile::tell()
{
- if (IsNull()) {
- throw std::ios_base::failure("AutoFile::tell: file handle is nullptr");
- }
- int64_t r{std::ftell(m_file)};
- if (r < 0) {
- throw std::ios_base::failure("AutoFile::tell: ftell failed");
- }
- return r;
+ if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::tell: position unknown");
+ return *m_position;
}
void AutoFile::read(Span<std::byte> dst)
@@ -60,6 +73,7 @@ void AutoFile::ignore(size_t nSize)
throw std::ios_base::failure(feof() ? "AutoFile::ignore: end of file" : "AutoFile::ignore: fread failed");
}
nSize -= nNow;
+ if (m_position.has_value()) *m_position += nNow;
}
}
@@ -70,19 +84,34 @@ void AutoFile::write(Span<const std::byte> src)
if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) {
throw std::ios_base::failure("AutoFile::write: write failed");
}
+ if (m_position.has_value()) *m_position += src.size();
} else {
- auto current_pos{std::ftell(m_file)};
- if (current_pos < 0) throw std::ios_base::failure("AutoFile::write: ftell failed");
+ if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::write: position unknown");
std::array<std::byte, 4096> buf;
while (src.size() > 0) {
auto buf_now{Span{buf}.first(std::min<size_t>(src.size(), buf.size()))};
std::copy(src.begin(), src.begin() + buf_now.size(), buf_now.begin());
- util::Xor(buf_now, m_xor, current_pos);
+ util::Xor(buf_now, m_xor, *m_position);
if (std::fwrite(buf_now.data(), 1, buf_now.size(), m_file) != buf_now.size()) {
throw std::ios_base::failure{"XorFile::write: failed"};
}
src = src.subspan(buf_now.size());
- current_pos += buf_now.size();
+ *m_position += buf_now.size();
}
}
}
+
+bool AutoFile::Commit()
+{
+ return ::FileCommit(m_file);
+}
+
+bool AutoFile::IsError()
+{
+ return ferror(m_file);
+}
+
+bool AutoFile::Truncate(unsigned size)
+{
+ return ::TruncateFile(m_file, size);
+}
diff --git a/src/streams.h b/src/streams.h
index c2a9dea287..431a4d77c6 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -390,9 +390,10 @@ class AutoFile
protected:
std::FILE* m_file;
std::vector<std::byte> m_xor;
+ std::optional<int64_t> m_position;
public:
- explicit AutoFile(std::FILE* file, std::vector<std::byte> data_xor={}) : m_file{file}, m_xor{std::move(data_xor)} {}
+ explicit AutoFile(std::FILE* file, std::vector<std::byte> data_xor={});
~AutoFile() { fclose(); }
@@ -419,12 +420,6 @@ public:
return ret;
}
- /** Get wrapped FILE* without transfer of ownership.
- * @note Ownership of the FILE* will remain with this class. Use this only if the scope of the
- * AutoFile outlives use of the passed pointer.
- */
- std::FILE* Get() const { return m_file; }
-
/** Return true if the wrapped FILE* is nullptr, false otherwise.
*/
bool IsNull() const { return m_file == nullptr; }
@@ -458,6 +453,10 @@ public:
::Unserialize(*this, obj);
return *this;
}
+
+ bool Commit();
+ bool IsError();
+ bool Truncate(unsigned size);
};
/** Wrapper around an AutoFile& that implements a ring buffer to
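
A minimal usage sketch (not part of the diff) of the AutoFile changes above: the constructor caches the starting offset, tell() returns the cached position instead of calling ftell, and Commit() replaces the former FileCommit(file.Get()) pattern. The scratch file name is made up for illustration.

```cpp
#include <streams.h>

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    // Hypothetical scratch file; real callers get their paths from the datadir.
    AutoFile file{std::fopen("scratch.dat", "wb+"), /*data_xor=*/{std::byte{0x5a}}};
    assert(!file.IsNull());

    uint32_t out{42}, in{0};
    file << out;                  // written XOR-obfuscated at the cached position
    assert(file.tell() == 4);     // no ftell() needed: the position is tracked
    file.seek(0, SEEK_SET);       // seeking keeps the cached position in sync
    file >> in;                   // read back, de-obfuscated with the same offset
    assert(in == out);

    return file.Commit() ? 0 : 1; // flush to disk via the new Commit() helper
}
```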
diff --git a/src/test/README.md b/src/test/README.md
index f71e0771bf..7e0f245ee8 100644
--- a/src/test/README.md
+++ b/src/test/README.md
@@ -10,14 +10,19 @@ The build system is set up to compile an executable called `test_bitcoin`
that runs all of the unit tests. The main source file for the test library is found in
`util/setup_common.cpp`.
+The examples in this document assume the build directory is named
+`build`. You'll need to adapt them if you named it differently.
+
### Compiling/running unit tests
Unit tests will be automatically compiled if dependencies were met
during the generation of the Bitcoin Core build system
and tests weren't explicitly disabled.
-Assuming the build directory is named `build`, the unit tests can be run
-with `ctest --test-dir build`, which includes unit tests from subtrees.
+The unit tests can be run with `ctest --test-dir build`, which includes unit
+tests from subtrees.
+
+Run `test_bitcoin --list_content` for the full list of tests.
To run the unit tests manually, launch `build/src/test/test_bitcoin`. To recompile
after a test file was modified, run `cmake --build build` and then run the test again. If you
@@ -35,34 +40,45 @@ the `src/qt/test/test_main.cpp` file.
### Running individual tests
-`test_bitcoin` accepts the command line arguments from the boost framework.
-For example, to run just the `getarg_tests` suite of tests:
+The `test_bitcoin` runner accepts command line arguments from the Boost
+framework. To see the list of arguments that may be passed, run:
+
+```
+test_bitcoin --help
+```
+
+For example, to run only the tests in the `getarg_tests` file, with full logging:
```bash
build/src/test/test_bitcoin --log_level=all --run_test=getarg_tests
```
-`log_level` controls the verbosity of the test framework, which logs when a
-test case is entered, for example.
+or
-`test_bitcoin` also accepts some of the command line arguments accepted by
-`bitcoind`. Use `--` to separate these sets of arguments:
+```bash
+build/src/test/test_bitcoin -l all -t getarg_tests
+```
+
+or to run only the doubledash test in `getarg_tests`:
```bash
-build/src/test/test_bitcoin --log_level=all --run_test=getarg_tests -- -printtoconsole=1
+build/src/test/test_bitcoin --run_test=getarg_tests/doubledash
```
-The `-printtoconsole=1` after the two dashes sends debug logging, which
-normally goes only to `debug.log` within the data directory, also to the
-standard terminal output.
+The `--log_level=` (or `-l`) argument controls the verbosity of the test output.
-... or to run just the doubledash test:
+The `test_bitcoin` runner also accepts some of the command line arguments accepted by
+`bitcoind`. Use `--` to separate these sets of arguments:
```bash
-build/src/test/test_bitcoin --run_test=getarg_tests/doubledash
+build/src/test/test_bitcoin --log_level=all --run_test=getarg_tests -- -printtoconsole=1
```
-`test_bitcoin` creates a temporary working (data) directory with a randomly
+The `-printtoconsole=1` after the two dashes sends debug logging, which
+normally goes only to `debug.log` within the data directory, to the
+standard terminal output as well.
+
+Running `test_bitcoin` creates a temporary working (data) directory with a randomly
generated pathname within `test_common bitcoin/`, which in turn is within
the system's temporary directory (see
[`temp_directory_path`](https://en.cppreference.com/w/cpp/filesystem/temp_directory_path)).
@@ -97,8 +113,6 @@ If you run an entire test suite, such as `--run_test=getarg_tests`, or all the t
(by not specifying `--run_test`), a separate directory
will be created for each individual test.
-Run `test_bitcoin --help` for the full list of tests.
-
### Adding test cases
To add a new unit test file to our test suite, you need
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index da6ff77924..c4f58ebecf 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -196,21 +196,21 @@ BOOST_AUTO_TEST_CASE(addrman_select)
BOOST_AUTO_TEST_CASE(addrman_select_by_network)
{
auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node));
- BOOST_CHECK(!addrman->Select(/*new_only=*/true, NET_IPV4).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_IPV4).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/true, {NET_IPV4}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_IPV4}).first.IsValid());
// add ipv4 address to the new table
CNetAddr source = ResolveIP("252.2.2.2");
CService addr1 = ResolveService("250.1.1.1", 8333);
BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK(addrman->Select(/*new_only=*/true, NET_IPV4).first == addr1);
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_IPV4).first == addr1);
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_IPV6).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_ONION).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_I2P).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_CJDNS).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/true, NET_CJDNS).first.IsValid());
+ BOOST_CHECK(addrman->Select(/*new_only=*/true, {NET_IPV4}).first == addr1);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_IPV4}).first == addr1);
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_IPV6}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_ONION}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_I2P}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_CJDNS}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/true, {NET_CJDNS}).first.IsValid());
BOOST_CHECK(addrman->Select(/*new_only=*/false).first == addr1);
// add I2P address to the new table
@@ -218,25 +218,29 @@ BOOST_AUTO_TEST_CASE(addrman_select_by_network)
i2p_addr.SetSpecial("udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.i2p");
BOOST_CHECK(addrman->Add({i2p_addr}, source));
- BOOST_CHECK(addrman->Select(/*new_only=*/true, NET_I2P).first == i2p_addr);
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_I2P).first == i2p_addr);
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_IPV4).first == addr1);
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_IPV6).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_ONION).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_CJDNS).first.IsValid());
+ BOOST_CHECK(addrman->Select(/*new_only=*/true, {NET_I2P}).first == i2p_addr);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_I2P}).first == i2p_addr);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_IPV4}).first == addr1);
+ std::unordered_set<Network> nets_with_entries = {NET_IPV4, NET_I2P};
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, nets_with_entries).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_IPV6}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_ONION}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_CJDNS}).first.IsValid());
+ std::unordered_set<Network> nets_without_entries = {NET_IPV6, NET_ONION, NET_CJDNS};
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, nets_without_entries).first.IsValid());
// bump I2P address to tried table
BOOST_CHECK(addrman->Good(i2p_addr));
- BOOST_CHECK(!addrman->Select(/*new_only=*/true, NET_I2P).first.IsValid());
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_I2P).first == i2p_addr);
+ BOOST_CHECK(!addrman->Select(/*new_only=*/true, {NET_I2P}).first.IsValid());
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_I2P}).first == i2p_addr);
// add another I2P address to the new table
CAddress i2p_addr2;
i2p_addr2.SetSpecial("c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p");
BOOST_CHECK(addrman->Add({i2p_addr2}, source));
- BOOST_CHECK(addrman->Select(/*new_only=*/true, NET_I2P).first == i2p_addr2);
+ BOOST_CHECK(addrman->Select(/*new_only=*/true, {NET_I2P}).first == i2p_addr2);
// ensure that both new and tried table are selected from
bool new_selected{false};
@@ -244,7 +248,7 @@ BOOST_AUTO_TEST_CASE(addrman_select_by_network)
int counter = 256;
while (--counter > 0 && (!new_selected || !tried_selected)) {
- const CAddress selected{addrman->Select(/*new_only=*/false, NET_I2P).first};
+ const CAddress selected{addrman->Select(/*new_only=*/false, {NET_I2P}).first};
BOOST_REQUIRE(selected == i2p_addr || selected == i2p_addr2);
if (selected == i2p_addr) {
tried_selected = true;
@@ -277,7 +281,7 @@ BOOST_AUTO_TEST_CASE(addrman_select_special)
// since the only ipv4 address is on the new table, ensure that the new
 // table gets selected even if new_only is false. if the table were being
 // selected at random, this test would sporadically fail
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_IPV4).first == addr1);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_IPV4}).first == addr1);
}
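Taken together, these checks pin down the semantics of the set-based Select overload: it may return an entry from any of the requested networks, and it returns an invalid address when none of the requested networks have entries. A minimal calling sketch, reusing the names from the tests above (illustrative only, not part of the patch):

    std::unordered_set<Network> wanted{NET_IPV4, NET_I2P};
    const auto [addr, last_try] = addrman->Select(/*new_only=*/false, wanted);
    if (!addr.IsValid()) {
        // None of the requested networks had a selectable entry.
    }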
BOOST_AUTO_TEST_CASE(addrman_new_collisions)
diff --git a/src/test/fuzz/CMakeLists.txt b/src/test/fuzz/CMakeLists.txt
index 165add2e5a..1c7b0d5c25 100644
--- a/src/test/fuzz/CMakeLists.txt
+++ b/src/test/fuzz/CMakeLists.txt
@@ -72,6 +72,7 @@ add_executable(fuzz
netbase_dns_lookup.cpp
node_eviction.cpp
p2p_handshake.cpp
+ p2p_headers_presync.cpp
p2p_transport_serialization.cpp
package_eval.cpp
parse_hd_keypath.cpp
diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp
index c324b4fdd9..593086af21 100644
--- a/src/test/fuzz/addrman.cpp
+++ b/src/test/fuzz/addrman.cpp
@@ -285,7 +285,15 @@ FUZZ_TARGET(addrman, .init = initialize_addrman)
auto max_pct = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096);
auto filtered = fuzzed_data_provider.ConsumeBool();
(void)const_addr_man.GetAddr(max_addresses, max_pct, network, filtered);
- (void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool(), network);
+
+ std::unordered_set<Network> nets;
+ for (const auto& net : ALL_NETWORKS) {
+ if (fuzzed_data_provider.ConsumeBool()) {
+ nets.insert(net);
+ }
+ }
+ (void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool(), nets);
+
std::optional<bool> in_new;
if (fuzzed_data_provider.ConsumeBool()) {
in_new = fuzzed_data_provider.ConsumeBool();
diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp
index 45316b6b21..81761c7bf9 100644
--- a/src/test/fuzz/autofile.cpp
+++ b/src/test/fuzz/autofile.cpp
@@ -56,7 +56,6 @@ FUZZ_TARGET(autofile)
WriteToStream(fuzzed_data_provider, auto_file);
});
}
- (void)auto_file.Get();
(void)auto_file.IsNull();
if (fuzzed_data_provider.ConsumeBool()) {
FILE* f = auto_file.release();
diff --git a/src/test/fuzz/cluster_linearize.cpp b/src/test/fuzz/cluster_linearize.cpp
index 2dfdfbb41d..d91f85d867 100644
--- a/src/test/fuzz/cluster_linearize.cpp
+++ b/src/test/fuzz/cluster_linearize.cpp
@@ -165,6 +165,23 @@ std::pair<std::vector<ClusterIndex>, bool> SimpleLinearize(const DepGraph<SetTyp
return {std::move(linearization), optimal};
}
+/** Stitch connected components together in a DepGraph, guaranteeing its corresponding cluster is connected. */
+template<typename BS>
+void MakeConnected(DepGraph<BS>& depgraph)
+{
+ auto todo = BS::Fill(depgraph.TxCount());
+ auto comp = depgraph.FindConnectedComponent(todo);
+ Assume(depgraph.IsConnected(comp));
+ todo -= comp;
+ while (todo.Any()) {
+ auto nextcomp = depgraph.FindConnectedComponent(todo);
+ Assume(depgraph.IsConnected(nextcomp));
+ depgraph.AddDependency(comp.Last(), nextcomp.First());
+ todo -= nextcomp;
+ comp = nextcomp;
+ }
+}
+
/** Given a dependency graph, and a todo set, read a topological subset of todo from reader. */
template<typename SetType>
SetType ReadTopologicalSet(const DepGraph<SetType>& depgraph, const SetType& todo, SpanReader& reader)
@@ -369,6 +386,20 @@ FUZZ_TARGET(clusterlin_components)
assert(depgraph.FindConnectedComponent(todo).None());
}
+FUZZ_TARGET(clusterlin_make_connected)
+{
+ // Verify that MakeConnected makes graphs connected.
+
+ SpanReader reader(buffer);
+ DepGraph<TestBitSet> depgraph;
+ try {
+ reader >> Using<DepGraphFormatter>(depgraph);
+ } catch (const std::ios_base::failure&) {}
+ MakeConnected(depgraph);
+ SanityCheck(depgraph);
+ assert(depgraph.IsConnected());
+}
+
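For intuition, a minimal sketch of what MakeConnected does to a two-component graph (illustrative only, not part of the patch; it reuses the DepGraph/TestBitSet helpers used by these fuzz targets, and the fee/size values are arbitrary):

    DepGraph<TestBitSet> depgraph;
    (void)depgraph.AddTransaction({/*fee=*/1, /*size=*/100}); // component {0}
    (void)depgraph.AddTransaction({/*fee=*/2, /*size=*/100}); // component {1}
    MakeConnected(depgraph); // adds a single dependency, stitching the two components together
    assert(depgraph.IsConnected());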
FUZZ_TARGET(clusterlin_chunking)
{
// Verify the correctness of the ChunkLinearization function.
@@ -398,7 +429,7 @@ FUZZ_TARGET(clusterlin_chunking)
SetInfo<TestBitSet> accumulator, best;
for (ClusterIndex idx : linearization) {
if (todo[idx]) {
- accumulator |= SetInfo(depgraph, idx);
+ accumulator.Set(depgraph, idx);
if (best.feerate.IsEmpty() || accumulator.feerate >> best.feerate) {
best = accumulator;
}
@@ -427,6 +458,7 @@ FUZZ_TARGET(clusterlin_ancestor_finder)
while (todo.Any()) {
// Call the ancestor finder's FindCandidateSet for what remains of the graph.
assert(!anc_finder.AllDone());
+ assert(todo.Count() == anc_finder.NumRemaining());
auto best_anc = anc_finder.FindCandidateSet();
// Sanity check the result.
assert(best_anc.transactions.Any());
@@ -458,6 +490,7 @@ FUZZ_TARGET(clusterlin_ancestor_finder)
anc_finder.MarkDone(del_set);
}
assert(anc_finder.AllDone());
+ assert(anc_finder.NumRemaining() == 0);
}
static constexpr auto MAX_SIMPLE_ITERATIONS = 300000;
@@ -468,13 +501,17 @@ FUZZ_TARGET(clusterlin_search_finder)
// and comparing with the results from SimpleCandidateFinder, ExhaustiveCandidateFinder, and
// AncestorCandidateFinder.
- // Retrieve an RNG seed and a depgraph from the fuzz input.
+ // Retrieve an RNG seed, a depgraph, and whether to make it connected, from the fuzz input.
SpanReader reader(buffer);
DepGraph<TestBitSet> depgraph;
uint64_t rng_seed{0};
+ uint8_t make_connected{1};
try {
- reader >> Using<DepGraphFormatter>(depgraph) >> rng_seed;
+ reader >> Using<DepGraphFormatter>(depgraph) >> rng_seed >> make_connected;
} catch (const std::ios_base::failure&) {}
+ // The most complicated graphs are connected ones (other ones just split up). Optionally force
+ // the graph to be connected.
+ if (make_connected) MakeConnected(depgraph);
// Instantiate ALL the candidate finders.
SearchCandidateFinder src_finder(depgraph, rng_seed);
@@ -488,6 +525,7 @@ FUZZ_TARGET(clusterlin_search_finder)
assert(!smp_finder.AllDone());
assert(!exh_finder.AllDone());
assert(!anc_finder.AllDone());
+ assert(anc_finder.NumRemaining() == todo.Count());
// For each iteration, read an iteration count limit from the fuzz input.
uint64_t max_iterations = 1;
@@ -513,9 +551,17 @@ FUZZ_TARGET(clusterlin_search_finder)
assert(found.transactions.IsSupersetOf(depgraph.Ancestors(i) & todo));
}
- // At most 2^N-1 iterations can be required: the number of non-empty subsets a graph with N
- // transactions has.
- assert(iterations_done <= ((uint64_t{1} << todo.Count()) - 1));
+ // At most 2^(N-1) iterations can be required: the maximum number of non-empty topological
+ // subsets a (connected) cluster with N transactions can have. Even when the cluster is no
+ // longer connected after removing certain transactions, this holds, because the connected
+ // components are searched separately.
+ assert(iterations_done <= (uint64_t{1} << (todo.Count() - 1)));
+ // Additionally, test that no more than sqrt(2^N)+1 iterations are required. This is just
+ // an empirical bound that seems to hold, without proof. Still, add a test for it so we
+ // can learn about counterexamples if they exist.
+ if (iterations_done >= 1 && todo.Count() <= 63) {
+ Assume((iterations_done - 1) * (iterations_done - 1) <= uint64_t{1} << todo.Count());
+ }
// Perform quality checks only if SearchCandidateFinder claims an optimal result.
if (iterations_done < max_iterations) {
@@ -562,6 +608,7 @@ FUZZ_TARGET(clusterlin_search_finder)
assert(smp_finder.AllDone());
assert(exh_finder.AllDone());
assert(anc_finder.AllDone());
+ assert(anc_finder.NumRemaining() == 0);
}
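The Assume added above is just the integer form of the sqrt(2^N)+1 bound stated in the comment: for non-negative values,

    iterations_done <= sqrt(2^N) + 1   <=>   (iterations_done - 1)^2 <= 2^N   (when iterations_done >= 1)

since squaring is monotonic on non-negative integers. Working with the squared form avoids floating-point square roots, and the todo.Count() <= 63 guard keeps 2^N representable in a uint64_t.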
FUZZ_TARGET(clusterlin_linearization_chunking)
@@ -621,7 +668,7 @@ FUZZ_TARGET(clusterlin_linearization_chunking)
SetInfo<TestBitSet> accumulator, best;
for (auto j : linearization) {
if (todo[j] && !combined[j]) {
- accumulator |= SetInfo(depgraph, j);
+ accumulator.Set(depgraph, j);
if (best.feerate.IsEmpty() || accumulator.feerate > best.feerate) {
best = accumulator;
}
@@ -685,14 +732,19 @@ FUZZ_TARGET(clusterlin_linearize)
{
// Verify the behavior of Linearize().
- // Retrieve an RNG seed, an iteration count, and a depgraph from the fuzz input.
+ // Retrieve an RNG seed, an iteration count, a depgraph, and whether to make it connected from
+ // the fuzz input.
SpanReader reader(buffer);
DepGraph<TestBitSet> depgraph;
uint64_t rng_seed{0};
uint64_t iter_count{0};
+ uint8_t make_connected{1};
try {
- reader >> VARINT(iter_count) >> Using<DepGraphFormatter>(depgraph) >> rng_seed;
+ reader >> VARINT(iter_count) >> Using<DepGraphFormatter>(depgraph) >> rng_seed >> make_connected;
} catch (const std::ios_base::failure&) {}
+ // The most complicated graphs are connected ones (other ones just split up). Optionally force
+ // the graph to be connected.
+ if (make_connected) MakeConnected(depgraph);
// Optionally construct an old linearization for it.
std::vector<ClusterIndex> old_linearization;
@@ -721,12 +773,24 @@ FUZZ_TARGET(clusterlin_linearize)
}
// If the iteration count is sufficiently high, an optimal linearization must be found.
- // Each linearization step can use up to 2^k iterations, with steps k=1..n. That sum is
- // 2 * (2^n - 1)
+ // Each linearization step can use up to 2^(k-1) iterations, with steps k=1..n. That sum is
+ // 2^n - 1.
const uint64_t n = depgraph.TxCount();
- if (n <= 18 && iter_count > 2U * ((uint64_t{1} << n) - 1U)) {
+ if (n <= 19 && iter_count > (uint64_t{1} << n)) {
assert(optimal);
}
+ // Additionally, if the assumption of sqrt(2^k)+1 iterations per step holds, plus ceil(k/4)
+ // start-up cost per step, plus ceil(n^2/64) start-up cost overall, we can compute the upper
+ // bound for a whole linearization (summing for k=1..n) using the Python expression
+ // [sum((k+3)//4 + int(math.sqrt(2**k)) + 1 for k in range(1, n + 1)) + (n**2 + 63) // 64 for n in range(0, 35)]:
+ static constexpr uint64_t MAX_OPTIMAL_ITERS[] = {
+ 0, 4, 8, 12, 18, 26, 37, 51, 70, 97, 133, 182, 251, 346, 480, 666, 927, 1296, 1815, 2545,
+ 3576, 5031, 7087, 9991, 14094, 19895, 28096, 39690, 56083, 79263, 112041, 158391, 223936,
+ 316629, 447712
+ };
+ if (n < std::size(MAX_OPTIMAL_ITERS) && iter_count >= MAX_OPTIMAL_ITERS[n]) {
+ Assume(optimal);
+ }
// If Linearize claims optimal result, run quality tests.
if (optimal) {
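In conventional notation, the table entry for a cluster of n transactions given in the comment's Python expression is

    MAX_OPTIMAL_ITERS[n] = ceil(n^2 / 64) + sum over k = 1..n of ( ceil(k/4) + floor(sqrt(2^k)) + 1 )

which reproduces the values above; for example, n = 3 gives 1 + (3 + 4 + 4) = 12.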
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index e27b2065d5..b9e3154106 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -69,7 +69,7 @@ FUZZ_TARGET(integer, .init = initialize_integer)
const bool b = fuzzed_data_provider.ConsumeBool();
const Consensus::Params& consensus_params = Params().GetConsensus();
- (void)CheckProofOfWork(u256, u32, consensus_params);
+ (void)CheckProofOfWorkImpl(u256, u32, consensus_params);
if (u64 <= MAX_MONEY) {
const uint64_t compressed_money_amount = CompressAmount(u64);
assert(u64 == DecompressAmount(compressed_money_amount));
diff --git a/src/test/fuzz/p2p_headers_presync.cpp b/src/test/fuzz/p2p_headers_presync.cpp
new file mode 100644
index 0000000000..b86d03442d
--- /dev/null
+++ b/src/test/fuzz/p2p_headers_presync.cpp
@@ -0,0 +1,200 @@
+#include <blockencodings.h>
+#include <net.h>
+#include <net_processing.h>
+#include <netmessagemaker.h>
+#include <node/peerman_args.h>
+#include <pow.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <test/util/net.h>
+#include <test/util/script.h>
+#include <test/util/setup_common.h>
+#include <validation.h>
+
+namespace {
+constexpr uint32_t FUZZ_MAX_HEADERS_RESULTS{16};
+
+class HeadersSyncSetup : public TestingSetup
+{
+ std::vector<CNode*> m_connections;
+
+public:
+ HeadersSyncSetup(const ChainType chain_type = ChainType::MAIN,
+ TestOpts opts = {})
+ : TestingSetup(chain_type, opts)
+ {
+ PeerManager::Options peerman_opts;
+ node::ApplyArgsManOptions(*m_node.args, peerman_opts);
+ peerman_opts.max_headers_result = FUZZ_MAX_HEADERS_RESULTS;
+ m_node.peerman = PeerManager::make(*m_node.connman, *m_node.addrman,
+ m_node.banman.get(), *m_node.chainman,
+ *m_node.mempool, *m_node.warnings, peerman_opts);
+
+ CConnman::Options options;
+ options.m_msgproc = m_node.peerman.get();
+ m_node.connman->Init(options);
+ }
+
+ void ResetAndInitialize() EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
+ void SendMessage(FuzzedDataProvider& fuzzed_data_provider, CSerializedNetMsg&& msg)
+ EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
+};
+
+void HeadersSyncSetup::ResetAndInitialize()
+{
+ m_connections.clear();
+ auto& connman = static_cast<ConnmanTestMsg&>(*m_node.connman);
+ connman.StopNodes();
+
+ NodeId id{0};
+ std::vector<ConnectionType> conn_types = {
+ ConnectionType::OUTBOUND_FULL_RELAY,
+ ConnectionType::BLOCK_RELAY,
+ ConnectionType::INBOUND
+ };
+
+ for (auto conn_type : conn_types) {
+ CAddress addr{};
+ m_connections.push_back(new CNode(id++, nullptr, addr, 0, 0, addr, "", conn_type, false));
+ CNode& p2p_node = *m_connections.back();
+
+ connman.Handshake(
+ /*node=*/p2p_node,
+ /*successfully_connected=*/true,
+ /*remote_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS),
+ /*local_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS),
+ /*version=*/PROTOCOL_VERSION,
+ /*relay_txs=*/true);
+
+ connman.AddTestNode(p2p_node);
+ }
+}
+
+void HeadersSyncSetup::SendMessage(FuzzedDataProvider& fuzzed_data_provider, CSerializedNetMsg&& msg)
+{
+ auto& connman = static_cast<ConnmanTestMsg&>(*m_node.connman);
+ CNode& connection = *PickValue(fuzzed_data_provider, m_connections);
+
+ connman.FlushSendBuffer(connection);
+ (void)connman.ReceiveMsgFrom(connection, std::move(msg));
+ connection.fPauseSend = false;
+ try {
+ connman.ProcessMessagesOnce(connection);
+ } catch (const std::ios_base::failure&) {
+ }
+ m_node.peerman->SendMessages(&connection);
+}
+
+CBlockHeader ConsumeHeader(FuzzedDataProvider& fuzzed_data_provider, const uint256& prev_hash, uint32_t prev_nbits)
+{
+ CBlockHeader header;
+ header.nNonce = 0;
+ // Either use the previous difficulty or let the fuzzer choose
+ header.nBits = fuzzed_data_provider.ConsumeBool() ?
+ prev_nbits :
+ fuzzed_data_provider.ConsumeIntegralInRange<uint32_t>(0x17058EBE, 0x1D00FFFF);
+ header.nTime = ConsumeTime(fuzzed_data_provider);
+ header.hashPrevBlock = prev_hash;
+ header.nVersion = fuzzed_data_provider.ConsumeIntegral<int32_t>();
+ return header;
+}
+
+CBlock ConsumeBlock(FuzzedDataProvider& fuzzed_data_provider, const uint256& prev_hash, uint32_t prev_nbits)
+{
+ auto header = ConsumeHeader(fuzzed_data_provider, prev_hash, prev_nbits);
+ // In order to reach the headers acceptance logic, the block is
+ // constructed in a way that will pass the mutation checks.
+ CBlock block{header};
+ CMutableTransaction tx;
+ tx.vin.resize(1);
+ tx.vout.resize(1);
+ tx.vout[0].nValue = 0;
+ tx.vin[0].scriptSig.resize(2);
+ block.vtx.push_back(MakeTransactionRef(tx));
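+ // A block with a single transaction has a merkle root equal to that transaction's hash,
+ // so setting hashMerkleRoot below keeps the header's merkle commitment consistent.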
+ block.hashMerkleRoot = block.vtx[0]->GetHash();
+ return block;
+}
+
+void FinalizeHeader(CBlockHeader& header)
+{
+ while (!CheckProofOfWork(header.GetHash(), header.nBits, Params().GetConsensus())) {
+ ++(header.nNonce);
+ }
+}
+
+// Global setup works for this test as state modification (specifically in the
+// block index) would indicate a bug.
+HeadersSyncSetup* g_testing_setup;
+
+void initialize()
+{
+ static auto setup = MakeNoLogFileContext<HeadersSyncSetup>(ChainType::MAIN, {.extra_args = {"-checkpoints=0"}});
+ g_testing_setup = setup.get();
+}
+} // namespace
+
+FUZZ_TARGET(p2p_headers_presync, .init = initialize)
+{
+ ChainstateManager& chainman = *g_testing_setup->m_node.chainman;
+
+ LOCK(NetEventsInterface::g_msgproc_mutex);
+
+ g_testing_setup->ResetAndInitialize();
+
+ FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
+
+ CBlockHeader base{Params().GenesisBlock()};
+ SetMockTime(base.nTime);
+
+ // The chain is just a single block, so this is equal to 1
+ size_t original_index_size{WITH_LOCK(cs_main, return chainman.m_blockman.m_block_index.size())};
+
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100)
+ {
+ auto finalized_block = [&]() {
+ CBlock block = ConsumeBlock(fuzzed_data_provider, base.GetHash(), base.nBits);
+ FinalizeHeader(block);
+ return block;
+ };
+
+ // Send low-work headers, compact blocks, and blocks
+ CallOneOf(
+ fuzzed_data_provider,
+ [&]() NO_THREAD_SAFETY_ANALYSIS {
+ // Send FUZZ_MAX_HEADERS_RESULTS headers
+ std::vector<CBlock> headers;
+ headers.resize(FUZZ_MAX_HEADERS_RESULTS);
+ for (CBlock& header : headers) {
+ header = ConsumeHeader(fuzzed_data_provider, base.GetHash(), base.nBits);
+ FinalizeHeader(header);
+ base = header;
+ }
+
+ auto headers_msg = NetMsg::Make(NetMsgType::HEADERS, TX_WITH_WITNESS(headers));
+ g_testing_setup->SendMessage(fuzzed_data_provider, std::move(headers_msg));
+ },
+ [&]() NO_THREAD_SAFETY_ANALYSIS {
+ // Send a compact block
+ auto block = finalized_block();
+ CBlockHeaderAndShortTxIDs cmpct_block{block, fuzzed_data_provider.ConsumeIntegral<uint64_t>()};
+
+ auto headers_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, TX_WITH_WITNESS(cmpct_block));
+ g_testing_setup->SendMessage(fuzzed_data_provider, std::move(headers_msg));
+ },
+ [&]() NO_THREAD_SAFETY_ANALYSIS {
+ // Send a block
+ auto block = finalized_block();
+
+ auto headers_msg = NetMsg::Make(NetMsgType::BLOCK, TX_WITH_WITNESS(block));
+ g_testing_setup->SendMessage(fuzzed_data_provider, std::move(headers_msg));
+ });
+ }
+
+ // The headers/blocks sent in this test should never be stored, as the chains don't have the work required
+ // to meet the anti-DoS work threshold. So, if at any point the block index grew in size, then there's a bug
+ // in the headers pre-sync logic.
+ assert(WITH_LOCK(cs_main, return chainman.m_blockman.m_block_index.size()) == original_index_size);
+
+ g_testing_setup->m_node.validation_signals->SyncWithValidationInterfaceQueue();
+}
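Once the tree is built with fuzzing enabled, the new target can be exercised with the usual harness, e.g. FUZZ=p2p_headers_presync followed by the path to the fuzz binary (the exact path depends on the local build setup).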
diff --git a/src/test/fuzz/pow.cpp b/src/test/fuzz/pow.cpp
index 05cdb740e4..dba999ce4f 100644
--- a/src/test/fuzz/pow.cpp
+++ b/src/test/fuzz/pow.cpp
@@ -80,7 +80,7 @@ FUZZ_TARGET(pow, .init = initialize_pow)
{
const std::optional<uint256> hash = ConsumeDeserializable<uint256>(fuzzed_data_provider);
if (hash) {
- (void)CheckProofOfWork(*hash, fuzzed_data_provider.ConsumeIntegral<unsigned int>(), consensus_params);
+ (void)CheckProofOfWorkImpl(*hash, fuzzed_data_provider.ConsumeIntegral<unsigned int>(), consensus_params);
}
}
}
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 9217f05945..777122df6d 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -261,7 +261,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file)
for (uint8_t j = 0; j < 40; ++j) {
file << j;
}
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
// The buffer size (second arg) must be greater than the rewind
// amount (third arg).
@@ -391,7 +391,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_skip)
for (uint8_t j = 0; j < 40; ++j) {
file << j;
}
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
// The buffer is 25 bytes, allow rewinding 10 bytes.
BufferedFile bf{file, 25, 10};
@@ -444,7 +444,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
for (uint8_t i = 0; i < fileSize; ++i) {
file << i;
}
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
size_t bufSize = m_rng.randrange(300) + 1;
size_t rewindSize = m_rng.randrange(bufSize);
diff --git a/src/test/util/cluster_linearize.h b/src/test/util/cluster_linearize.h
index 9477d2ed41..b86ebcd78b 100644
--- a/src/test/util/cluster_linearize.h
+++ b/src/test/util/cluster_linearize.h
@@ -102,7 +102,7 @@ bool IsAcyclic(const DepGraph<SetType>& depgraph) noexcept
struct DepGraphFormatter
{
/** Convert x>=0 to 2x (even), x<0 to -2x-1 (odd). */
- static uint64_t SignedToUnsigned(int64_t x) noexcept
+ [[maybe_unused]] static uint64_t SignedToUnsigned(int64_t x) noexcept
{
if (x < 0) {
return 2 * uint64_t(-(x + 1)) + 1;
@@ -112,7 +112,7 @@ struct DepGraphFormatter
}
/** Convert even x to x/2 (>=0), odd x to -(x/2)-1 (<0). */
- static int64_t UnsignedToSigned(uint64_t x) noexcept
+ [[maybe_unused]] static int64_t UnsignedToSigned(uint64_t x) noexcept
{
if (x & 1) {
return -int64_t(x / 2) - 1;
@@ -186,7 +186,7 @@ struct DepGraphFormatter
/** The dependency graph which we deserialize into first, with transactions in
* topological serialization order, not original cluster order. */
DepGraph<SetType> topo_depgraph;
- /** Mapping from cluster order to serialization order, used later to reconstruct the
+ /** Mapping from serialization order to cluster order, used later to reconstruct the
* cluster order. */
std::vector<ClusterIndex> reordering;
@@ -205,9 +205,9 @@ struct DepGraphFormatter
coded_fee &= 0xFFFFFFFFFFFFF; // Enough for fee between -21M...21M BTC.
static_assert(0xFFFFFFFFFFFFF > uint64_t{2} * 21000000 * 100000000);
auto fee = UnsignedToSigned(coded_fee);
- // Extend topo_depgraph with the new transaction (at the end).
+ // Extend topo_depgraph with the new transaction (preliminarily at the end).
auto topo_idx = topo_depgraph.AddTransaction({fee, size});
- reordering.push_back(topo_idx);
+ reordering.push_back(reordering.size());
// Read dependency information.
uint64_t diff = 0; //!< How many potential parents we have to skip.
s >> VARINT(diff);
@@ -226,31 +226,23 @@ struct DepGraphFormatter
--diff;
}
}
- // If we reach this point, we can interpret the remaining skip value as how far from the
- // end of reordering topo_idx should be placed (wrapping around), so move it to its
- // correct location. The preliminary reordering.push_back(topo_idx) above was to make
- // sure that if a deserialization exception occurs, topo_idx still appears somewhere.
+ // If we reach this point, we can interpret the remaining skip value as how far
+ // from the end of reordering the new transaction should be placed (wrapping
+ // around), so remove the preliminary position it was put in above (which was to
+ // make sure that if a deserialization exception occurs, the new transaction still
+ // has some entry in reordering).
reordering.pop_back();
- reordering.insert(reordering.end() - (diff % (reordering.size() + 1)), topo_idx);
+ ClusterIndex insert_distance = diff % (reordering.size() + 1);
+ // And then update reordering to reflect this new transaction's insertion.
+ for (auto& pos : reordering) {
+ pos += (pos >= reordering.size() - insert_distance);
+ }
+ reordering.push_back(reordering.size() - insert_distance);
}
} catch (const std::ios_base::failure&) {}
// Construct the original cluster order depgraph.
- depgraph = {};
- // Add transactions to depgraph in the original cluster order.
- for (auto topo_idx : reordering) {
- depgraph.AddTransaction(topo_depgraph.FeeRate(topo_idx));
- }
- // Translate dependencies from topological to cluster order.
- for (ClusterIndex idx = 0; idx < reordering.size(); ++idx) {
- ClusterIndex topo_idx = reordering[idx];
- for (ClusterIndex dep_idx = 0; dep_idx < reordering.size(); ++dep_idx) {
- ClusterIndex dep_topo_idx = reordering[dep_idx];
- if (topo_depgraph.Ancestors(topo_idx)[dep_topo_idx]) {
- depgraph.AddDependency(dep_idx, idx);
- }
- }
- }
+ depgraph = DepGraph(topo_depgraph, reordering);
}
};
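The insertion arithmetic above is easiest to see on a tiny example. The following standalone sketch (not part of the patch) replays the update loop for three already-placed transactions and an insert_distance of 1, i.e. the new transaction goes one position before the end of the cluster order:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<uint32_t> reordering{0, 1, 2}; // serialization order -> cluster order so far
        uint32_t insert_distance{1};               // new transaction goes 1 position before the end
        // Existing transactions at or after the insertion point shift up by one cluster position.
        for (auto& pos : reordering) {
            pos += (pos >= reordering.size() - insert_distance);
        }
        // The new (topologically last) transaction takes the freed position.
        reordering.push_back(reordering.size() - insert_distance);
        for (auto pos : reordering) std::cout << pos << ' '; // prints: 0 1 3 2
        std::cout << '\n';
    }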
diff --git a/src/txdb.h b/src/txdb.h
index e0acb09e98..412d6c6009 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -25,8 +25,6 @@ class uint256;
static const int64_t nDefaultDbCache = 450;
//! -dbbatchsize default (bytes)
static const int64_t nDefaultDbBatchSize = 16 << 20;
-//! max. -dbcache (MiB)
-static const int64_t nMaxDbCache = sizeof(void*) > 4 ? 16384 : 1024;
//! min. -dbcache (MiB)
static const int64_t nMinDbCache = 4;
//! Max memory allocated to block tree DB specific cache, if no -txindex (MiB)
diff --git a/src/util/asmap.cpp b/src/util/asmap.cpp
index f50cd8a28c..04b0673c49 100644
--- a/src/util/asmap.cpp
+++ b/src/util/asmap.cpp
@@ -203,10 +203,10 @@ std::vector<bool> DecodeAsmap(fs::path path)
LogPrintf("Failed to open asmap file from disk\n");
return bits;
}
- fseek(filestr, 0, SEEK_END);
- int length = ftell(filestr);
+ file.seek(0, SEEK_END);
+ int length = file.tell();
LogPrintf("Opened asmap file %s (%d bytes) from disk\n", fs::quoted(fs::PathToString(path)), length);
- fseek(filestr, 0, SEEK_SET);
+ file.seek(0, SEEK_SET);
uint8_t cur_byte;
for (int i = 0; i < length; ++i) {
file >> cur_byte;
diff --git a/src/validation.cpp b/src/validation.cpp
index 8e4ea8eda2..38fcda55af 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -6014,7 +6014,7 @@ util::Result<void> ChainstateManager::PopulateAndValidateSnapshot(
index = snapshot_chainstate.m_chain[i];
// Fake BLOCK_OPT_WITNESS so that Chainstate::NeedsRedownload()
- // won't ask to rewind the entire assumed-valid chain on startup.
+ // won't ask for -reindex on startup.
if (DeploymentActiveAt(*index, *this, Consensus::DEPLOYMENT_SEGWIT)) {
index->nStatus |= BLOCK_OPT_WITNESS;
}
diff --git a/src/validation.h b/src/validation.h
index 059ae52bdf..6bc8a14de9 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -42,7 +42,6 @@
#include <span>
#include <stdint.h>
#include <string>
-#include <thread>
#include <type_traits>
#include <utility>
#include <vector>
@@ -1008,7 +1007,6 @@ public:
const util::SignalInterrupt& m_interrupt;
const Options m_options;
- std::thread m_thread_load;
//! A single BlockManager instance is shared across each constructed
//! chainstate to avoid duplicating block metadata.
node::BlockManager m_blockman;