Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.bench.include | 2
-rw-r--r--  src/Makefile.test.include | 6
-rw-r--r--  src/addrman.h | 2
-rw-r--r--  src/base58.cpp | 4
-rw-r--r--  src/bench/addrman.cpp | 36
-rw-r--r--  src/bench/base58.cpp | 24
-rw-r--r--  src/bench/bech32.cpp | 17
-rw-r--r--  src/bench/bench.cpp | 140
-rw-r--r--  src/bench/bench.h | 116
-rw-r--r--  src/bench/bench_bitcoin.cpp | 64
-rw-r--r--  src/bench/block_assemble.cpp | 8
-rw-r--r--  src/bench/ccoins_caching.cpp | 8
-rw-r--r--  src/bench/chacha20.cpp | 24
-rw-r--r--  src/bench/chacha_poly_aead.cpp | 67
-rw-r--r--  src/bench/checkblock.cpp | 16
-rw-r--r--  src/bench/checkqueue.cpp | 25
-rw-r--r--  src/bench/coin_selection.cpp | 16
-rw-r--r--  src/bench/crypto_hash.cpp | 68
-rw-r--r--  src/bench/duplicate_inputs.cpp | 8
-rw-r--r--  src/bench/examples.cpp | 20
-rw-r--r--  src/bench/gcs_filter.cpp | 16
-rw-r--r--  src/bench/hashpadding.cpp | 16
-rw-r--r--  src/bench/lockedpool.cpp | 31
-rw-r--r--  src/bench/mempool_eviction.cpp | 8
-rw-r--r--  src/bench/mempool_stress.cpp | 15
-rw-r--r--  src/bench/merkle_root.cpp | 8
-rw-r--r--  src/bench/nanobench.cpp | 6
-rw-r--r--  src/bench/nanobench.h | 3225
-rw-r--r--  src/bench/poly1305.cpp | 23
-rw-r--r--  src/bench/prevector.cpp | 91
-rw-r--r--  src/bench/rollingbloom.cpp | 16
-rw-r--r--  src/bench/rpc_blockchain.cpp | 9
-rw-r--r--  src/bench/rpc_mempool.cpp | 8
-rw-r--r--  src/bench/util_time.cpp | 32
-rw-r--r--  src/bench/verify_script.cpp | 21
-rw-r--r--  src/bench/wallet_balance.cpp | 22
-rw-r--r--  src/bitcoin-cli.cpp | 42
-rw-r--r--  src/bitcoin-tx.cpp | 46
-rw-r--r--  src/bitcoin-wallet.cpp | 22
-rw-r--r--  src/blockfilter.cpp | 8
-rw-r--r--  src/chainparams.cpp | 4
-rw-r--r--  src/chainparamsbase.cpp | 12
-rw-r--r--  src/chainparamsbase.h | 4
-rw-r--r--  src/coins.cpp | 8
-rw-r--r--  src/coins.h | 7
-rw-r--r--  src/dummywallet.cpp | 6
-rw-r--r--  src/hash.cpp | 2
-rw-r--r--  src/hash.h | 59
-rw-r--r--  src/init.cpp | 282
-rw-r--r--  src/key.cpp | 2
-rw-r--r--  src/merkleblock.cpp | 4
-rw-r--r--  src/net.cpp | 25
-rw-r--r--  src/net.h | 34
-rw-r--r--  src/net_permissions.cpp | 3
-rw-r--r--  src/net_permissions.h | 4
-rw-r--r--  src/net_processing.cpp | 136
-rw-r--r--  src/netaddress.cpp | 81
-rw-r--r--  src/netaddress.h | 61
-rw-r--r--  src/primitives/transaction.h | 15
-rw-r--r--  src/protocol.cpp | 6
-rw-r--r--  src/protocol.h | 32
-rw-r--r--  src/pubkey.h | 4
-rw-r--r--  src/qt/bitcoin.cpp | 16
-rw-r--r--  src/qt/test/apptests.cpp | 2
-rw-r--r--  src/rpc/blockchain.cpp | 12
-rw-r--r--  src/rpc/rawtransaction.cpp | 2
-rw-r--r--  src/script/interpreter.cpp | 4
-rw-r--r--  src/script/standard.cpp | 6
-rw-r--r--  src/script/standard.h | 3
-rw-r--r--  src/span.h | 12
-rw-r--r--  src/sync.cpp | 22
-rw-r--r--  src/sync.h | 14
-rw-r--r--  src/test/crypto_tests.cpp | 2
-rw-r--r--  src/test/fuzz/crypto.cpp | 11
-rw-r--r--  src/test/fuzz/key.cpp | 2
-rw-r--r--  src/test/fuzz/net_permissions.cpp | 1
-rw-r--r--  src/test/key_tests.cpp | 10
-rw-r--r--  src/test/merkle_tests.cpp | 13
-rw-r--r--  src/test/netbase_tests.cpp | 16
-rw-r--r--  src/test/script_standard_tests.cpp | 2
-rw-r--r--  src/test/script_tests.cpp | 2
-rw-r--r--  src/test/serialize_tests.cpp | 4
-rw-r--r--  src/test/settings_tests.cpp | 2
-rw-r--r--  src/test/sync_tests.cpp | 6
-rw-r--r--  src/test/system_tests.cpp | 95
-rw-r--r--  src/test/util/setup_common.cpp | 4
-rw-r--r--  src/test/util_tests.cpp | 8
-rw-r--r--  src/test/validation_chainstate_tests.cpp | 74
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp | 62
-rw-r--r--  src/test/validation_flush_tests.cpp | 24
-rw-r--r--  src/txdb.cpp | 43
-rw-r--r--  src/txdb.h | 10
-rw-r--r--  src/txmempool.cpp | 6
-rw-r--r--  src/txmempool.h | 12
-rw-r--r--  src/uint256.cpp | 10
-rw-r--r--  src/uint256.h | 29
-rw-r--r--  src/util/system.cpp | 43
-rw-r--r--  src/util/system.h | 12
-rw-r--r--  src/validation.cpp | 87
-rw-r--r--  src/validation.h | 32
-rw-r--r--  src/wallet/bdb.cpp | 170
-rw-r--r--  src/wallet/bdb.h | 22
-rw-r--r--  src/wallet/db.h | 41
-rw-r--r--  src/wallet/init.cpp | 50
-rw-r--r--  src/wallet/rpcwallet.cpp | 2
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 4
-rw-r--r--  src/wallet/walletdb.cpp | 14
-rw-r--r--  src/walletinitinterface.h | 4
108 files changed, 4891 insertions, 1258 deletions
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 93b5156af3..c224ca7bf6 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -33,6 +33,8 @@ bench_bench_bitcoin_SOURCES = \
bench/merkle_root.cpp \
bench/mempool_eviction.cpp \
bench/mempool_stress.cpp \
+ bench/nanobench.h \
+ bench/nanobench.cpp \
bench/rpc_blockchain.cpp \
bench/rpc_mempool.cpp \
bench/util_time.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 637d1d2f6e..085ebdcb8e 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -265,6 +265,7 @@ BITCOIN_TESTS =\
test/skiplist_tests.cpp \
test/streams_tests.cpp \
test/sync_tests.cpp \
+ test/system_tests.cpp \
test/util_threadnames_tests.cpp \
test/timedata_tests.cpp \
test/torcontrol_tests.cpp \
@@ -275,6 +276,7 @@ BITCOIN_TESTS =\
test/uint256_tests.cpp \
test/util_tests.cpp \
test/validation_block_tests.cpp \
+ test/validation_chainstate_tests.cpp \
test/validation_chainstatemanager_tests.cpp \
test/validation_flush_tests.cpp \
test/validationinterface_tests.cpp \
@@ -1236,8 +1238,8 @@ endif
if TARGET_WINDOWS
else
if ENABLE_BENCH
- @echo "Running bench/bench_bitcoin -evals=1 -scaling=0..."
- $(BENCH_BINARY) -evals=1 -scaling=0 > /dev/null
+ @echo "Running bench/bench_bitcoin ..."
+ $(BENCH_BINARY) > /dev/null
endif
endif
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C secp256k1 check
diff --git a/src/addrman.h b/src/addrman.h
index 8e82020df0..9e742339db 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -157,7 +157,7 @@ public:
#define ADDRMAN_GETADDR_MAX_PCT 23
//! the maximum number of nodes to return in a getaddr call
-#define ADDRMAN_GETADDR_MAX 2500
+#define ADDRMAN_GETADDR_MAX 1000
//! Convenience
#define ADDRMAN_TRIED_BUCKET_COUNT (1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2)
diff --git a/src/base58.cpp b/src/base58.cpp
index 6a9e21ffc2..9b2946e7a9 100644
--- a/src/base58.cpp
+++ b/src/base58.cpp
@@ -141,7 +141,7 @@ std::string EncodeBase58Check(const std::vector<unsigned char>& vchIn)
{
// add 4-byte hash check to the end
std::vector<unsigned char> vch(vchIn);
- uint256 hash = Hash(vch.begin(), vch.end());
+ uint256 hash = Hash(vch);
vch.insert(vch.end(), (unsigned char*)&hash, (unsigned char*)&hash + 4);
return EncodeBase58(vch);
}
@@ -154,7 +154,7 @@ bool DecodeBase58Check(const char* psz, std::vector<unsigned char>& vchRet, int
return false;
}
// re-calculate the checksum, ensure it matches the included 4-byte checksum
- uint256 hash = Hash(vchRet.begin(), vchRet.end() - 4);
+ uint256 hash = Hash(MakeSpan(vchRet).first(vchRet.size() - 4));
if (memcmp(&hash, &vchRet[vchRet.size() - 4], 4) != 0) {
vchRet.clear();
return false;
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
index cc260df2b8..26d9340768 100644
--- a/src/bench/addrman.cpp
+++ b/src/bench/addrman.cpp
@@ -67,52 +67,52 @@ static void FillAddrMan(CAddrMan& addrman)
/* Benchmarks */
-static void AddrManAdd(benchmark::State& state)
+static void AddrManAdd(benchmark::Bench& bench)
{
CreateAddresses();
CAddrMan addrman;
- while (state.KeepRunning()) {
+ bench.run([&] {
AddAddressesToAddrMan(addrman);
addrman.Clear();
- }
+ });
}
-static void AddrManSelect(benchmark::State& state)
+static void AddrManSelect(benchmark::Bench& bench)
{
CAddrMan addrman;
FillAddrMan(addrman);
- while (state.KeepRunning()) {
+ bench.run([&] {
const auto& address = addrman.Select();
assert(address.GetPort() > 0);
- }
+ });
}
-static void AddrManGetAddr(benchmark::State& state)
+static void AddrManGetAddr(benchmark::Bench& bench)
{
CAddrMan addrman;
FillAddrMan(addrman);
- while (state.KeepRunning()) {
+ bench.run([&] {
const auto& addresses = addrman.GetAddr();
assert(addresses.size() > 0);
- }
+ });
}
-static void AddrManGood(benchmark::State& state)
+static void AddrManGood(benchmark::Bench& bench)
{
/* Create many CAddrMan objects - one to be modified at each loop iteration.
* This is necessary because the CAddrMan::Good() method modifies the
* object, affecting the timing of subsequent calls to the same method and
* we want to do the same amount of work in every loop iteration. */
- const uint64_t numLoops = state.m_num_iters * state.m_num_evals;
+ bench.epochs(5).epochIterations(1);
- std::vector<CAddrMan> addrmans(numLoops);
+ std::vector<CAddrMan> addrmans(bench.epochs() * bench.epochIterations());
for (auto& addrman : addrmans) {
FillAddrMan(addrman);
}
@@ -128,13 +128,13 @@ static void AddrManGood(benchmark::State& state)
};
uint64_t i = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
markSomeAsGood(addrmans.at(i));
++i;
- }
+ });
}
-BENCHMARK(AddrManAdd, 5);
-BENCHMARK(AddrManSelect, 1000000);
-BENCHMARK(AddrManGetAddr, 500);
-BENCHMARK(AddrManGood, 2);
+BENCHMARK(AddrManAdd);
+BENCHMARK(AddrManSelect);
+BENCHMARK(AddrManGetAddr);
+BENCHMARK(AddrManGood);
diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp
index 0690483d50..00544cba31 100644
--- a/src/bench/base58.cpp
+++ b/src/bench/base58.cpp
@@ -10,7 +10,7 @@
#include <vector>
-static void Base58Encode(benchmark::State& state)
+static void Base58Encode(benchmark::Bench& bench)
{
static const std::array<unsigned char, 32> buff = {
{
@@ -19,13 +19,13 @@ static void Base58Encode(benchmark::State& state)
200, 24
}
};
- while (state.KeepRunning()) {
+ bench.batch(buff.size()).unit("byte").run([&] {
EncodeBase58(buff.data(), buff.data() + buff.size());
- }
+ });
}
-static void Base58CheckEncode(benchmark::State& state)
+static void Base58CheckEncode(benchmark::Bench& bench)
{
static const std::array<unsigned char, 32> buff = {
{
@@ -36,22 +36,22 @@ static void Base58CheckEncode(benchmark::State& state)
};
std::vector<unsigned char> vch;
vch.assign(buff.begin(), buff.end());
- while (state.KeepRunning()) {
+ bench.batch(buff.size()).unit("byte").run([&] {
EncodeBase58Check(vch);
- }
+ });
}
-static void Base58Decode(benchmark::State& state)
+static void Base58Decode(benchmark::Bench& bench)
{
const char* addr = "17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem";
std::vector<unsigned char> vch;
- while (state.KeepRunning()) {
+ bench.batch(strlen(addr)).unit("byte").run([&] {
(void) DecodeBase58(addr, vch, 64);
- }
+ });
}
-BENCHMARK(Base58Encode, 470 * 1000);
-BENCHMARK(Base58CheckEncode, 320 * 1000);
-BENCHMARK(Base58Decode, 800 * 1000);
+BENCHMARK(Base58Encode);
+BENCHMARK(Base58CheckEncode);
+BENCHMARK(Base58Decode);
diff --git a/src/bench/bech32.cpp b/src/bench/bech32.cpp
index 2107840a3a..c74d8d51b3 100644
--- a/src/bench/bech32.cpp
+++ b/src/bench/bech32.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
+#include <bench/nanobench.h>
#include <bech32.h>
#include <util/strencodings.h>
@@ -11,26 +12,26 @@
#include <vector>
-static void Bech32Encode(benchmark::State& state)
+static void Bech32Encode(benchmark::Bench& bench)
{
std::vector<uint8_t> v = ParseHex("c97f5a67ec381b760aeaf67573bc164845ff39a3bb26a1cee401ac67243b48db");
std::vector<unsigned char> tmp = {0};
tmp.reserve(1 + 32 * 8 / 5);
ConvertBits<8, 5, true>([&](unsigned char c) { tmp.push_back(c); }, v.begin(), v.end());
- while (state.KeepRunning()) {
+ bench.batch(v.size()).unit("byte").run([&] {
bech32::Encode("bc", tmp);
- }
+ });
}
-static void Bech32Decode(benchmark::State& state)
+static void Bech32Decode(benchmark::Bench& bench)
{
std::string addr = "bc1qkallence7tjawwvy0dwt4twc62qjgaw8f4vlhyd006d99f09";
- while (state.KeepRunning()) {
+ bench.batch(addr.size()).unit("byte").run([&] {
bech32::Decode(addr);
- }
+ });
}
-BENCHMARK(Bech32Encode, 800 * 1000);
-BENCHMARK(Bech32Decode, 800 * 1000);
+BENCHMARK(Bech32Encode);
+BENCHMARK(Bech32Decode);
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index 7b93ef688d..01466d0b6f 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -8,141 +8,73 @@
#include <test/util/setup_common.h>
#include <validation.h>
-#include <algorithm>
-#include <assert.h>
-#include <iomanip>
-#include <iostream>
-#include <numeric>
#include <regex>
const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
-void benchmark::ConsolePrinter::header()
-{
- std::cout << "# Benchmark, evals, iterations, total, min, max, median" << std::endl;
-}
+namespace {
-void benchmark::ConsolePrinter::result(const State& state)
+void GenerateTemplateResults(const std::vector<ankerl::nanobench::Result>& benchmarkResults, const std::string& filename, const char* tpl)
{
- auto results = state.m_elapsed_results;
- std::sort(results.begin(), results.end());
-
- double total = state.m_num_iters * std::accumulate(results.begin(), results.end(), 0.0);
-
- double front = 0;
- double back = 0;
- double median = 0;
-
- if (!results.empty()) {
- front = results.front();
- back = results.back();
-
- size_t mid = results.size() / 2;
- median = results[mid];
- if (0 == results.size() % 2) {
- median = (results[mid] + results[mid + 1]) / 2;
- }
+ if (benchmarkResults.empty() || filename.empty()) {
+ // nothing to write, bail out
+ return;
}
-
- std::cout << std::setprecision(6);
- std::cout << state.m_name << ", " << state.m_num_evals << ", " << state.m_num_iters << ", " << total << ", " << front << ", " << back << ", " << median << std::endl;
-}
-
-void benchmark::ConsolePrinter::footer() {}
-benchmark::PlotlyPrinter::PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height)
- : m_plotly_url(plotly_url), m_width(width), m_height(height)
-{
-}
-
-void benchmark::PlotlyPrinter::header()
-{
- std::cout << "<html><head>"
- << "<script src=\"" << m_plotly_url << "\"></script>"
- << "</head><body><div id=\"myDiv\" style=\"width:" << m_width << "px; height:" << m_height << "px\"></div>"
- << "<script> var data = ["
- << std::endl;
-}
-
-void benchmark::PlotlyPrinter::result(const State& state)
-{
- std::cout << "{ " << std::endl
- << " name: '" << state.m_name << "', " << std::endl
- << " y: [";
-
- const char* prefix = "";
- for (const auto& e : state.m_elapsed_results) {
- std::cout << prefix << std::setprecision(6) << e;
- prefix = ", ";
+ std::ofstream fout(filename);
+ if (fout.is_open()) {
+ ankerl::nanobench::render(tpl, benchmarkResults, fout);
+ } else {
+ std::cout << "Could write to file '" << filename << "'" << std::endl;
}
- std::cout << "]," << std::endl
- << " boxpoints: 'all', jitter: 0.3, pointpos: 0, type: 'box',"
- << std::endl
- << "}," << std::endl;
-}
-void benchmark::PlotlyPrinter::footer()
-{
- std::cout << "]; var layout = { showlegend: false, yaxis: { rangemode: 'tozero', autorange: true } };"
- << "Plotly.newPlot('myDiv', data, layout);"
- << "</script></body></html>";
+ std::cout << "Created '" << filename << "'" << std::endl;
}
+} // namespace
benchmark::BenchRunner::BenchmarkMap& benchmark::BenchRunner::benchmarks()
{
- static std::map<std::string, Bench> benchmarks_map;
+ static std::map<std::string, BenchFunction> benchmarks_map;
return benchmarks_map;
}
-benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func, uint64_t num_iters_for_one_second)
+benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func)
{
- benchmarks().insert(std::make_pair(name, Bench{func, num_iters_for_one_second}));
+ benchmarks().insert(std::make_pair(name, func));
}
-void benchmark::BenchRunner::RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only)
+void benchmark::BenchRunner::RunAll(const Args& args)
{
- if (!std::ratio_less_equal<benchmark::clock::period, std::micro>::value) {
- std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n";
- }
-#ifdef DEBUG
- std::cerr << "WARNING: This is a debug build - may result in slower benchmarks.\n";
-#endif
-
- std::regex reFilter(filter);
+ std::regex reFilter(args.regex_filter);
std::smatch baseMatch;
- printer.header();
-
+ std::vector<ankerl::nanobench::Result> benchmarkResults;
for (const auto& p : benchmarks()) {
if (!std::regex_match(p.first, baseMatch, reFilter)) {
continue;
}
- uint64_t num_iters = static_cast<uint64_t>(p.second.num_iters_for_one_second * scaling);
- if (0 == num_iters) {
- num_iters = 1;
- }
- State state(p.first, num_evals, num_iters, printer);
- if (!is_list_only) {
- p.second.func(state);
+ if (args.is_list_only) {
+ std::cout << p.first << std::endl;
+ continue;
}
- printer.result(state);
- }
-
- printer.footer();
-}
-
-bool benchmark::State::UpdateTimer(const benchmark::time_point current_time)
-{
- if (m_start_time != time_point()) {
- std::chrono::duration<double> diff = current_time - m_start_time;
- m_elapsed_results.push_back(diff.count() / m_num_iters);
- if (m_elapsed_results.size() == m_num_evals) {
- return false;
+ Bench bench;
+ bench.name(p.first);
+ if (args.asymptote.empty()) {
+ p.second(bench);
+ } else {
+ for (auto n : args.asymptote) {
+ bench.complexityN(n);
+ p.second(bench);
+ }
+ std::cout << bench.complexityBigO() << std::endl;
}
+ benchmarkResults.push_back(bench.results().back());
}
- m_num_iters_left = m_num_iters - 1;
- return true;
+ GenerateTemplateResults(benchmarkResults, args.output_csv, "# Benchmark, evals, iterations, total, min, max, median\n"
+ "{{#result}}{{name}}, {{epochs}}, {{average(iterations)}}, {{sumProduct(iterations, elapsed)}}, {{minimum(elapsed)}}, {{maximum(elapsed)}}, {{median(elapsed)}}\n"
+ "{{/result}}");
+ GenerateTemplateResults(benchmarkResults, args.output_json, ankerl::nanobench::templates::json());
}
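
Both the CSV and JSON outputs above are produced by nanobench's mustache-style render(). A minimal standalone sketch of the same mechanism (hypothetical WriteCsv helper and output path, not part of this diff):

#include <bench/nanobench.h>

#include <fstream>
#include <vector>

// Render benchmark results with the same CSV template that RunAll() hands to
// GenerateTemplateResults(): one header line, then one line per result.
static void WriteCsv(const std::vector<ankerl::nanobench::Result>& results)
{
    std::ofstream fout("bench_results.csv"); // hypothetical output path
    if (!fout.is_open()) return;
    ankerl::nanobench::render(
        "# Benchmark, evals, iterations, total, min, max, median\n"
        "{{#result}}{{name}}, {{epochs}}, {{average(iterations)}}, "
        "{{sumProduct(iterations, elapsed)}}, {{minimum(elapsed)}}, "
        "{{maximum(elapsed)}}, {{median(elapsed)}}\n"
        "{{/result}}",
        results, fout);
}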
diff --git a/src/bench/bench.h b/src/bench/bench.h
index 629bca9a73..bafc7f8716 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -11,131 +11,53 @@
#include <string>
#include <vector>
+#include <bench/nanobench.h>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/stringize.hpp>
-// Simple micro-benchmarking framework; API mostly matches a subset of the Google Benchmark
-// framework (see https://github.com/google/benchmark)
-// Why not use the Google Benchmark framework? Because adding Yet Another Dependency
-// (that uses cmake as its build system and has lots of features we don't need) isn't
-// worth it.
-
/*
* Usage:
-static void CODE_TO_TIME(benchmark::State& state)
+static void CODE_TO_TIME(benchmark::Bench& bench)
{
... do any setup needed...
- while (state.KeepRunning()) {
+ bench.run([&] {
... do stuff you want to time...
- }
+ });
... do any cleanup needed...
}
-// default to running benchmark for 5000 iterations
-BENCHMARK(CODE_TO_TIME, 5000);
+BENCHMARK(CODE_TO_TIME);
*/
namespace benchmark {
-// In case high_resolution_clock is steady, prefer that, otherwise use steady_clock.
-struct best_clock {
- using hi_res_clock = std::chrono::high_resolution_clock;
- using steady_clock = std::chrono::steady_clock;
- using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type;
-};
-using clock = best_clock::type;
-using time_point = clock::time_point;
-using duration = clock::duration;
-
-class Printer;
-
-class State
-{
-public:
- std::string m_name;
- uint64_t m_num_iters_left;
- const uint64_t m_num_iters;
- const uint64_t m_num_evals;
- std::vector<double> m_elapsed_results;
- time_point m_start_time;
- bool UpdateTimer(time_point finish_time);
+using ankerl::nanobench::Bench;
- State(std::string name, uint64_t num_evals, double num_iters, Printer& printer) : m_name(name), m_num_iters_left(0), m_num_iters(num_iters), m_num_evals(num_evals)
- {
- }
+typedef std::function<void(Bench&)> BenchFunction;
- inline bool KeepRunning()
- {
- if (m_num_iters_left--) {
- return true;
- }
-
- bool result = UpdateTimer(clock::now());
- // measure again so runtime of UpdateTimer is not included
- m_start_time = clock::now();
- return result;
- }
+struct Args {
+ std::string regex_filter;
+ bool is_list_only;
+ std::vector<double> asymptote;
+ std::string output_csv;
+ std::string output_json;
};
-typedef std::function<void(State&)> BenchFunction;
-
class BenchRunner
{
- struct Bench {
- BenchFunction func;
- uint64_t num_iters_for_one_second;
- };
- typedef std::map<std::string, Bench> BenchmarkMap;
+ typedef std::map<std::string, BenchFunction> BenchmarkMap;
static BenchmarkMap& benchmarks();
public:
- BenchRunner(std::string name, BenchFunction func, uint64_t num_iters_for_one_second);
-
- static void RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only);
-};
+ BenchRunner(std::string name, BenchFunction func);
-// interface to output benchmark results.
-class Printer
-{
-public:
- virtual ~Printer() {}
- virtual void header() = 0;
- virtual void result(const State& state) = 0;
- virtual void footer() = 0;
-};
-
-// default printer to console, shows min, max, median.
-class ConsolePrinter : public Printer
-{
-public:
- void header() override;
- void result(const State& state) override;
- void footer() override;
-};
-
-// creates box plot with plotly.js
-class PlotlyPrinter : public Printer
-{
-public:
- PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height);
- void header() override;
- void result(const State& state) override;
- void footer() override;
-
-private:
- std::string m_plotly_url;
- int64_t m_width;
- int64_t m_height;
+ static void RunAll(const Args& args);
};
}
-
-
-// BENCHMARK(foo, num_iters_for_one_second) expands to: benchmark::BenchRunner bench_11foo("foo", num_iterations);
-// Choose a num_iters_for_one_second that takes roughly 1 second. The goal is that all benchmarks should take approximately
-// the same time, and scaling factor can be used that the total time is appropriate for your system.
-#define BENCHMARK(n, num_iters_for_one_second) \
- benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n, (num_iters_for_one_second));
+// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo");
+#define BENCHMARK(n) \
+ benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n);
#endif // BITCOIN_BENCH_BENCH_H
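
For reference, here is what a benchmark looks like against the new interface, as a minimal hypothetical sketch (not part of this diff): the registered function receives a benchmark::Bench, optionally sets batch size and unit, and passes the timed body to run(); BENCHMARK() no longer takes an iteration count.

#include <bench/bench.h>
#include <crypto/sha256.h>

#include <cstdint>
#include <vector>

static void Sha256Example(benchmark::Bench& bench)
{
    std::vector<uint8_t> in(1024, 0); // setup runs once, outside the timed lambda
    // nanobench decides how many iterations to run; batch()/unit() only affect reporting.
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256().Write(in.data(), in.size()).Finalize(in.data());
    });
}
BENCHMARK(Sha256Example);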
diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp
index 1f872ce700..135659f87f 100644
--- a/src/bench/bench_bitcoin.cpp
+++ b/src/bench/bench_bitcoin.cpp
@@ -10,26 +10,30 @@
#include <memory>
-static const int64_t DEFAULT_BENCH_EVALUATIONS = 5;
static const char* DEFAULT_BENCH_FILTER = ".*";
-static const char* DEFAULT_BENCH_SCALING = "1.0";
-static const char* DEFAULT_BENCH_PRINTER = "console";
-static const char* DEFAULT_PLOT_PLOTLYURL = "https://cdn.plot.ly/plotly-latest.min.js";
-static const int64_t DEFAULT_PLOT_WIDTH = 1024;
-static const int64_t DEFAULT_PLOT_HEIGHT = 768;
static void SetupBenchArgs(ArgsManager& argsman)
{
SetupHelpOptions(argsman);
- argsman.AddArg("-list", "List benchmarks without executing them. Can be combined with -scaling and -filter", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-evals=<n>", strprintf("Number of measurement evaluations to perform. (default: %u)", DEFAULT_BENCH_EVALUATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-filter=<regex>", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-scaling=<n>", strprintf("Scaling factor for benchmark's runtime (default: %u)", DEFAULT_BENCH_SCALING), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-printer=(console|plot)", strprintf("Choose printer format. console: print data to console. plot: Print results as HTML graph (default: %s)", DEFAULT_BENCH_PRINTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-plot-plotlyurl=<uri>", strprintf("URL to use for plotly.js (default: %s)", DEFAULT_PLOT_PLOTLYURL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-plot-width=<x>", strprintf("Plot width in pixel (default: %u)", DEFAULT_PLOT_WIDTH), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-plot-height=<x>", strprintf("Plot height in pixel (default: %u)", DEFAULT_PLOT_HEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-asymptote=n1,n2,n3,...", strprintf("Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-output_csv=<output.csv>", "Generate CSV file with the most important benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-output_json=<output.json>", "Generate JSON file with all benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+}
+
+// parses a comma separated list like "10,20,30,50"
+static std::vector<double> parseAsymptote(const std::string& str) {
+ std::stringstream ss(str);
+ std::vector<double> numbers;
+ double d;
+ char c;
+ while (ss >> d) {
+ numbers.push_back(d);
+ ss >> c;
+ }
+ return numbers;
}
int main(int argc, char** argv)
@@ -49,34 +53,14 @@ int main(int argc, char** argv)
return EXIT_SUCCESS;
}
- int64_t evaluations = argsman.GetArg("-evals", DEFAULT_BENCH_EVALUATIONS);
- std::string regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
- std::string scaling_str = argsman.GetArg("-scaling", DEFAULT_BENCH_SCALING);
- bool is_list_only = argsman.GetBoolArg("-list", false);
-
- if (evaluations == 0) {
- return EXIT_SUCCESS;
- } else if (evaluations < 0) {
- tfm::format(std::cerr, "Error parsing evaluations argument: %d\n", evaluations);
- return EXIT_FAILURE;
- }
-
- double scaling_factor;
- if (!ParseDouble(scaling_str, &scaling_factor)) {
- tfm::format(std::cerr, "Error parsing scaling factor as double: %s\n", scaling_str);
- return EXIT_FAILURE;
- }
-
- std::unique_ptr<benchmark::Printer> printer = MakeUnique<benchmark::ConsolePrinter>();
- std::string printer_arg = argsman.GetArg("-printer", DEFAULT_BENCH_PRINTER);
- if ("plot" == printer_arg) {
- printer.reset(new benchmark::PlotlyPrinter(
- argsman.GetArg("-plot-plotlyurl", DEFAULT_PLOT_PLOTLYURL),
- argsman.GetArg("-plot-width", DEFAULT_PLOT_WIDTH),
- argsman.GetArg("-plot-height", DEFAULT_PLOT_HEIGHT)));
- }
+ benchmark::Args args;
+ args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
+ args.is_list_only = argsman.GetBoolArg("-list", false);
+ args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", ""));
+ args.output_csv = argsman.GetArg("-output_csv", "");
+ args.output_json = argsman.GetArg("-output_json", "");
- benchmark::BenchRunner::RunAll(*printer, evaluations, scaling_factor, regex_filter, is_list_only);
+ benchmark::BenchRunner::RunAll(args);
return EXIT_SUCCESS;
}
diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp
index 268f67cada..3f15f3f856 100644
--- a/src/bench/block_assemble.cpp
+++ b/src/bench/block_assemble.cpp
@@ -14,7 +14,7 @@
#include <vector>
-static void AssembleBlock(benchmark::State& state)
+static void AssembleBlock(benchmark::Bench& bench)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -54,9 +54,9 @@ static void AssembleBlock(benchmark::State& state)
}
}
- while (state.KeepRunning()) {
+ bench.run([&] {
PrepareBlock(test_setup.m_node, SCRIPT_PUB);
- }
+ });
}
-BENCHMARK(AssembleBlock, 700);
+BENCHMARK(AssembleBlock);
diff --git a/src/bench/ccoins_caching.cpp b/src/bench/ccoins_caching.cpp
index 86f9a0bf67..116de98b14 100644
--- a/src/bench/ccoins_caching.cpp
+++ b/src/bench/ccoins_caching.cpp
@@ -16,7 +16,7 @@
// characteristics than e.g. reindex timings. But that's not a requirement of
// every benchmark."
// (https://github.com/bitcoin/bitcoin/issues/7883#issuecomment-224807484)
-static void CCoinsCaching(benchmark::State& state)
+static void CCoinsCaching(benchmark::Bench& bench)
{
const ECCVerifyHandle verify_handle;
ECC_Start();
@@ -44,11 +44,11 @@ static void CCoinsCaching(benchmark::State& state)
// Benchmark.
const CTransaction tx_1(t1);
- while (state.KeepRunning()) {
+ bench.run([&] {
bool success = AreInputsStandard(tx_1, coins);
assert(success);
- }
+ });
ECC_Stop();
}
-BENCHMARK(CCoinsCaching, 170 * 1000);
+BENCHMARK(CCoinsCaching);
diff --git a/src/bench/chacha20.cpp b/src/bench/chacha20.cpp
index f1b0a9a989..913e0f8d57 100644
--- a/src/bench/chacha20.cpp
+++ b/src/bench/chacha20.cpp
@@ -11,7 +11,7 @@ static const uint64_t BUFFER_SIZE_TINY = 64;
static const uint64_t BUFFER_SIZE_SMALL = 256;
static const uint64_t BUFFER_SIZE_LARGE = 1024*1024;
-static void CHACHA20(benchmark::State& state, size_t buffersize)
+static void CHACHA20(benchmark::Bench& bench, size_t buffersize)
{
std::vector<uint8_t> key(32,0);
ChaCha20 ctx(key.data(), key.size());
@@ -19,26 +19,26 @@ static void CHACHA20(benchmark::State& state, size_t buffersize)
ctx.Seek(0);
std::vector<uint8_t> in(buffersize,0);
std::vector<uint8_t> out(buffersize,0);
- while (state.KeepRunning()) {
+ bench.batch(in.size()).unit("byte").run([&] {
ctx.Crypt(in.data(), out.data(), in.size());
- }
+ });
}
-static void CHACHA20_64BYTES(benchmark::State& state)
+static void CHACHA20_64BYTES(benchmark::Bench& bench)
{
- CHACHA20(state, BUFFER_SIZE_TINY);
+ CHACHA20(bench, BUFFER_SIZE_TINY);
}
-static void CHACHA20_256BYTES(benchmark::State& state)
+static void CHACHA20_256BYTES(benchmark::Bench& bench)
{
- CHACHA20(state, BUFFER_SIZE_SMALL);
+ CHACHA20(bench, BUFFER_SIZE_SMALL);
}
-static void CHACHA20_1MB(benchmark::State& state)
+static void CHACHA20_1MB(benchmark::Bench& bench)
{
- CHACHA20(state, BUFFER_SIZE_LARGE);
+ CHACHA20(bench, BUFFER_SIZE_LARGE);
}
-BENCHMARK(CHACHA20_64BYTES, 500000);
-BENCHMARK(CHACHA20_256BYTES, 250000);
-BENCHMARK(CHACHA20_1MB, 340);
+BENCHMARK(CHACHA20_64BYTES);
+BENCHMARK(CHACHA20_256BYTES);
+BENCHMARK(CHACHA20_1MB);
diff --git a/src/bench/chacha_poly_aead.cpp b/src/bench/chacha_poly_aead.cpp
index df10f27d03..3b1d3e697a 100644
--- a/src/bench/chacha_poly_aead.cpp
+++ b/src/bench/chacha_poly_aead.cpp
@@ -21,7 +21,7 @@ static const unsigned char k2[32] = {0};
static ChaCha20Poly1305AEAD aead(k1, 32, k2, 32);
-static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, bool include_decryption)
+static void CHACHA20_POLY1305_AEAD(benchmark::Bench& bench, size_t buffersize, bool include_decryption)
{
std::vector<unsigned char> in(buffersize + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0);
std::vector<unsigned char> out(buffersize + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0);
@@ -29,7 +29,7 @@ static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, b
uint64_t seqnr_aad = 0;
int aad_pos = 0;
uint32_t len = 0;
- while (state.KeepRunning()) {
+ bench.batch(buffersize).unit("byte").run([&] {
// encrypt or decrypt the buffer with a static key
assert(aead.Crypt(seqnr_payload, seqnr_aad, aad_pos, out.data(), out.size(), in.data(), buffersize, true));
@@ -53,70 +53,71 @@ static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, b
seqnr_aad = 0;
aad_pos = 0;
}
- }
+ });
}
-static void CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_TINY, false);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_TINY, false);
}
-static void CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_SMALL, false);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_SMALL, false);
}
-static void CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_LARGE, false);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_LARGE, false);
}
-static void CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_TINY, true);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_TINY, true);
}
-static void CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_SMALL, true);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_SMALL, true);
}
-static void CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_LARGE, true);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_LARGE, true);
}
// Add Hash() (dbl-sha256) bench for comparison
-static void HASH(benchmark::State& state, size_t buffersize)
+static void HASH(benchmark::Bench& bench, size_t buffersize)
{
uint8_t hash[CHash256::OUTPUT_SIZE];
std::vector<uint8_t> in(buffersize,0);
- while (state.KeepRunning())
- CHash256().Write(in.data(), in.size()).Finalize(hash);
+ bench.batch(in.size()).unit("byte").run([&] {
+ CHash256().Write(in).Finalize(hash);
+ });
}
-static void HASH_64BYTES(benchmark::State& state)
+static void HASH_64BYTES(benchmark::Bench& bench)
{
- HASH(state, BUFFER_SIZE_TINY);
+ HASH(bench, BUFFER_SIZE_TINY);
}
-static void HASH_256BYTES(benchmark::State& state)
+static void HASH_256BYTES(benchmark::Bench& bench)
{
- HASH(state, BUFFER_SIZE_SMALL);
+ HASH(bench, BUFFER_SIZE_SMALL);
}
-static void HASH_1MB(benchmark::State& state)
+static void HASH_1MB(benchmark::Bench& bench)
{
- HASH(state, BUFFER_SIZE_LARGE);
+ HASH(bench, BUFFER_SIZE_LARGE);
}
-BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT, 500000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT, 250000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT, 340);
-BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT, 500000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT, 250000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT, 340);
-BENCHMARK(HASH_64BYTES, 500000);
-BENCHMARK(HASH_256BYTES, 250000);
-BENCHMARK(HASH_1MB, 340);
+BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT);
+BENCHMARK(HASH_64BYTES);
+BENCHMARK(HASH_256BYTES);
+BENCHMARK(HASH_1MB);
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index 2b2c78905e..dc0aa4031c 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -14,21 +14,21 @@
// a block off the wire, but before we can relay the block on to peers using
// compact block relay.
-static void DeserializeBlockTest(benchmark::State& state)
+static void DeserializeBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
char a = '\0';
stream.write(&a, 1); // Prevent compaction
- while (state.KeepRunning()) {
+ bench.unit("block").run([&] {
CBlock block;
stream >> block;
bool rewound = stream.Rewind(benchmark::data::block413567.size());
assert(rewound);
- }
+ });
}
-static void DeserializeAndCheckBlockTest(benchmark::State& state)
+static void DeserializeAndCheckBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
char a = '\0';
@@ -36,7 +36,7 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
const auto chainParams = CreateChainParams(CBaseChainParams::MAIN);
- while (state.KeepRunning()) {
+ bench.unit("block").run([&] {
CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here
stream >> block;
bool rewound = stream.Rewind(benchmark::data::block413567.size());
@@ -45,8 +45,8 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
BlockValidationState validationState;
bool checked = CheckBlock(block, validationState, chainParams->GetConsensus());
assert(checked);
- }
+ });
}
-BENCHMARK(DeserializeBlockTest, 130);
-BENCHMARK(DeserializeAndCheckBlockTest, 160);
+BENCHMARK(DeserializeBlockTest);
+BENCHMARK(DeserializeAndCheckBlockTest);
diff --git a/src/bench/checkqueue.cpp b/src/bench/checkqueue.cpp
index e052681181..19d7bc0dbc 100644
--- a/src/bench/checkqueue.cpp
+++ b/src/bench/checkqueue.cpp
@@ -24,7 +24,7 @@ static const unsigned int QUEUE_BATCH_SIZE = 128;
// This Benchmark tests the CheckQueue with a slightly realistic workload,
// where checks all contain a prevector that is indirect 50% of the time
// and there is a little bit of work done between calls to Add.
-static void CCheckQueueSpeedPrevectorJob(benchmark::State& state)
+static void CCheckQueueSpeedPrevectorJob(benchmark::Bench& bench)
{
const ECCVerifyHandle verify_handle;
ECC_Start();
@@ -47,23 +47,28 @@ static void CCheckQueueSpeedPrevectorJob(benchmark::State& state)
for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) {
tg.create_thread([&]{queue.Thread();});
}
- while (state.KeepRunning()) {
+
+ // create all the data once, then submit copies in the benchmark.
+ FastRandomContext insecure_rand(true);
+ std::vector<std::vector<PrevectorJob>> vBatches(BATCHES);
+ for (auto& vChecks : vBatches) {
+ vChecks.reserve(BATCH_SIZE);
+ for (size_t x = 0; x < BATCH_SIZE; ++x)
+ vChecks.emplace_back(insecure_rand);
+ }
+
+ bench.minEpochIterations(10).batch(BATCH_SIZE * BATCHES).unit("job").run([&] {
// Make insecure_rand here so that each iteration is identical.
- FastRandomContext insecure_rand(true);
CCheckQueueControl<PrevectorJob> control(&queue);
- std::vector<std::vector<PrevectorJob>> vBatches(BATCHES);
- for (auto& vChecks : vBatches) {
- vChecks.reserve(BATCH_SIZE);
- for (size_t x = 0; x < BATCH_SIZE; ++x)
- vChecks.emplace_back(insecure_rand);
+ for (auto vChecks : vBatches) {
control.Add(vChecks);
}
// control waits for completion by RAII, but
// it is done explicitly here for clarity
control.Wait();
- }
+ });
tg.interrupt_all();
tg.join_all();
ECC_Stop();
}
-BENCHMARK(CCheckQueueSpeedPrevectorJob, 1400);
+BENCHMARK(CCheckQueueSpeedPrevectorJob);
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index f2d12531d7..3a71a6ca03 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -27,7 +27,7 @@ static void addCoin(const CAmount& nValue, const CWallet& wallet, std::vector<st
// same one over and over isn't too useful. Generating random isn't useful
// either for measurements."
// (https://github.com/bitcoin/bitcoin/issues/7883#issuecomment-224807484)
-static void CoinSelection(benchmark::State& state)
+static void CoinSelection(benchmark::Bench& bench)
{
NodeContext node;
auto chain = interfaces::MakeChain(node);
@@ -51,7 +51,7 @@ static void CoinSelection(benchmark::State& state)
const CoinEligibilityFilter filter_standard(1, 6, 0);
const CoinSelectionParams coin_selection_params(true, 34, 148, CFeeRate(0), 0);
- while (state.KeepRunning()) {
+ bench.run([&] {
std::set<CInputCoin> setCoinsRet;
CAmount nValueRet;
bool bnb_used;
@@ -59,7 +59,7 @@ static void CoinSelection(benchmark::State& state)
assert(success);
assert(nValueRet == 1003 * COIN);
assert(setCoinsRet.size() == 2);
- }
+ });
}
typedef std::set<CInputCoin> CoinSet;
@@ -91,7 +91,7 @@ static CAmount make_hard_case(int utxos, std::vector<OutputGroup>& utxo_pool)
return target;
}
-static void BnBExhaustion(benchmark::State& state)
+static void BnBExhaustion(benchmark::Bench& bench)
{
// Setup
testWallet.SetupLegacyScriptPubKeyMan();
@@ -100,7 +100,7 @@ static void BnBExhaustion(benchmark::State& state)
CAmount value_ret = 0;
CAmount not_input_fees = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
// Benchmark
CAmount target = make_hard_case(17, utxo_pool);
SelectCoinsBnB(utxo_pool, target, 0, selection, value_ret, not_input_fees); // Should exhaust
@@ -108,8 +108,8 @@ static void BnBExhaustion(benchmark::State& state)
// Cleanup
utxo_pool.clear();
selection.clear();
- }
+ });
}
-BENCHMARK(CoinSelection, 650);
-BENCHMARK(BnBExhaustion, 650);
+BENCHMARK(CoinSelection);
+BENCHMARK(BnBExhaustion);
diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp
index ddcef5121e..36be86bcc8 100644
--- a/src/bench/crypto_hash.cpp
+++ b/src/bench/crypto_hash.cpp
@@ -16,88 +16,92 @@
/* Number of bytes to hash per iteration */
static const uint64_t BUFFER_SIZE = 1000*1000;
-static void RIPEMD160(benchmark::State& state)
+static void RIPEMD160(benchmark::Bench& bench)
{
uint8_t hash[CRIPEMD160::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CRIPEMD160().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SHA1(benchmark::State& state)
+static void SHA1(benchmark::Bench& bench)
{
uint8_t hash[CSHA1::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA1().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SHA256(benchmark::State& state)
+static void SHA256(benchmark::Bench& bench)
{
uint8_t hash[CSHA256::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA256().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SHA256_32b(benchmark::State& state)
+static void SHA256_32b(benchmark::Bench& bench)
{
std::vector<uint8_t> in(32,0);
- while (state.KeepRunning()) {
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA256()
.Write(in.data(), in.size())
.Finalize(in.data());
- }
+ });
}
-static void SHA256D64_1024(benchmark::State& state)
+static void SHA256D64_1024(benchmark::Bench& bench)
{
std::vector<uint8_t> in(64 * 1024, 0);
- while (state.KeepRunning()) {
+ bench.batch(in.size()).unit("byte").run([&] {
SHA256D64(in.data(), in.data(), 1024);
- }
+ });
}
-static void SHA512(benchmark::State& state)
+static void SHA512(benchmark::Bench& bench)
{
uint8_t hash[CSHA512::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA512().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SipHash_32b(benchmark::State& state)
+static void SipHash_32b(benchmark::Bench& bench)
{
uint256 x;
uint64_t k1 = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
*((uint64_t*)x.begin()) = SipHashUint256(0, ++k1, x);
- }
+ });
}
-static void FastRandom_32bit(benchmark::State& state)
+static void FastRandom_32bit(benchmark::Bench& bench)
{
FastRandomContext rng(true);
- while (state.KeepRunning()) {
+ bench.run([&] {
rng.rand32();
- }
+ });
}
-static void FastRandom_1bit(benchmark::State& state)
+static void FastRandom_1bit(benchmark::Bench& bench)
{
FastRandomContext rng(true);
- while (state.KeepRunning()) {
+ bench.run([&] {
rng.randbool();
- }
+ });
}
-BENCHMARK(RIPEMD160, 440);
-BENCHMARK(SHA1, 570);
-BENCHMARK(SHA256, 340);
-BENCHMARK(SHA512, 330);
+BENCHMARK(RIPEMD160);
+BENCHMARK(SHA1);
+BENCHMARK(SHA256);
+BENCHMARK(SHA512);
-BENCHMARK(SHA256_32b, 4700 * 1000);
-BENCHMARK(SipHash_32b, 40 * 1000 * 1000);
-BENCHMARK(SHA256D64_1024, 7400);
-BENCHMARK(FastRandom_32bit, 110 * 1000 * 1000);
-BENCHMARK(FastRandom_1bit, 440 * 1000 * 1000);
+BENCHMARK(SHA256_32b);
+BENCHMARK(SipHash_32b);
+BENCHMARK(SHA256D64_1024);
+BENCHMARK(FastRandom_32bit);
+BENCHMARK(FastRandom_1bit);
diff --git a/src/bench/duplicate_inputs.cpp b/src/bench/duplicate_inputs.cpp
index e87f15042b..5745e4276c 100644
--- a/src/bench/duplicate_inputs.cpp
+++ b/src/bench/duplicate_inputs.cpp
@@ -12,7 +12,7 @@
#include <validation.h>
-static void DuplicateInputs(benchmark::State& state)
+static void DuplicateInputs(benchmark::Bench& bench)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -61,11 +61,11 @@ static void DuplicateInputs(benchmark::State& state)
block.hashMerkleRoot = BlockMerkleRoot(block);
- while (state.KeepRunning()) {
+ bench.run([&] {
BlockValidationState cvstate{};
assert(!CheckBlock(block, cvstate, chainparams.GetConsensus(), false, false));
assert(cvstate.GetRejectReason() == "bad-txns-inputs-duplicate");
- }
+ });
}
-BENCHMARK(DuplicateInputs, 10);
+BENCHMARK(DuplicateInputs);
diff --git a/src/bench/examples.cpp b/src/bench/examples.cpp
index f88150200a..dcd615b9da 100644
--- a/src/bench/examples.cpp
+++ b/src/bench/examples.cpp
@@ -3,31 +3,19 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <util/time.h>
-
-// Sanity test: this should loop ten times, and
-// min/max/average should be close to 100ms.
-static void Sleep100ms(benchmark::State& state)
-{
- while (state.KeepRunning()) {
- UninterruptibleSleep(std::chrono::milliseconds{100});
- }
-}
-
-BENCHMARK(Sleep100ms, 10);
// Extremely fast-running benchmark:
#include <math.h>
volatile double sum = 0.0; // volatile, global so not optimized away
-static void Trig(benchmark::State& state)
+static void Trig(benchmark::Bench& bench)
{
double d = 0.01;
- while (state.KeepRunning()) {
+ bench.run([&] {
sum += sin(d);
d += 0.000001;
- }
+ });
}
-BENCHMARK(Trig, 12 * 1000 * 1000);
+BENCHMARK(Trig);
diff --git a/src/bench/gcs_filter.cpp b/src/bench/gcs_filter.cpp
index 535ad35571..ef83242e41 100644
--- a/src/bench/gcs_filter.cpp
+++ b/src/bench/gcs_filter.cpp
@@ -5,7 +5,7 @@
#include <bench/bench.h>
#include <blockfilter.h>
-static void ConstructGCSFilter(benchmark::State& state)
+static void ConstructGCSFilter(benchmark::Bench& bench)
{
GCSFilter::ElementSet elements;
for (int i = 0; i < 10000; ++i) {
@@ -16,14 +16,14 @@ static void ConstructGCSFilter(benchmark::State& state)
}
uint64_t siphash_k0 = 0;
- while (state.KeepRunning()) {
+ bench.batch(elements.size()).unit("elem").run([&] {
GCSFilter filter({siphash_k0, 0, 20, 1 << 20}, elements);
siphash_k0++;
- }
+ });
}
-static void MatchGCSFilter(benchmark::State& state)
+static void MatchGCSFilter(benchmark::Bench& bench)
{
GCSFilter::ElementSet elements;
for (int i = 0; i < 10000; ++i) {
@@ -34,10 +34,10 @@ static void MatchGCSFilter(benchmark::State& state)
}
GCSFilter filter({0, 0, 20, 1 << 20}, elements);
- while (state.KeepRunning()) {
+ bench.unit("elem").run([&] {
filter.Match(GCSFilter::Element());
- }
+ });
}
-BENCHMARK(ConstructGCSFilter, 1000);
-BENCHMARK(MatchGCSFilter, 50 * 1000);
+BENCHMARK(ConstructGCSFilter);
+BENCHMARK(MatchGCSFilter);
diff --git a/src/bench/hashpadding.cpp b/src/bench/hashpadding.cpp
index 985be8bdba..309cae3723 100644
--- a/src/bench/hashpadding.cpp
+++ b/src/bench/hashpadding.cpp
@@ -8,7 +8,7 @@
#include <uint256.h>
-static void PrePadded(benchmark::State& state)
+static void PrePadded(benchmark::Bench& bench)
{
CSHA256 hasher;
@@ -18,30 +18,30 @@ static void PrePadded(benchmark::State& state)
hasher.Write(nonce.begin(), 32);
hasher.Write(nonce.begin(), 32);
uint256 data = GetRandHash();
- while (state.KeepRunning()) {
+ bench.run([&] {
unsigned char out[32];
CSHA256 h = hasher;
h.Write(data.begin(), 32);
h.Finalize(out);
- }
+ });
}
-BENCHMARK(PrePadded, 10000);
+BENCHMARK(PrePadded);
-static void RegularPadded(benchmark::State& state)
+static void RegularPadded(benchmark::Bench& bench)
{
CSHA256 hasher;
// Setup the salted hasher
uint256 nonce = GetRandHash();
uint256 data = GetRandHash();
- while (state.KeepRunning()) {
+ bench.run([&] {
unsigned char out[32];
CSHA256 h = hasher;
h.Write(nonce.begin(), 32);
h.Write(data.begin(), 32);
h.Finalize(out);
- }
+ });
}
-BENCHMARK(RegularPadded, 10000);
+BENCHMARK(RegularPadded);
diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp
index 5d943810df..32b060a15a 100644
--- a/src/bench/lockedpool.cpp
+++ b/src/bench/lockedpool.cpp
@@ -9,10 +9,9 @@
#include <vector>
#define ASIZE 2048
-#define BITER 5000
#define MSIZE 2048
-static void BenchLockedPool(benchmark::State& state)
+static void BenchLockedPool(benchmark::Bench& bench)
{
void *synth_base = reinterpret_cast<void*>(0x08000000);
const size_t synth_size = 1024*1024;
@@ -22,24 +21,22 @@ static void BenchLockedPool(benchmark::State& state)
for (int x=0; x<ASIZE; ++x)
addr.push_back(nullptr);
uint32_t s = 0x12345678;
- while (state.KeepRunning()) {
- for (int x=0; x<BITER; ++x) {
- int idx = s & (addr.size()-1);
- if (s & 0x80000000) {
- b.free(addr[idx]);
- addr[idx] = nullptr;
- } else if(!addr[idx]) {
- addr[idx] = b.alloc((s >> 16) & (MSIZE-1));
- }
- bool lsb = s & 1;
- s >>= 1;
- if (lsb)
- s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
+ bench.run([&] {
+ int idx = s & (addr.size() - 1);
+ if (s & 0x80000000) {
+ b.free(addr[idx]);
+ addr[idx] = nullptr;
+ } else if (!addr[idx]) {
+ addr[idx] = b.alloc((s >> 16) & (MSIZE - 1));
}
- }
+ bool lsb = s & 1;
+ s >>= 1;
+ if (lsb)
+ s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
+ });
for (void *ptr: addr)
b.free(ptr);
addr.clear();
}
-BENCHMARK(BenchLockedPool, 1300);
+BENCHMARK(BenchLockedPool);
diff --git a/src/bench/mempool_eviction.cpp b/src/bench/mempool_eviction.cpp
index 69483f2914..1b9e428c9d 100644
--- a/src/bench/mempool_eviction.cpp
+++ b/src/bench/mempool_eviction.cpp
@@ -23,7 +23,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& nFee, CTxMemPool& po
// Right now this is only testing eviction performance in an extremely small
// mempool. Code needs to be written to generate a much wider variety of
// unique transactions for a more meaningful performance measurement.
-static void MempoolEviction(benchmark::State& state)
+static void MempoolEviction(benchmark::Bench& bench)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -125,7 +125,7 @@ static void MempoolEviction(benchmark::State& state)
const CTransactionRef tx6_r{MakeTransactionRef(tx6)};
const CTransactionRef tx7_r{MakeTransactionRef(tx7)};
- while (state.KeepRunning()) {
+ bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
AddTx(tx1_r, 10000LL, pool);
AddTx(tx2_r, 5000LL, pool);
AddTx(tx3_r, 20000LL, pool);
@@ -135,7 +135,7 @@ static void MempoolEviction(benchmark::State& state)
AddTx(tx7_r, 9000LL, pool);
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
pool.TrimToSize(GetVirtualTransactionSize(*tx1_r));
- }
+ });
}
-BENCHMARK(MempoolEviction, 41000);
+BENCHMARK(MempoolEviction);
diff --git a/src/bench/mempool_stress.cpp b/src/bench/mempool_stress.cpp
index 38d8632318..89233e390c 100644
--- a/src/bench/mempool_stress.cpp
+++ b/src/bench/mempool_stress.cpp
@@ -26,8 +26,13 @@ struct Available {
Available(CTransactionRef& ref, size_t tx_count) : ref(ref), tx_count(tx_count){}
};
-static void ComplexMemPool(benchmark::State& state)
+static void ComplexMemPool(benchmark::Bench& bench)
{
+ int childTxs = 800;
+ if (bench.complexityN() > 1) {
+ childTxs = static_cast<int>(bench.complexityN());
+ }
+
FastRandomContext det_rand{true};
std::vector<Available> available_coins;
std::vector<CTransactionRef> ordered_coins;
@@ -46,7 +51,7 @@ static void ComplexMemPool(benchmark::State& state)
ordered_coins.emplace_back(MakeTransactionRef(tx));
available_coins.emplace_back(ordered_coins.back(), tx_counter++);
}
- for (auto x = 0; x < 800 && !available_coins.empty(); ++x) {
+ for (auto x = 0; x < childTxs && !available_coins.empty(); ++x) {
CMutableTransaction tx = CMutableTransaction();
size_t n_ancestors = det_rand.randrange(10)+1;
for (size_t ancestor = 0; ancestor < n_ancestors && !available_coins.empty(); ++ancestor){
@@ -77,13 +82,13 @@ static void ComplexMemPool(benchmark::State& state)
TestingSetup test_setup;
CTxMemPool pool;
LOCK2(cs_main, pool.cs);
- while (state.KeepRunning()) {
+ bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
for (auto& tx : ordered_coins) {
AddTx(tx, pool);
}
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
pool.TrimToSize(GetVirtualTransactionSize(*ordered_coins.front()));
- }
+ });
}
-BENCHMARK(ComplexMemPool, 1);
+BENCHMARK(ComplexMemPool);
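
ComplexMemPool above sizes its workload from bench.complexityN(), which RunAll() sets from the -asymptote values before fitting a big-O curve. A condensed sketch of that interaction (hypothetical RunAsymptote helper, not part of this diff):

#include <bench/nanobench.h>

#include <functional>
#include <iostream>
#include <vector>

static void RunAsymptote(const std::function<void(ankerl::nanobench::Bench&)>& func,
                         const std::vector<double>& asymptote)
{
    ankerl::nanobench::Bench bench;
    for (double n : asymptote) {
        bench.complexityN(n); // read back inside the benchmark via bench.complexityN()
        func(bench);          // e.g. ComplexMemPool, run once per problem size
    }
    // Fit the measured (complexityN, elapsed) pairs and print the estimated complexity.
    std::cout << bench.complexityBigO() << std::endl;
}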
diff --git a/src/bench/merkle_root.cpp b/src/bench/merkle_root.cpp
index e84f92feae..ba6629b9f0 100644
--- a/src/bench/merkle_root.cpp
+++ b/src/bench/merkle_root.cpp
@@ -8,7 +8,7 @@
#include <random.h>
#include <uint256.h>
-static void MerkleRoot(benchmark::State& state)
+static void MerkleRoot(benchmark::Bench& bench)
{
FastRandomContext rng(true);
std::vector<uint256> leaves;
@@ -16,11 +16,11 @@ static void MerkleRoot(benchmark::State& state)
for (auto& item : leaves) {
item = rng.rand256();
}
- while (state.KeepRunning()) {
+ bench.batch(leaves.size()).unit("leaf").run([&] {
bool mutation = false;
uint256 hash = ComputeMerkleRoot(std::vector<uint256>(leaves), &mutation);
leaves[mutation] = hash;
- }
+ });
}
-BENCHMARK(MerkleRoot, 800);
+BENCHMARK(MerkleRoot);
diff --git a/src/bench/nanobench.cpp b/src/bench/nanobench.cpp
new file mode 100644
index 0000000000..fcdd86495a
--- /dev/null
+++ b/src/bench/nanobench.cpp
@@ -0,0 +1,6 @@
+// Copyright (c) 2019-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#define ANKERL_NANOBENCH_IMPLEMENT
+#include <bench/nanobench.h>
diff --git a/src/bench/nanobench.h b/src/bench/nanobench.h
new file mode 100644
index 0000000000..c5379e7fd4
--- /dev/null
+++ b/src/bench/nanobench.h
@@ -0,0 +1,3225 @@
+// __ _ _______ __ _ _____ ______ _______ __ _ _______ _ _
+// | \ | |_____| | \ | | | |_____] |______ | \ | | |_____|
+// | \_| | | | \_| |_____| |_____] |______ | \_| |_____ | |
+//
+// Microbenchmark framework for C++11/14/17/20
+// https://github.com/martinus/nanobench
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2019-2020 Martin Ankerl <martin.ankerl@gmail.com>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ANKERL_NANOBENCH_H_INCLUDED
+#define ANKERL_NANOBENCH_H_INCLUDED
+
+// see https://semver.org/
+#define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes
+#define ANKERL_NANOBENCH_VERSION_MINOR 0 // backwards-compatible changes
+#define ANKERL_NANOBENCH_VERSION_PATCH 0 // backwards-compatible bug fixes
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// public facing api - as minimal as possible
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include <chrono> // high_resolution_clock
+#include <cstring> // memcpy
+#include <iosfwd> // for std::ostream* custom output target in Config
+#include <string> // all names
+#include <vector> // holds all results
+
+#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
+
+#define ANKERL_NANOBENCH_PRIVATE_CXX() __cplusplus
+#define ANKERL_NANOBENCH_PRIVATE_CXX98() 199711L
+#define ANKERL_NANOBENCH_PRIVATE_CXX11() 201103L
+#define ANKERL_NANOBENCH_PRIVATE_CXX14() 201402L
+#define ANKERL_NANOBENCH_PRIVATE_CXX17() 201703L
+
+#if ANKERL_NANOBENCH(CXX) >= ANKERL_NANOBENCH(CXX17)
+# define ANKERL_NANOBENCH_PRIVATE_NODISCARD() [[nodiscard]]
+#else
+# define ANKERL_NANOBENCH_PRIVATE_NODISCARD()
+#endif
+
+#if defined(__clang__)
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wpadded\"")
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() _Pragma("clang diagnostic pop")
+#else
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH()
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP()
+#endif
+
+#if defined(__GNUC__)
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Weffc++\"")
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() _Pragma("GCC diagnostic pop")
+#else
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH()
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP()
+#endif
+
+#if defined(ANKERL_NANOBENCH_LOG_ENABLED)
+# include <iostream>
+# define ANKERL_NANOBENCH_LOG(x) std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl
+#else
+# define ANKERL_NANOBENCH_LOG(x)
+#endif
+
+#if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS)
+# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 1
+#else
+# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0
+#endif
+
+#if defined(__clang__)
+# define ANKERL_NANOBENCH_NO_SANITIZE(...) __attribute__((no_sanitize(__VA_ARGS__)))
+#else
+# define ANKERL_NANOBENCH_NO_SANITIZE(...)
+#endif
+
+#if defined(_MSC_VER)
+# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __declspec(noinline)
+#else
+# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __attribute__((noinline))
+#endif
+
+// workaround missing "is_trivially_copyable" in g++ < 5.0
+// See https://stackoverflow.com/a/31798726/48181
+#if defined(__GNUC__) && __GNUC__ < 5
+# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
+#else
+# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
+#endif
+
+// declarations ///////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+using Clock = std::conditional<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock,
+ std::chrono::steady_clock>::type;
+class Bench;
+struct Config;
+class Result;
+class Rng;
+class BigO;
+
+/**
+ * @brief Renders output from a mustache-like template and benchmark results.
+ *
+ * The templating facility here is heavily inspired by [mustache - logic-less templates](https://mustache.github.io/).
+ * It adds a few more features that are necessary to get all of the captured data out of nanobench. Please read the
+ * excellent [mustache manual](https://mustache.github.io/mustache.5.html) to see what this is all about.
+ *
+ * nanobench output has two nested layers, *result* and *measurement*. Here is a hierarchy of the allowed tags:
+ *
+ * * `{{#result}}` Marks the begin of the result layer. Whatever comes after this will be instantiated as often as
+ * a benchmark result is available. Within it, you can use these tags:
+ *
+ * * `{{title}}` See Bench::title().
+ *
+ * * `{{name}}` Benchmark name, usually directly provided with Bench::run(), but can also be set with Bench::name().
+ *
+ * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::title().
+ *
+ * * `{{batch}}` Batch size, see Bench::batch().
+ *
+ * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN().
+ *
+ * * `{{epochs}}` Number of epochs, see Bench::epochs().
+ *
+ * * `{{clockResolution}}` Accuracy of the clock, i.e. what's the smallest time possible to measure with the clock.
+ * For modern systems, this can be around 20 ns. This value is automatically determined by nanobench at the first
+ * benchmark that is run, and used as a static variable throughout the application's runtime.
+ *
+ * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. See Bench::clockResolutionMultiple().
+ * This is the target runtime for each measurement (epoch). That means the more accurate your clock is, the faster
+ * the benchmark will be. Basing the measurement's runtime on the clock resolution is the main reason why nanobench is so fast.
+ *
+ * * `{{maxEpochTime}}` Configuration for a maximum time each measurement (epoch) is allowed to take. Note that at least
+ * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime().
+ *
+ * * `{{minEpochTime}}` Minimum epoch time, usually not set. See Bench::minEpochTime().
+ *
+ * * `{{minEpochIterations}}` See Bench::minEpochIterations().
+ *
+ * * `{{epochIterations}}` See Bench::epochIterations().
+ *
+ * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup().
+ *
+ * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative().
+ *
+ * Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations
+ * are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`. If performance counters
+ * are available (currently only on Linux), you also have `pagefaults`, `cpucycles`,
+ * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. All the measures (except `iterations`) are
+ * provided for a single iteration (so `elapsed` is the time a single iteration took). The following tags are available:
+ *
+ * * `{{median(<name>)}}` Calculates the median of a measurement data set, e.g. `{{median(elapsed)}}`.
+ *
+ * * `{{average(<name>)}}` Average (mean) calculation.
+ *
+ * * `{{medianAbsolutePercentError(<name>)}}` Calculates MdAPE, the Median Absolute Percentage Error. The MdAPE is an excellent
+ * metric for the variation of measurements. It is more robust to outliers than the
+ * [Mean absolute percentage error (M-APE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error).
+ * @f[
+ * \mathrm{medianAbsolutePercentError}(e) = \mathrm{median}\{| \frac{e_i - \mathrm{median}\{e\}}{e_i}| \}
+ * @f]
+ * E.g. for *elapsed*: First, @f$ \mathrm{median}\{elapsed\} @f$ is calculated. This is used to calculate the absolute percentage
+ * error to this median for each measurement, as in @f$ | \frac{e_i - \mathrm{median}\{e\}}{e_i}| @f$. All these results
+ * are sorted, and the middle value is chosen as the median absolute percent error.
+ *
+ * This measurement is a bit hard to interpret, but it is very robust against outliers. E.g. a value of 5% means that half of the
+ * measurements deviate less than 5% from the median, and the other half deviates more than 5% from the median.
+ *
+ * * `{{sum(<name>)}}` Sum of all the measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations
+ * measured in this benchmark.
+ *
+ * * `{{minimum(<name>)}}` Minimum of all measurements.
+ *
+ * * `{{maximum(<name>)}}` Maximum of all measurements.
+ *
+ * * `{{sumProduct(<first>, <second>)}}` Calculates the sum of the products of corresponding measures:
+ * @f[
+ * \mathrm{sumProduct}(a,b) = \sum_{i=1}^{n}a_i\cdot b_i
+ * @f]
+ * E.g. to calculate total runtime of the benchmark, you multiply iterations with elapsed time for each measurement, and
+ * sum these results up:
+ * `{{sumProduct(iterations, elapsed)}}`.
+ *
+ * * `{{#measurement}}` To access individual measurement results, open the begin tag for measurements.
+ *
+ * * `{{elapsed}}` Average elapsed time per iteration, in seconds.
+ *
+ * * `{{iterations}}` Number of iterations in the measurement. The number of iterations will fluctuate due
+ * to some applied randomness, to enhance accuracy.
+ *
+ * * `{{pagefaults}}` Average number of pagefaults per iteration.
+ *
+ * * `{{cpucycles}}` Average number of CPU cycles processed per iteration.
+ *
+ * * `{{contextswitches}}` Average number of context switches per iteration.
+ *
+ * * `{{instructions}}` Average number of retired instructions per iteration.
+ *
+ * * `{{branchinstructions}}` Average number of branches executed per iteration.
+ *
+ * * `{{branchmisses}}` Average number of branches that were missed per iteration.
+ *
+ * * `{{/measurement}}` Ends the measurement tag.
+ *
+ * * `{{/result}}` Marks the end of the result layer. This is the end marker for the template part that will be instantiated
+ * for each benchmark result.
+ *
+ *
+ * For the layer tags *result* and *measurement* you additionally can use these special markers:
+ *
+ * * ``{{#-first}}`` - Begin marker of a template that will be instantiated *only for the first* entry in the layer. Use is only
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-first}}``.
+ *
+ * * ``{{^-first}}`` - Begin marker of a template that will be instantiated *for each except the first* entry in the layer. This
+ * is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer.
+ * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
+ *
+ * * ``{{/-first}}`` - End marker for either ``{{#-first}}`` or ``{{^-first}}``.
+ *
+ * * ``{{#-last}}`` - Begin marker of a template that will be instantiated *only for the last* entry in the layer. Use is only
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-last}}``.
+ *
+ * * ``{{^-last}}`` - Begin marker of a template that will be instantiated *for each except the last* entry in the layer. This
+ * is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer.
+ * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
+ *
+ * * ``{{/-last}}`` - End marker for either ``{{#-last}}`` or ``{{^-last}}``.
+ *
+ @verbatim embed:rst
+
+ For an overview of all the possible data you can get out of nanobench, please see the tutorial at :ref:`tutorial-template-json`.
+
+ The templates that ship with nanobench are:
+
+ * :cpp:func:`templates::csv() <ankerl::nanobench::templates::csv()>`
+ * :cpp:func:`templates::json() <ankerl::nanobench::templates::json()>`
+ * :cpp:func:`templates::htmlBoxplot() <ankerl::nanobench::templates::htmlBoxplot()>`
+
+ @endverbatim
+ *
+ * @param mustacheTemplate The template.
+ * @param bench Benchmark, containing all the results.
+ * @param out Output for the generated output.
+ */
+void render(char const* mustacheTemplate, Bench const& bench, std::ostream& out);
+
+/**
+ * Same as render(char const* mustacheTemplate, Bench const& bench, std::ostream& out), but for when
+ * you only have results available.
+ *
+ * @param mustacheTemplate The template.
+ * @param results All the results to be used for rendering.
+ * @param out Output for the generated output.
+ */
+void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out);
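+
+// Example (illustrative sketch): rendering collected results with a small custom mustache template.
+// The template string and the benchmarked lambda are placeholders; render(), Bench and the tags used
+// are the ones documented above.
+//
+//     ankerl::nanobench::Bench bench;
+//     int x = 0;
+//     bench.run("increment", [&] { ankerl::nanobench::doNotOptimizeAway(++x); });
+//     ankerl::nanobench::render(
+//         "{{#result}}{{name}}: {{median(elapsed)}} s/op (MdAPE {{medianAbsolutePercentError(elapsed)}})\n{{/result}}",
+//         bench, std::cout);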
+
+// Contains mustache-like templates
+namespace templates {
+
+/*!
+ @brief CSV data for the benchmark results.
+
+ Generates a comma-separated values dataset. First line is the header, each following line is a summary of each benchmark run.
+
+ @verbatim embed:rst
+ See the tutorial at :ref:`tutorial-template-csv` for an example.
+ @endverbatim
+ */
+char const* csv() noexcept;
+
+/*!
+ @brief HTML output that uses plotly to generate an interactive boxplot chart. See the tutorial for an example output.
+
+ The output uses only the elapsed time, and displays each epoch as a single dot.
+ @verbatim embed:rst
+ See the tutorial at :ref:`tutorial-template-html` for an example.
+ @endverbatim
+
+ @see ankerl::nanobench::render()
+ */
+char const* htmlBoxplot() noexcept;
+
+/*!
+ @brief Template to generate JSON data.
+
+ The generated JSON data contains *all* data that has been generated. All times are given as double values, in seconds. The output can get
+ quite large.
+ @verbatim embed:rst
+ See the tutorial at :ref:`tutorial-template-json` for an example.
+ @endverbatim
+ */
+char const* json() noexcept;
+
+} // namespace templates
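+
+// Example (illustrative sketch): writing results with one of the shipped templates. Assumes a
+// `bench` object with finished runs and that <fstream> is available; the file name is a placeholder.
+//
+//     std::ofstream fout("benchmark_results.json");
+//     ankerl::nanobench::render(ankerl::nanobench::templates::json(), bench, fout);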
+
+namespace detail {
+
+template <typename T>
+struct PerfCountSet;
+
+class IterationLogic;
+class PerformanceCounters;
+
+#if ANKERL_NANOBENCH(PERF_COUNTERS)
+class LinuxPerformanceCounters;
+#endif
+
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+// definitions ////////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+namespace detail {
+
+template <typename T>
+struct PerfCountSet {
+ T pageFaults{};
+ T cpuCycles{};
+ T contextSwitches{};
+ T instructions{};
+ T branchInstructions{};
+ T branchMisses{};
+};
+
+} // namespace detail
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+struct Config {
+ // actual benchmark config
+ std::string mBenchmarkTitle = "benchmark";
+ std::string mBenchmarkName = "noname";
+ std::string mUnit = "op";
+ double mBatch = 1.0;
+ double mComplexityN = -1.0;
+ size_t mNumEpochs = 11;
+ size_t mClockResolutionMultiple = static_cast<size_t>(1000);
+ std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100);
+ std::chrono::nanoseconds mMinEpochTime{};
+ uint64_t mMinEpochIterations{1};
+ uint64_t mEpochIterations{0}; // If not 0, run *exactly* this number of iterations per epoch.
+ uint64_t mWarmup = 0;
+ std::ostream* mOut = nullptr;
+ bool mShowPerformanceCounters = true;
+ bool mIsRelative = false;
+
+ Config();
+ ~Config();
+ Config& operator=(Config const&);
+ Config& operator=(Config&&);
+ Config(Config const&);
+ Config(Config&&) noexcept;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// Result returned after a benchmark has finished. Can be used as a baseline for relative().
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class Result {
+public:
+ enum class Measure : size_t {
+ elapsed,
+ iterations,
+ pagefaults,
+ cpucycles,
+ contextswitches,
+ instructions,
+ branchinstructions,
+ branchmisses,
+ _size
+ };
+
+ explicit Result(Config const& benchmarkConfig);
+
+ ~Result();
+ Result& operator=(Result const&);
+ Result& operator=(Result&&);
+ Result(Result const&);
+ Result(Result&&) noexcept;
+
+ // adds new measurement results
+ // all values are scaled by iters (except iters...)
+ void add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc);
+
+ ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept;
+
+ ANKERL_NANOBENCH(NODISCARD) double median(Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) double medianAbsolutePercentError(Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) double average(Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) double sum(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept;
+
+ ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) bool empty() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) size_t size() const noexcept;
+
+ // Finds string, if not found, returns _size.
+ static Measure fromString(std::string const& str);
+
+private:
+ Config mConfig{};
+ std::vector<std::vector<double>> mNameToMeasurements{};
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
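+
+// Example (illustrative sketch): reading the collected data programmatically instead of relying on
+// the markdown table. `bench` is assumed to hold finished runs.
+//
+//     for (auto const& r : bench.results()) {
+//         double med = r.median(ankerl::nanobench::Result::Measure::elapsed);
+//         double mdape = r.medianAbsolutePercentError(ankerl::nanobench::Result::Measure::elapsed);
+//         std::cout << r.config().mBenchmarkName << ": median " << med << " s/op, MdAPE " << mdape << "\n";
+//     }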
+
+/**
+ * An extremely fast random generator. Currently, this implements *RomuDuoJr*, developed by Mark Overton. Source:
+ * http://www.romu-random.org/
+ *
+ * RomuDuoJr is extremely fast and provides reasonably good randomness. Not enough for large jobs, but definitely
+ * good enough for a benchmarking framework.
+ *
+ * * Estimated capacity: @f$ 2^{51} @f$ bytes
+ * * Register pressure: 4
+ * * State size: 128 bits
+ *
+ * This random generator is a drop-in replacement for the generators supplied by ``<random>``. It is not
+ * cryptographically secure. Its intended purpose is to be very fast so that benchmarks that make use
+ * of randomness are not distorted too much by the random generator.
+ *
+ * Rng also provides a few non-standard helpers, optimized for speed.
+ */
+class Rng final {
+public:
+ /**
+ * @brief This RNG provides 64bit randomness.
+ */
+ using result_type = uint64_t;
+
+ static constexpr uint64_t(min)();
+ static constexpr uint64_t(max)();
+
+ /**
+ * As a safety precaution, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the
+ * same sequence, which is generally not what one wants. Instead create a new rng with the default constructor Rng(), which is
+ * automatically seeded from `std::random_device`. If you really need a copy, use copy().
+ */
+ Rng(Rng const&) = delete;
+
+ /**
+ * Same as Rng(Rng const&), we don't allow assignment. If you need a new Rng create one with the default constructor Rng().
+ */
+ Rng& operator=(Rng const&) = delete;
+
+ // moving is ok
+ Rng(Rng&&) noexcept = default;
+ Rng& operator=(Rng&&) noexcept = default;
+ ~Rng() noexcept = default;
+
+ /**
+ * @brief Creates a new random generator with a random seed.
+ *
+ * Instead of a default seed (as the random generators from the standard library), this properly seeds the random generator from
+ * `std::random_device`. It guarantees correct seeding. Note that seeding can be relatively slow, depending on the source of
+ * randomness used. So it is best to create a Rng once and use it for all your randomness purposes.
+ */
+ Rng();
+
+ /*!
+ Creates a new Rng that is seeded with a specific seed. Each Rng created from the same seed will produce the same randomness
+ sequence. This can be useful for deterministic behavior.
+
+ @verbatim embed:rst
+ .. note::
+
+ The random algorithm might change between nanobench releases. Whenever a faster and/or better random
+ generator becomes available, I will switch the implementation.
+ @endverbatim
+
+ As per the Romu paper, this seeds the Rng with the splitMix64 algorithm and performs 10 initial rounds to further mix the
+ internal state.
+
+ @param seed The 64bit seed. All values are allowed, even 0.
+ */
+ explicit Rng(uint64_t seed) noexcept;
+ Rng(uint64_t x, uint64_t y) noexcept;
+
+ /**
+ * Creates a copy of the Rng, thus the copy provides exactly the same random sequence as the original.
+ */
+ ANKERL_NANOBENCH(NODISCARD) Rng copy() const noexcept;
+
+ /**
+ * @brief Produces a 64bit random value. This should be very fast, thus it is marked as inline. In my benchmark, this is ~46 times
+ * faster than `std::default_random_engine` for producing 64bit random values. It seems that the fastest std contender is
+ * `std::mt19937_64`. Still, this RNG is 2-3 times as fast.
+ *
+ * @return uint64_t The next 64 bit random value.
+ */
+ inline uint64_t operator()() noexcept;
+
+ // This is slightly biased; see the documentation of bounded() below for details.
+
+ /**
+ * Generates a random number between 0 and range (excluding range).
+ *
+ * The algorithm only produces 32bit numbers, and is slightly biased. The effect is quite small unless your range is close to the
+ * maximum value of an integer. It is possible to correct the bias with rejection sampling (see
+ * [here](https://lemire.me/blog/2016/06/30/fast-random-shuffling/)), but this is most likely irrelevant in practice for the
+ * purposes of this Rng.
+ *
+ * See Daniel Lemire's blog post [A fast alternative to the modulo
+ * reduction](https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/)
+ *
+ * @param range Upper exclusive range. E.g. a value of 3 will generate random numbers 0, 1, 2.
+ * @return uint32_t Generated random values in range [0, range(.
+ */
+ inline uint32_t bounded(uint32_t range) noexcept;
+
+ // random double in range [0, 1(
+ // see http://prng.di.unimi.it/
+
+ /**
+ * Provides a random uniform double value between 0 and 1. This uses the method described in [Generating uniform doubles in the
+ * unit interval](http://prng.di.unimi.it/), and is extremely fast.
+ *
+ * @return double Uniformly distributed double value in range [0,1(, excluding 1.
+ */
+ inline double uniform01() noexcept;
+
+ /**
+ * Shuffles all entries in the given container. Although this has a slight bias due to the implementation of bounded(), this is
+ * preferable to `std::shuffle` because it is over 5 times faster. See Daniel Lemire's blog post [Fast random
+ * shuffling](https://lemire.me/blog/2016/06/30/fast-random-shuffling/).
+ *
+ * @param container The whole container will be shuffled.
+ */
+ template <typename Container>
+ void shuffle(Container& container) noexcept;
+
+private:
+ static constexpr uint64_t rotl(uint64_t x, unsigned k) noexcept;
+
+ uint64_t mX;
+ uint64_t mY;
+};
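+
+// Example (illustrative sketch): using Rng with a fixed seed so the benchmark input is reproducible.
+// The container size and access pattern are placeholders.
+//
+//     ankerl::nanobench::Rng rng(42);
+//     std::vector<uint32_t> data(1000);
+//     for (auto& v : data) { v = rng.bounded(1000); }
+//     rng.shuffle(data);
+//     ankerl::nanobench::Bench().run("random access", [&] {
+//         ankerl::nanobench::doNotOptimizeAway(data[rng.bounded(static_cast<uint32_t>(data.size()))]);
+//     });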
+
+/**
+ * @brief Main entry point to nanobench's benchmarking facility.
+ *
+ * It holds configuration and results from one or more benchmark runs. Usually it is used in a single line, where the object is
+ * constructed, configured, and then a benchmark is run. E.g. like this:
+ *
+ * ankerl::nanobench::Bench().unit("byte").batch(1000).run("random fluctuations", [&] {
+ * // here be the benchmark code
+ * });
+ *
+ * In that example Bench() constructs the benchmark, it is then configured with unit() and batch(), and after configuration a
+ * benchmark is executed with run(). Once run() has finished, it prints the result to `std::cout`. It would also store the results
+ * in the Bench instance, but in this case the object is immediately destroyed so it's not available any more.
+ */
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class Bench {
+public:
+ /**
+ * @brief Creates a new benchmark for configuration and running of benchmarks.
+ */
+ Bench();
+
+ Bench(Bench&& other);
+ Bench& operator=(Bench&& other);
+ Bench(Bench const& other);
+ Bench& operator=(Bench const& other);
+ ~Bench() noexcept;
+
+ /*!
+ @brief Repeatedly calls `op()` based on the configuration, and performs measurements.
+
+ This call is marked with `noinline` to prevent the compiler from optimizing across different benchmarks. This can have quite a big
+ effect on benchmark accuracy.
+
+ @verbatim embed:rst
+ .. note::
+
+ Each call to your lambda must have a side effect that the compiler can't possibly optimize away. E.g. add a result to an
+ externally defined number (like a counter declared outside the lambda), and finally call `doNotOptimizeAway` on the variables the compiler
+ must not remove. You can also use :cpp:func:`ankerl::nanobench::doNotOptimizeAway` directly in the lambda, but be aware that
+ this has a small overhead.
+
+ @endverbatim
+
+ @tparam Op The code to benchmark.
+ */
+ template <typename Op>
+ ANKERL_NANOBENCH(NOINLINE)
+ Bench& run(char const* benchmarkName, Op&& op);
+
+ template <typename Op>
+ ANKERL_NANOBENCH(NOINLINE)
+ Bench& run(std::string const& benchmarkName, Op&& op);
+
+ /**
+ * @brief Same as run(char const* benchmarkName, Op op), but instead uses the previously set name.
+ * @tparam Op The code to benchmark.
+ */
+ template <typename Op>
+ ANKERL_NANOBENCH(NOINLINE)
+ Bench& run(Op&& op);
+
+ /**
+ * @brief Title of the benchmark, will be shown in the table header. Changing the title will start a new markdown table.
+ *
+ * @param benchmarkTitle The title of the benchmark.
+ */
+ Bench& title(char const* benchmarkTitle);
+ Bench& title(std::string const& benchmarkTitle);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& title() const noexcept;
+
+ /// Name of the benchmark, will be shown in the table row.
+ Bench& name(char const* benchmarkName);
+ Bench& name(std::string const& benchmarkName);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
+
+ /**
+ * @brief Sets the batch size.
+ *
+ * E.g. number of processed bytes, or some other metric for the size of the processed data in each iteration. If you benchmark
+ * hashing of a 1000 byte long string and want byte/sec as a result, you can specify 1000 as the batch size.
+ *
+ * @tparam T Any input type is internally cast to `double`.
+ * @param b batch size
+ */
+ template <typename T>
+ Bench& batch(T b) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double batch() const noexcept;
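+
+ // Example (illustrative sketch): benchmark hashing of a 1000 byte string and report byte/sec by
+ // setting batch() to the number of processed bytes. std::hash is only a stand-in for the code under test.
+ //
+ //     std::string text(1000, 'x');
+ //     ankerl::nanobench::Bench().unit("byte").batch(text.size()).run("hash 1000 bytes", [&] {
+ //         ankerl::nanobench::doNotOptimizeAway(std::hash<std::string>{}(text));
+ //     });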
+
+ /**
+ * @brief Sets the operation unit.
+ *
+ * Defaults to "op". Could be e.g. "byte" for string processing. This is used for the table header, e.g. to show `ns/byte`. Use
+ * singular (*byte*, not *bytes*). A change clears the currently collected results.
+ *
+ * @param unit The unit name.
+ */
+ Bench& unit(char const* unit);
+ Bench& unit(std::string const& unit);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& unit() const noexcept;
+
+ /**
+ * @brief Set the output stream where the resulting markdown table will be printed to.
+ *
+ * The default is `&std::cout`. You can disable all output by setting `nullptr`.
+ *
+ * @param outstream Pointer to output stream, can be `nullptr`.
+ */
+ Bench& output(std::ostream* outstream) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::ostream* output() const noexcept;
+
+ /**
+ * Modern processors have a very accurate clock, able to measure as low as 20 nanoseconds. This is the main trick that makes
+ * nanobench so fast: we find out how accurate the clock is, then run the benchmark only so often that the clock's accuracy is good enough
+ * for accurate measurements.
+ *
+ * The default is to run one epoch for 1000 times the clock resolution. So for 20ns resolution and 11 epochs, this gives a total
+ * runtime of
+ *
+ * @f[
+ * 20ns * 1000 * 11 \approx 0.2ms
+ * @f]
+ *
+ * To be precise, nanobench adds 0-20% random noise to each evaluation. This prevents aliasing effects and further
+ * improves accuracy.
+ *
+ * Total runtime will be higher though: Some initial time is needed to find out the target number of iterations for each epoch, and
+ * there is some overhead involved to start & stop timers, calculate the resulting statistics, and write the output.
+ *
+ * @param multiple Target number of times of clock resolution. Usually 1000 is a good compromise between runtime and accuracy.
+ */
+ Bench& clockResolutionMultiple(size_t multiple) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) size_t clockResolutionMultiple() const noexcept;
+
+ /**
+ * @brief Controls number of epochs, the number of measurements to perform.
+ *
+ * The reported result will be the median of the evaluations of each epoch. The higher you choose this, the more
+ * deterministic the result will be and outliers will be more easily removed. Also the `err%` will be more accurate the higher this
+ * number is. Note that the `err%` will not necessarily decrease when number of epochs is increased. But it will be a more accurate
+ * representation of the benchmarked code's runtime stability.
+ *
+ * Choose the value wisely. In practice, 11 has been shown to be a reasonable choice between runtime performance and accuracy.
+ * This setting goes hand in hand with minEpochIterations() (or minEpochTime()). If you are more interested in *median* runtime, you
+ * might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase minEpochIterations()
+ * instead.
+ *
+ * @param numEpochs Number of epochs.
+ */
+ Bench& epochs(size_t numEpochs) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) size_t epochs() const noexcept;
+
+ /**
+ * @brief Upper limit for the runtime of each epoch.
+ *
+ * As a safety precaution if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per
+ * epoch. Default is 100ms. At least a single evaluation of the benchmark is performed.
+ *
+ * @see minEpochTime(), minEpochIterations()
+ *
+ * @param t Maximum target runtime for a single epoch.
+ */
+ Bench& maxEpochTime(std::chrono::nanoseconds t) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds maxEpochTime() const noexcept;
+
+ /**
+ * @brief Minimum time each epoch should take.
+ *
+ * Default is zero, so we are fully relying on clockResolutionMultiple(). In most cases this is exactly what you want. If you see
+ * that the evaluation is unreliable with a high `err%`, you can increase either minEpochTime() or minEpochIterations().
+ *
+ * @see maxEpochTime(), minEpochIterations()
+ *
+ * @param t Minimum time each epoch should take.
+ */
+ Bench& minEpochTime(std::chrono::nanoseconds t) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds minEpochTime() const noexcept;
+
+ /**
+ * @brief Sets the minimum number of iterations each epoch should take.
+ *
+ * Default is 1, and we rely on clockResolutionMultiple(). If the `err%` is high and you want a smoother result, you might want
+ * to increase the minimum number of iterations, or increase the minEpochTime().
+ *
+ * @see minEpochTime(), maxEpochTime(), minEpochIterations()
+ *
+ * @param numIters Minimum number of iterations per epoch.
+ */
+ Bench& minEpochIterations(uint64_t numIters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) uint64_t minEpochIterations() const noexcept;
+
+ /**
+ * Sets exactly the number of iterations for each epoch. Ignores all other epoch limits. This forces nanobench to use exactly
+ * the given number of iterations for each epoch, not more and not less. Default is 0 (disabled).
+ *
+ * @param numIters Exact number of iterations to use. Set to 0 to disable.
+ */
+ Bench& epochIterations(uint64_t numIters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) uint64_t epochIterations() const noexcept;
+
+ /**
+ * @brief Sets a number of iterations that are initially performed without any measurements.
+ *
+ * Some benchmarks need a few evaluations to warm up caches / database / whatever access. Normally this should not be needed, since
+ * we show the median result so initial outliers will be filtered away automatically. If the warmup effect is large though, you
+ * might want to set it. Default is 0.
+ *
+ * @param numWarmupIters Number of warmup iterations.
+ */
+ Bench& warmup(uint64_t numWarmupIters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) uint64_t warmup() const noexcept;
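+
+ // Example (illustrative sketch): tightening measurements for a noisy benchmark by combining
+ // epochs(), minEpochIterations() and warmup(). The numbers are arbitrary starting points and
+ // `noisyCall()` is a placeholder for the code under test.
+ //
+ //     ankerl::nanobench::Bench()
+ //         .epochs(21)
+ //         .minEpochIterations(100)
+ //         .warmup(10)
+ //         .run("noisy operation", [&] { ankerl::nanobench::doNotOptimizeAway(noisyCall()); });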
+
+ /**
+ * @brief Marks the next run as the baseline.
+ *
+ * Call `relative(true)` to mark the run as the baseline. Successive runs will be compared to this run. It is calculated by
+ *
+ * @f[
+ * 100\% * \frac{baseline}{runtime}
+ * @f]
+ *
+ * * 100% means it is exactly as fast as the baseline
+ * * >100% means it is faster than the baseline. E.g. 200% means the current run is twice as fast as the baseline.
+ * * <100% means it is slower than the baseline. E.g. 50% means it is twice as slow as the baseline.
+ *
+ * See the tutorial section "Comparing Results" for example usage.
+ *
+ * @param isRelativeEnabled True to enable processing
+ */
+ Bench& relative(bool isRelativeEnabled) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) bool relative() const noexcept;
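+
+ // Example (illustrative sketch): comparing two implementations. The first run after relative(true)
+ // becomes the baseline that the following runs are compared against; the lambdas are placeholders.
+ //
+ //     ankerl::nanobench::Bench bench;
+ //     bench.title("string append").relative(true);
+ //     std::string a;
+ //     std::string b;
+ //     bench.run("operator+=", [&] { a += 'x'; ankerl::nanobench::doNotOptimizeAway(a); });
+ //     bench.run("append()", [&] { b.append(1, 'x'); ankerl::nanobench::doNotOptimizeAway(b); });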
+
+ /**
+ * @brief Enables/disables performance counters.
+ *
+ * On Linux nanobench has a powerful feature to use performance counters. This enables counting of retired instructions,
+ * branches, missed branches, etc. By default this is enabled, but you can disable it if you don't need that feature.
+ *
+ * @param showPerformanceCounters True to enable, false to disable.
+ */
+ Bench& performanceCounters(bool showPerformanceCounters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) bool performanceCounters() const noexcept;
+
+ /**
+ * @brief Retrieves all benchmark results collected by the bench object so far.
+ *
+ * Each call to run() generates a Result that is stored within the Bench instance. This is mostly for advanced users who want to
+ * see all the nitty-gritty details.
+ *
+ * @return All results collected so far.
+ */
+ ANKERL_NANOBENCH(NODISCARD) std::vector<Result> const& results() const noexcept;
+
+ /*!
+ @verbatim embed:rst
+
+ Convenience shortcut to :cpp:func:`ankerl::nanobench::doNotOptimizeAway`.
+
+ @endverbatim
+ */
+ template <typename Arg>
+ Bench& doNotOptimizeAway(Arg&& arg);
+
+ /*!
+ @verbatim embed:rst
+
+ Sets N for asymptotic complexity calculation, so it becomes possible to calculate `Big O
+ <https://en.wikipedia.org/wiki/Big_O_notation>`_ from multiple benchmark evaluations.
+
+ Use :cpp:func:`ankerl::nanobench::Bench::complexityBigO` when the evaluation has finished. See the tutorial
+ :ref:`asymptotic-complexity` for details.
+
+ @endverbatim
+
+ @tparam T Any type is cast to `double`.
+ @param b Length of N for the next benchmark run, so it is possible to calculate `bigO`.
+ */
+ template <typename T>
+ Bench& complexityN(T b) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double complexityN() const noexcept;
+
+ /*!
+ Calculates [Big O](https://en.wikipedia.org/wiki/Big_O_notation) of the results with all preconfigured complexity functions.
+ Currently these complexity functions are fitted into the benchmark results:
+
+ @f$ \mathcal{O}(1) @f$,
+ @f$ \mathcal{O}(n) @f$,
+ @f$ \mathcal{O}(\log{}n) @f$,
+ @f$ \mathcal{O}(n\log{}n) @f$,
+ @f$ \mathcal{O}(n^2) @f$,
+ @f$ \mathcal{O}(n^3) @f$.
+
+ If we e.g. evaluate the complexity of `std::sort`, this is the result of `std::cout << bench.complexityBigO()`:
+
+ ```
+ | coefficient | err% | complexity
+ |--------------:|-------:|------------
+ | 5.08935e-09 | 2.6% | O(n log n)
+ | 6.10608e-08 | 8.0% | O(n)
+ | 1.29307e-11 | 47.2% | O(n^2)
+ | 2.48677e-15 | 69.6% | O(n^3)
+ | 9.88133e-06 | 132.3% | O(log n)
+ | 5.98793e-05 | 162.5% | O(1)
+ ```
+
+ So in this case @f$ \mathcal{O}(n\log{}n) @f$ provides the best approximation.
+
+ @verbatim embed:rst
+ See the tutorial :ref:`asymptotic-complexity` for details.
+ @endverbatim
+ @return Evaluation results, which can be printed or otherwise inspected.
+ */
+ std::vector<BigO> complexityBigO() const;
+
+ /**
+ * @brief Calculates bigO for a custom function.
+ *
+ * E.g. to calculate the mean squared error for @f$ \mathcal{O}(\log{}\log{}n) @f$, which is not part of the default set of
+ * complexityBigO(), you can do this:
+ *
+ * ```
+ * auto logLogN = bench.complexityBigO("O(log log n)", [](double n) {
+ * return std::log2(std::log2(n));
+ * });
+ * ```
+ *
+ * The resulting mean squared error can be printed with `std::cout << logLogN`. E.g. it prints something like this:
+ *
+ * ```text
+ * 2.46985e-05 * O(log log n), rms=1.48121
+ * ```
+ *
+ * @tparam Op Type of mapping operation.
+ * @param name Name for the function, e.g. "O(log log n)"
+ * @param op Op's operator() maps a `double` with the desired complexity function, e.g. `log2(log2(n))`.
+ * @return BigO Error calculation, which is streamable to std::cout.
+ */
+ template <typename Op>
+ BigO complexityBigO(char const* name, Op op) const;
+
+ template <typename Op>
+ BigO complexityBigO(std::string const& name, Op op) const;
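+
+ // Example (illustrative sketch): estimating asymptotic complexity by running the same benchmark for
+ // several input sizes and fitting the preconfigured complexity functions afterwards. Assumes
+ // <vector> and <numeric> are available; the workload is a placeholder.
+ //
+ //     ankerl::nanobench::Bench bench;
+ //     for (size_t n = 16; n <= 16384; n *= 4) {
+ //         std::vector<uint64_t> v(n, 1);
+ //         bench.complexityN(n).run("accumulate", [&] {
+ //             ankerl::nanobench::doNotOptimizeAway(std::accumulate(v.begin(), v.end(), uint64_t{0}));
+ //         });
+ //     }
+ //     std::cout << bench.complexityBigO() << std::endl;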
+
+ /*!
+ @verbatim embed:rst
+
+ Convenience shortcut to :cpp:func:`ankerl::nanobench::render`.
+
+ @endverbatim
+ */
+ Bench& render(char const* templateContent, std::ostream& os);
+
+ Bench& config(Config const& benchmarkConfig);
+ ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept;
+
+private:
+ Config mConfig{};
+ std::vector<Result> mResults{};
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+/**
+ * @brief Makes sure none of the given arguments are optimized away by the compiler.
+ *
+ * @tparam Arg Type of the argument that shouldn't be optimized away.
+ * @param arg The input that we mark as being used, even though we don't do anything with it.
+ */
+template <typename Arg>
+void doNotOptimizeAway(Arg&& arg);
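+
+// Example (illustrative sketch): keeping a computed value observable so the benchmarked expression
+// is not removed by the optimizer. The arithmetic is a placeholder.
+//
+//     uint64_t sum = 0;
+//     ankerl::nanobench::Bench().run("sum", [&] {
+//         sum += 7;
+//         ankerl::nanobench::doNotOptimizeAway(sum);
+//     });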
+
+namespace detail {
+
+#if defined(_MSC_VER)
+void doNotOptimizeAwaySink(void const*);
+
+template <typename T>
+void doNotOptimizeAway(T const& val);
+
+#else
+
+// see folly's Benchmark.h
+template <typename T>
+constexpr bool doNotOptimizeNeedsIndirect() {
+ using Decayed = typename std::decay<T>::type;
+ return !ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(Decayed) || sizeof(Decayed) > sizeof(long) || std::is_pointer<Decayed>::value;
+}
+
+template <typename T>
+typename std::enable_if<!doNotOptimizeNeedsIndirect<T>()>::type doNotOptimizeAway(T const& val) {
+ // NOLINTNEXTLINE(hicpp-no-assembler)
+ asm volatile("" ::"r"(val));
+}
+
+template <typename T>
+typename std::enable_if<doNotOptimizeNeedsIndirect<T>()>::type doNotOptimizeAway(T const& val) {
+ // NOLINTNEXTLINE(hicpp-no-assembler)
+ asm volatile("" ::"m"(val) : "memory");
+}
+#endif
+
+// internally used, but visible because run() is templated.
+// Not movable/copy-able, so we simply use a pointer instead of unique_ptr. This saves us from
+// having to include <memory>, and the template instantiation overhead of unique_ptr which is unfortunately quite significant.
+ANKERL_NANOBENCH(IGNORE_EFFCPP_PUSH)
+class IterationLogic {
+public:
+ explicit IterationLogic(Bench const& config) noexcept;
+ ~IterationLogic();
+
+ ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept;
+ void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept;
+ void moveResultTo(std::vector<Result>& results) noexcept;
+
+private:
+ struct Impl;
+ Impl* mPimpl;
+};
+ANKERL_NANOBENCH(IGNORE_EFFCPP_POP)
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class PerformanceCounters {
+public:
+ PerformanceCounters(PerformanceCounters const&) = delete;
+ PerformanceCounters& operator=(PerformanceCounters const&) = delete;
+
+ PerformanceCounters();
+ ~PerformanceCounters();
+
+ void beginMeasure();
+ void endMeasure();
+ void updateResults(uint64_t numIters);
+
+ ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& val() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& has() const noexcept;
+
+private:
+#if ANKERL_NANOBENCH(PERF_COUNTERS)
+ LinuxPerformanceCounters* mPc = nullptr;
+#endif
+ PerfCountSet<uint64_t> mVal{};
+ PerfCountSet<bool> mHas{};
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// Gets the singleton
+PerformanceCounters& performanceCounters();
+
+} // namespace detail
+
+class BigO {
+public:
+ using RangeMeasure = std::vector<std::pair<double, double>>;
+
+ template <typename Op>
+ static RangeMeasure mapRangeMeasure(RangeMeasure data, Op op) {
+ for (auto& rangeMeasure : data) {
+ rangeMeasure.first = op(rangeMeasure.first);
+ }
+ return data;
+ }
+
+ static RangeMeasure collectRangeMeasure(std::vector<Result> const& results);
+
+ template <typename Op>
+ BigO(char const* bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
+ : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
+
+ template <typename Op>
+ BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
+ : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
+
+ BigO(char const* bigOName, RangeMeasure const& scaledRangeMeasure);
+ BigO(std::string const& bigOName, RangeMeasure const& scaledRangeMeasure);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double constant() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double normalizedRootMeanSquare() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) bool operator<(BigO const& other) const noexcept;
+
+private:
+ std::string mName{};
+ double mConstant{};
+ double mNormalizedRootMeanSquare{};
+};
+std::ostream& operator<<(std::ostream& os, BigO const& bigO);
+std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs);
+
+} // namespace nanobench
+} // namespace ankerl
+
+// implementation /////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+constexpr uint64_t(Rng::min)() {
+ return 0;
+}
+
+constexpr uint64_t(Rng::max)() {
+ return (std::numeric_limits<uint64_t>::max)();
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+uint64_t Rng::operator()() noexcept {
+ auto x = mX;
+
+ mX = UINT64_C(15241094284759029579) * mY;
+ mY = rotl(mY - x, 27);
+
+ return x;
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+uint32_t Rng::bounded(uint32_t range) noexcept {
+ uint64_t r32 = static_cast<uint32_t>(operator()());
+ auto multiresult = r32 * range;
+ return static_cast<uint32_t>(multiresult >> 32U);
+}
+
+double Rng::uniform01() noexcept {
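+ // Builds the bit pattern of a double in [1, 2): the constant 0x3ff << 52 is the exponent of 1.0,
+ // and the top 52 random bits fill the mantissa. The subtraction below then maps this to [0, 1).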
+ auto i = (UINT64_C(0x3ff) << 52U) | (operator()() >> 12U);
+ // can't use a union in C++ here for type punning, it's undefined behavior.
+ // std::memcpy is optimized anyway.
+ double d;
+ std::memcpy(&d, &i, sizeof(double));
+ return d - 1.0;
+}
+
+template <typename Container>
+void Rng::shuffle(Container& container) noexcept {
+ auto size = static_cast<uint32_t>(container.size());
+ for (auto i = size; i > 1U; --i) {
+ using std::swap;
+ auto p = bounded(i); // number in [0, i)
+ swap(container[i - 1], container[p]);
+ }
+}
+
+constexpr uint64_t Rng::rotl(uint64_t x, unsigned k) noexcept {
+ return (x << k) | (x >> (64U - k));
+}
+
+template <typename Op>
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+Bench& Bench::run(Op&& op) {
+ // It is important that this method is kept short so the compiler can do better optimizations/inlining of op()
+ detail::IterationLogic iterationLogic(*this);
+ auto& pc = detail::performanceCounters();
+
+ while (auto n = iterationLogic.numIters()) {
+ pc.beginMeasure();
+ Clock::time_point before = Clock::now();
+ while (n-- > 0) {
+ op();
+ }
+ Clock::time_point after = Clock::now();
+ pc.endMeasure();
+ pc.updateResults(iterationLogic.numIters());
+ iterationLogic.add(after - before, pc);
+ }
+ iterationLogic.moveResultTo(mResults);
+ return *this;
+}
+
+// Performs all evaluations.
+template <typename Op>
+Bench& Bench::run(char const* benchmarkName, Op&& op) {
+ name(benchmarkName);
+ return run(std::forward<Op>(op));
+}
+
+template <typename Op>
+Bench& Bench::run(std::string const& benchmarkName, Op&& op) {
+ name(benchmarkName);
+ return run(std::forward<Op>(op));
+}
+
+template <typename Op>
+BigO Bench::complexityBigO(char const* benchmarkName, Op op) const {
+ return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op);
+}
+
+template <typename Op>
+BigO Bench::complexityBigO(std::string const& benchmarkName, Op op) const {
+ return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op);
+}
+
+// Set the batch size, e.g. number of processed bytes, or some other metric for the size of the processed data in each iteration.
+// Any argument is cast to double.
+template <typename T>
+Bench& Bench::batch(T b) noexcept {
+ mConfig.mBatch = static_cast<double>(b);
+ return *this;
+}
+
+// Sets the computation complexity of the next run. Any argument is cast to double.
+template <typename T>
+Bench& Bench::complexityN(T n) noexcept {
+ mConfig.mComplexityN = static_cast<double>(n);
+ return *this;
+}
+
+// Convenience: makes sure none of the given arguments are optimized away by the compiler.
+template <typename Arg>
+Bench& Bench::doNotOptimizeAway(Arg&& arg) {
+ detail::doNotOptimizeAway(std::forward<Arg>(arg));
+ return *this;
+}
+
+// Makes sure none of the given arguments are optimized away by the compiler.
+template <typename Arg>
+void doNotOptimizeAway(Arg&& arg) {
+ detail::doNotOptimizeAway(std::forward<Arg>(arg));
+}
+
+namespace detail {
+
+#if defined(_MSC_VER)
+template <typename T>
+void doNotOptimizeAway(T const& val) {
+ doNotOptimizeAwaySink(&val);
+}
+
+#endif
+
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+#if defined(ANKERL_NANOBENCH_IMPLEMENT)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// implementation part - only visible in .cpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+# include <algorithm> // sort, reverse
+# include <atomic> // compare_exchange_strong in loop overhead
+# include <cstdlib> // getenv
+# include <cstring> // strstr, strncmp
+# include <fstream> // ifstream to parse proc files
+# include <iomanip> // setw, setprecision
+# include <iostream> // cout
+# include <numeric> // accumulate
+# include <random> // random_device
+# include <sstream> // to_s in Number
+# include <stdexcept> // throw for rendering templates
+# include <tuple> // std::tie
+# if defined(__linux__)
+# include <unistd.h> //sysconf
+# endif
+# if ANKERL_NANOBENCH(PERF_COUNTERS)
+# include <map> // map
+
+# include <linux/perf_event.h>
+# include <sys/ioctl.h>
+# include <sys/syscall.h>
+# include <unistd.h>
+# endif
+
+// declarations ///////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+// helper stuff that is only intended to be used internally
+namespace detail {
+
+struct TableInfo;
+
+// formatting utilities
+namespace fmt {
+
+class NumSep;
+class StreamStateRestorer;
+class Number;
+class MarkDownColumn;
+class MarkDownCode;
+
+} // namespace fmt
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+// definitions ////////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+uint64_t splitMix64(uint64_t& state) noexcept;
+
+namespace detail {
+
+// helpers to get double values
+template <typename T>
+inline double d(T t) noexcept {
+ return static_cast<double>(t);
+}
+inline double d(Clock::duration duration) noexcept {
+ return std::chrono::duration_cast<std::chrono::duration<double>>(duration).count();
+}
+
+// Calculates clock resolution once, and remembers the result
+inline Clock::duration clockResolution() noexcept;
+
+} // namespace detail
+
+namespace templates {
+
+char const* csv() noexcept {
+ return R"DELIM("title";"name";"unit";"batch";"elapsed";"error %";"instructions";"branches";"branch misses";"total"
+{{#result}}"{{title}}";"{{name}}";"{{unit}}";{{batch}};{{median(elapsed)}};{{medianAbsolutePercentError(elapsed)}};{{median(instructions)}};{{median(branchinstructions)}};{{median(branchmisses)}};{{sumProduct(iterations, elapsed)}}
+{{/result}})DELIM";
+}
+
+char const* htmlBoxplot() noexcept {
+ return R"DELIM(<html>
+
+<head>
+ <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
+</head>
+
+<body>
+ <div id="myDiv"></div>
+ <script>
+ var data = [
+ {{#result}}{
+ name: '{{name}}',
+ y: [{{#measurement}}{{elapsed}}{{^-last}}, {{/last}}{{/measurement}}],
+ },
+ {{/result}}
+ ];
+ var title = '{{title}}';
+
+ data = data.map(a => Object.assign(a, { boxpoints: 'all', pointpos: 0, type: 'box' }));
+ var layout = { title: { text: title }, showlegend: false, yaxis: { title: 'time per unit', rangemode: 'tozero', autorange: true } }; Plotly.newPlot('myDiv', data, layout, {responsive: true});
+ </script>
+</body>
+
+</html>)DELIM";
+}
+
+char const* json() noexcept {
+ return R"DELIM({
+ "results": [
+{{#result}} {
+ "title": "{{title}}",
+ "name": "{{name}}",
+ "unit": "{{unit}}",
+ "batch": {{batch}},
+ "complexityN": {{complexityN}},
+ "epochs": {{epochs}},
+ "clockResolution": {{clockResolution}},
+ "clockResolutionMultiple": {{clockResolutionMultiple}},
+ "maxEpochTime": {{maxEpochTime}},
+ "minEpochTime": {{minEpochTime}},
+ "minEpochIterations": {{minEpochIterations}},
+ "epochIterations": {{epochIterations}},
+ "warmup": {{warmup}},
+ "relative": {{relative}},
+ "median(elapsed)": {{median(elapsed)}},
+ "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}},
+ "median(instructions)": {{median(instructions)}},
+ "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}},
+ "median(cpucycles)": {{median(cpucycles)}},
+ "median(contextswitches)": {{median(contextswitches)}},
+ "median(pagefaults)": {{median(pagefaults)}},
+ "median(branchinstructions)": {{median(branchinstructions)}},
+ "median(branchmisses)": {{median(branchmisses)}},
+ "totalTime": {{sumProduct(iterations, elapsed)}},
+ "measurements": [
+{{#measurement}} {
+ "iterations": {{iterations}},
+ "elapsed": {{elapsed}},
+ "pagefaults": {{pagefaults}},
+ "cpucycles": {{cpucycles}},
+ "contextswitches": {{contextswitches}},
+ "instructions": {{instructions}},
+ "branchinstructions": {{branchinstructions}},
+ "branchmisses": {{branchmisses}}
+ }{{^-last}},{{/-last}}
+{{/measurement}} ]
+ }{{^-last}},{{/-last}}
+{{/result}} ]
+})DELIM";
+}
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+struct Node {
+ enum class Type { tag, content, section, inverted_section };
+
+ char const* begin;
+ char const* end;
+ std::vector<Node> children;
+ Type type;
+
+ template <size_t N>
+ // NOLINTNEXTLINE(hicpp-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
+ bool operator==(char const (&str)[N]) const noexcept {
+ return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1);
+ }
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+static std::vector<Node> parseMustacheTemplate(char const** tpl) {
+ std::vector<Node> nodes;
+
+ while (true) {
+ auto begin = std::strstr(*tpl, "{{");
+ auto end = begin;
+ if (begin != nullptr) {
+ begin += 2;
+ end = std::strstr(begin, "}}");
+ }
+
+ if (begin == nullptr || end == nullptr) {
+ // nothing found, finish node
+ nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content});
+ return nodes;
+ }
+
+ nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content});
+
+ // we found a tag
+ *tpl = end + 2;
+ switch (*begin) {
+ case '/':
+ // finished! bail out
+ return nodes;
+
+ case '#':
+ nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section});
+ break;
+
+ case '^':
+ nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section});
+ break;
+
+ default:
+ nodes.emplace_back(Node{begin, end, std::vector<Node>{}, Node::Type::tag});
+ break;
+ }
+ }
+}
+
+static bool generateFirstLast(Node const& n, size_t idx, size_t size, std::ostream& out) {
+ bool matchFirst = n == "-first";
+ bool matchLast = n == "-last";
+ if (!matchFirst && !matchLast) {
+ return false;
+ }
+
+ bool doWrite = false;
+ if (n.type == Node::Type::section) {
+ doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1);
+ } else if (n.type == Node::Type::inverted_section) {
+ doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1);
+ }
+
+ if (doWrite) {
+ for (auto const& child : n.children) {
+ if (child.type == Node::Type::content) {
+ out.write(child.begin, std::distance(child.begin, child.end));
+ }
+ }
+ }
+ return true;
+}
+
+static bool matchCmdArgs(std::string const& str, std::vector<std::string>& matchResult) {
+ matchResult.clear();
+ auto idxOpen = str.find('(');
+ auto idxClose = str.find(')', idxOpen);
+ if (idxClose == std::string::npos) {
+ return false;
+ }
+
+ matchResult.emplace_back(str.substr(0, idxOpen));
+
+ // split by comma
+ matchResult.emplace_back(std::string{});
+ for (size_t i = idxOpen + 1; i != idxClose; ++i) {
+ if (str[i] == ' ' || str[i] == '\t') {
+ // skip whitespace
+ continue;
+ }
+ if (str[i] == ',') {
+ // got a comma => new string
+ matchResult.emplace_back(std::string{});
+ continue;
+ }
+ // no whitespace no comma, append
+ matchResult.back() += str[i];
+ }
+ return true;
+}
+
+static bool generateConfigTag(Node const& n, Config const& config, std::ostream& out) {
+ using detail::d;
+
+ if (n == "title") {
+ out << config.mBenchmarkTitle;
+ return true;
+ } else if (n == "name") {
+ out << config.mBenchmarkName;
+ return true;
+ } else if (n == "unit") {
+ out << config.mUnit;
+ return true;
+ } else if (n == "batch") {
+ out << config.mBatch;
+ return true;
+ } else if (n == "complexityN") {
+ out << config.mComplexityN;
+ return true;
+ } else if (n == "epochs") {
+ out << config.mNumEpochs;
+ return true;
+ } else if (n == "clockResolution") {
+ out << d(detail::clockResolution());
+ return true;
+ } else if (n == "clockResolutionMultiple") {
+ out << config.mClockResolutionMultiple;
+ return true;
+ } else if (n == "maxEpochTime") {
+ out << d(config.mMaxEpochTime);
+ return true;
+ } else if (n == "minEpochTime") {
+ out << d(config.mMinEpochTime);
+ return true;
+ } else if (n == "minEpochIterations") {
+ out << config.mMinEpochIterations;
+ return true;
+ } else if (n == "epochIterations") {
+ out << config.mEpochIterations;
+ return true;
+ } else if (n == "warmup") {
+ out << config.mWarmup;
+ return true;
+ } else if (n == "relative") {
+ out << config.mIsRelative;
+ return true;
+ }
+ return false;
+}
+
+static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostream& out) {
+ if (generateConfigTag(n, r.config(), out)) {
+ return out;
+ }
+ // match e.g. "median(elapsed)"
+ // g++ 4.8 doesn't implement std::regex :(
+ // static std::regex const regOpArg1("^([a-zA-Z]+)\\(([a-zA-Z]*)\\)$");
+ // std::cmatch matchResult;
+ // if (std::regex_match(n.begin, n.end, matchResult, regOpArg1)) {
+ std::vector<std::string> matchResult;
+ if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
+ if (matchResult.size() == 2) {
+ auto m = Result::fromString(matchResult[1]);
+ if (m == Result::Measure::_size) {
+ return out << 0.0;
+ }
+
+ if (matchResult[0] == "median") {
+ return out << r.median(m);
+ }
+ if (matchResult[0] == "average") {
+ return out << r.average(m);
+ }
+ if (matchResult[0] == "medianAbsolutePercentError") {
+ return out << r.medianAbsolutePercentError(m);
+ }
+ if (matchResult[0] == "sum") {
+ return out << r.sum(m);
+ }
+ if (matchResult[0] == "minimum") {
+ return out << r.minimum(m);
+ }
+ if (matchResult[0] == "maximum") {
+ return out << r.maximum(m);
+ }
+ } else if (matchResult.size() == 3) {
+ auto m1 = Result::fromString(matchResult[1]);
+ auto m2 = Result::fromString(matchResult[2]);
+ if (m1 == Result::Measure::_size || m2 == Result::Measure::_size) {
+ return out << 0.0;
+ }
+
+ if (matchResult[0] == "sumProduct") {
+ return out << r.sumProduct(m1, m2);
+ }
+ }
+ }
+
+ // match e.g. "sumProduct(elapsed, iterations)"
+ // static std::regex const regOpArg2("^([a-zA-Z]+)\\(([a-zA-Z]*)\\s*,\\s+([a-zA-Z]*)\\)$");
+
+ // nothing matches :(
+ throw std::runtime_error("command '" + std::string(n.begin, n.end) + "' not understood");
+}
+
+static void generateResultMeasurement(std::vector<Node> const& nodes, size_t idx, Result const& r, std::ostream& out) {
+ for (auto const& n : nodes) {
+ if (!generateFirstLast(n, idx, r.size(), out)) {
+ ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
+ switch (n.type) {
+ case Node::Type::content:
+ out.write(n.begin, std::distance(n.begin, n.end));
+ break;
+
+ case Node::Type::inverted_section:
+ throw std::runtime_error("got a inverted section inside measurement");
+
+ case Node::Type::section:
+ throw std::runtime_error("got a section inside measurement");
+
+ case Node::Type::tag: {
+ auto m = Result::fromString(std::string(n.begin, n.end));
+ if (m == Result::Measure::_size || !r.has(m)) {
+ out << 0.0;
+ } else {
+ out << r.get(idx, m);
+ }
+ break;
+ }
+ }
+ }
+ }
+}
+
+static void generateResult(std::vector<Node> const& nodes, size_t idx, std::vector<Result> const& results, std::ostream& out) {
+ auto const& r = results[idx];
+ for (auto const& n : nodes) {
+ if (!generateFirstLast(n, idx, results.size(), out)) {
+ ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
+ switch (n.type) {
+ case Node::Type::content:
+ out.write(n.begin, std::distance(n.begin, n.end));
+ break;
+
+ case Node::Type::inverted_section:
+ throw std::runtime_error("got a inverted section inside result");
+
+ case Node::Type::section:
+ if (n == "measurement") {
+ for (size_t i = 0; i < r.size(); ++i) {
+ generateResultMeasurement(n.children, i, r, out);
+ }
+ } else {
+ throw std::runtime_error("got a section inside result");
+ }
+ break;
+
+ case Node::Type::tag:
+ generateResultTag(n, r, out);
+ break;
+ }
+ }
+ }
+}
+
+} // namespace templates
+
+// helper stuff that is only intended to be used internally
+namespace detail {
+
+char const* getEnv(char const* name);
+bool isEndlessRunning(std::string const& name);
+
+template <typename T>
+T parseFile(std::string const& filename);
+
+void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations);
+void printStabilityInformationOnce(std::ostream* os);
+
+// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry.
+uint64_t& singletonHeaderHash() noexcept;
+
+// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference.
+Clock::duration calcClockResolution(size_t numEvaluations) noexcept;
+
+// formatting utilities
+namespace fmt {
+
+// adds thousands separator to numbers
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class NumSep : public std::numpunct<char> {
+public:
+ explicit NumSep(char sep);
+ char do_thousands_sep() const override;
+ std::string do_grouping() const override;
+
+private:
+ char mSep;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// RAII to save & restore a stream's state
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class StreamStateRestorer {
+public:
+ explicit StreamStateRestorer(std::ostream& s);
+ ~StreamStateRestorer();
+
+ // sets back all stream info that we remembered at construction
+ void restore();
+
+ // don't allow copying / moving
+ StreamStateRestorer(StreamStateRestorer const&) = delete;
+ StreamStateRestorer& operator=(StreamStateRestorer const&) = delete;
+ StreamStateRestorer(StreamStateRestorer&&) = delete;
+ StreamStateRestorer& operator=(StreamStateRestorer&&) = delete;
+
+private:
+ std::ostream& mStream;
+ std::locale mLocale;
+ std::streamsize const mPrecision;
+ std::streamsize const mWidth;
+ std::ostream::char_type const mFill;
+ std::ostream::fmtflags const mFmtFlags;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// Number formatter
+class Number {
+public:
+ Number(int width, int precision, double value);
+ Number(int width, int precision, int64_t value);
+ std::string to_s() const;
+
+private:
+ friend std::ostream& operator<<(std::ostream& os, Number const& n);
+ std::ostream& write(std::ostream& os) const;
+
+ int mWidth;
+ int mPrecision;
+ double mValue;
+};
+
+// helper replacement for std::to_string of signed/unsigned numbers so we are locale independent
+std::string to_s(uint64_t s);
+
+std::ostream& operator<<(std::ostream& os, Number const& n);
+
+class MarkDownColumn {
+public:
+ MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val);
+ std::string title() const;
+ std::string separator() const;
+ std::string invalid() const;
+ std::string value() const;
+
+private:
+ int mWidth;
+ int mPrecision;
+ std::string mTitle;
+ std::string mSuffix;
+ double mValue;
+};
+
+// Formats any text as markdown code, escaping backticks.
+class MarkDownCode {
+public:
+ explicit MarkDownCode(std::string const& what);
+
+private:
+ friend std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode);
+ std::ostream& write(std::ostream& os) const;
+
+ std::string mWhat{};
+};
+
+std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode);
+
+} // namespace fmt
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+// implementation /////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out) {
+ detail::fmt::StreamStateRestorer restorer(out);
+
+ out.precision(std::numeric_limits<double>::digits10);
+ auto nodes = templates::parseMustacheTemplate(&mustacheTemplate);
+
+ for (auto const& n : nodes) {
+ ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
+ switch (n.type) {
+ case templates::Node::Type::content:
+ out.write(n.begin, std::distance(n.begin, n.end));
+ break;
+
+ case templates::Node::Type::inverted_section:
+ throw std::runtime_error("unknown list '" + std::string(n.begin, n.end) + "'");
+
+ case templates::Node::Type::section:
+ if (n == "result") {
+ const size_t nbResults = results.size();
+ for (size_t i = 0; i < nbResults; ++i) {
+ generateResult(n.children, i, results, out);
+ }
+ } else {
+ throw std::runtime_error("unknown section '" + std::string(n.begin, n.end) + "'");
+ }
+ break;
+
+ case templates::Node::Type::tag:
+ // This just uses the last result's config.
+ if (!generateConfigTag(n, results.back().config(), out)) {
+ throw std::runtime_error("unknown tag '" + std::string(n.begin, n.end) + "'");
+ }
+ break;
+ }
+ }
+}
+
+void render(char const* mustacheTemplate, const Bench& bench, std::ostream& out) {
+ render(mustacheTemplate, bench.results(), out);
+}
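+
+// Minimal usage sketch (assuming the standard {{#...}}/{{/...}} mustache section syntax parsed
+// by parseMustacheTemplate): dump every raw measurement of every result, one line each.
+//
+//   char const* tpl = "{{#result}}{{#measurement}}{{elapsed}};{{iterations}}\n{{/measurement}}{{/result}}";
+//   ankerl::nanobench::render(tpl, bench, std::cout);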
+
+namespace detail {
+
+PerformanceCounters& performanceCounters() {
+# if defined(__clang__)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wexit-time-destructors"
+# endif
+ static PerformanceCounters pc;
+# if defined(__clang__)
+# pragma clang diagnostic pop
+# endif
+ return pc;
+}
+
+// Windows version of doNotOptimizeAway
+// see https://github.com/google/benchmark/blob/master/include/benchmark/benchmark.h#L307
+// see https://github.com/facebook/folly/blob/master/folly/Benchmark.h#L280
+// see https://docs.microsoft.com/en-us/cpp/preprocessor/optimize
+# if defined(_MSC_VER)
+# pragma optimize("", off)
+void doNotOptimizeAwaySink(void const*) {}
+# pragma optimize("", on)
+# endif
+
+template <typename T>
+T parseFile(std::string const& filename) {
+ std::ifstream fin(filename);
+ T num{};
+ fin >> num;
+ return num;
+}
+
+char const* getEnv(char const* name) {
+# if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4996) // getenv': This function or variable may be unsafe.
+# endif
+ return std::getenv(name);
+# if defined(_MSC_VER)
+# pragma warning(pop)
+# endif
+}
+
+bool isEndlessRunning(std::string const& name) {
+ auto endless = getEnv("NANOBENCH_ENDLESS");
+ return nullptr != endless && endless == name;
+}
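+
+// Illustrative example: setting the environment variable to an exact benchmark name, e.g.
+// NANOBENCH_ENDLESS=RollingBloom, makes that benchmark run endlessly (handy for attaching a
+// profiler); see the State::endless handling in IterationLogic below.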
+
+void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations) {
+ warnings.clear();
+ recommendations.clear();
+
+ bool recommendCheckFlags = false;
+
+# if defined(DEBUG)
+ warnings.emplace_back("DEBUG defined");
+ recommendCheckFlags = true;
+# endif
+
+ bool recommendPyPerf = false;
+# if defined(__linux__)
+ auto nprocs = sysconf(_SC_NPROCESSORS_CONF);
+ if (nprocs <= 0) {
+ warnings.emplace_back("couldn't figure out number of processors - no governor, turbo check possible");
+ } else {
+
+ // check frequency scaling
+ for (long id = 0; id < nprocs; ++id) {
+ auto idStr = detail::fmt::to_s(static_cast<uint64_t>(id));
+ auto sysCpu = "/sys/devices/system/cpu/cpu" + idStr;
+ auto minFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_min_freq");
+ auto maxFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_max_freq");
+ if (minFreq != maxFreq) {
+ auto minMHz = static_cast<double>(minFreq) / 1000.0;
+ auto maxMHz = static_cast<double>(maxFreq) / 1000.0;
+ warnings.emplace_back("CPU frequency scaling enabled: CPU " + idStr + " between " +
+ detail::fmt::Number(1, 1, minMHz).to_s() + " and " + detail::fmt::Number(1, 1, maxMHz).to_s() +
+ " MHz");
+ recommendPyPerf = true;
+ break;
+ }
+ }
+
+ auto currentGovernor = parseFile<std::string>("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
+ if ("performance" != currentGovernor) {
+ warnings.emplace_back("CPU governor is '" + currentGovernor + "' but should be 'performance'");
+ recommendPyPerf = true;
+ }
+
+ if (0 == parseFile<int>("/sys/devices/system/cpu/intel_pstate/no_turbo")) {
+ warnings.emplace_back("Turbo is enabled, CPU frequency will fluctuate");
+ recommendPyPerf = true;
+ }
+ }
+# endif
+
+ if (recommendCheckFlags) {
+ recommendations.emplace_back("Make sure you compile for Release");
+ }
+ if (recommendPyPerf) {
+ recommendations.emplace_back("Use 'pyperf system tune' before benchmarking. See https://github.com/vstinner/pyperf");
+ }
+}
+
+void printStabilityInformationOnce(std::ostream* outStream) {
+ static bool shouldPrint = true;
+ if (shouldPrint && outStream) {
+ auto& os = *outStream;
+ shouldPrint = false;
+ std::vector<std::string> warnings;
+ std::vector<std::string> recommendations;
+ gatherStabilityInformation(warnings, recommendations);
+ if (warnings.empty()) {
+ return;
+ }
+
+ os << "Warning, results might be unstable:" << std::endl;
+ for (auto const& w : warnings) {
+ os << "* " << w << std::endl;
+ }
+
+ os << std::endl << "Recommendations" << std::endl;
+ for (auto const& r : recommendations) {
+ os << "* " << r << std::endl;
+ }
+ }
+}
+
+// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry.
+uint64_t& singletonHeaderHash() noexcept {
+ static uint64_t sHeaderHash{};
+ return sHeaderHash;
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+inline uint64_t fnv1a(std::string const& str) noexcept {
+ auto val = UINT64_C(14695981039346656037);
+ for (auto c : str) {
+ val = (val ^ static_cast<uint8_t>(c)) * UINT64_C(1099511628211);
+ }
+ return val;
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+inline uint64_t hash_combine(uint64_t seed, uint64_t val) {
+ return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U));
+}
+
+// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference.
+Clock::duration calcClockResolution(size_t numEvaluations) noexcept {
+ auto bestDuration = Clock::duration::max();
+ Clock::time_point tBegin;
+ Clock::time_point tEnd;
+ for (size_t i = 0; i < numEvaluations; ++i) {
+ tBegin = Clock::now();
+ do {
+ tEnd = Clock::now();
+ } while (tBegin == tEnd);
+ bestDuration = (std::min)(bestDuration, tEnd - tBegin);
+ }
+ return bestDuration;
+}
+
+// Calculates clock resolution once, and remembers the result
+Clock::duration clockResolution() noexcept {
+ static Clock::duration sResolution = calcClockResolution(20);
+ return sResolution;
+}
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+struct IterationLogic::Impl {
+ enum class State { warmup, upscaling_runtime, measuring, endless };
+
+ explicit Impl(Bench const& bench)
+ : mBench(bench)
+ , mResult(bench.config()) {
+ printStabilityInformationOnce(mBench.output());
+
+ // determine target runtime per epoch
+ mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple();
+ if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) {
+ mTargetRuntimePerEpoch = mBench.maxEpochTime();
+ }
+ if (mTargetRuntimePerEpoch < mBench.minEpochTime()) {
+ mTargetRuntimePerEpoch = mBench.minEpochTime();
+ }
+
+ if (isEndlessRunning(mBench.name())) {
+ std::cerr << "NANOBENCH_ENDLESS set: running '" << mBench.name() << "' endlessly" << std::endl;
+ mNumIters = (std::numeric_limits<uint64_t>::max)();
+ mState = State::endless;
+ } else if (0 != mBench.warmup()) {
+ mNumIters = mBench.warmup();
+ mState = State::warmup;
+ } else if (0 != mBench.epochIterations()) {
+ // exact number of iterations
+ mNumIters = mBench.epochIterations();
+ mState = State::measuring;
+ } else {
+ mNumIters = mBench.minEpochIterations();
+ mState = State::upscaling_runtime;
+ }
+ }
+
+ // directly calculates new iters based on elapsed & iters, and adds 0-20% random noise. Makes sure we don't underflow.
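+ // (e.g. with a 1 ms target and 0.1 ms elapsed over 100 iters this gives 1.0 / 0.1 * 100 = 1000
+ // iterations, then scaled by a random factor in [1.0, 1.2))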
+ ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters) noexcept {
+ auto doubleElapsed = d(elapsed);
+ auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch);
+ auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters);
+
+ auto doubleMinEpochIters = d(mBench.minEpochIterations());
+ if (doubleNewIters < doubleMinEpochIters) {
+ doubleNewIters = doubleMinEpochIters;
+ }
+ doubleNewIters *= 1.0 + 0.2 * mRng.uniform01();
+
+ // +0.5 for correct rounding when casting
+ // NOLINTNEXTLINE(bugprone-incorrect-roundings)
+ return static_cast<uint64_t>(doubleNewIters + 0.5);
+ }
+
+ ANKERL_NANOBENCH_NO_SANITIZE("integer") void upscale(std::chrono::nanoseconds elapsed) {
+ if (elapsed * 10 < mTargetRuntimePerEpoch) {
+ // we are far below the target runtime. Multiply iterations by 10 (with overflow check)
+ if (mNumIters * 10 < mNumIters) {
+ // overflow :-(
+ showResult("iterations overflow. Maybe your code got optimized away?");
+ mNumIters = 0;
+ return;
+ }
+ mNumIters *= 10;
+ } else {
+ mNumIters = calcBestNumIters(elapsed, mNumIters);
+ }
+ }
+
+ void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept {
+# if defined(ANKERL_NANOBENCH_LOG_ENABLED)
+ auto oldIters = mNumIters;
+# endif
+
+ switch (mState) {
+ case State::warmup:
+ if (isCloseEnoughForMeasurements(elapsed)) {
+ // if elapsed is close enough, we can skip upscaling and go right to measurements
+ // still, we don't add the result to the measurements.
+ mState = State::measuring;
+ mNumIters = calcBestNumIters(elapsed, mNumIters);
+ } else {
+ // not close enough: switch to upscaling
+ mState = State::upscaling_runtime;
+ upscale(elapsed);
+ }
+ break;
+
+ case State::upscaling_runtime:
+ if (isCloseEnoughForMeasurements(elapsed)) {
+ // if we are close enough, add measurement and switch to always measuring
+ mState = State::measuring;
+ mTotalElapsed += elapsed;
+ mTotalNumIters += mNumIters;
+ mResult.add(elapsed, mNumIters, pc);
+ mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
+ } else {
+ upscale(elapsed);
+ }
+ break;
+
+ case State::measuring:
+ // just add measurements - no questions asked, even when the runtime is low. We can't ignore
+ // that fluctuation, or else we would bias the result.
+ mTotalElapsed += elapsed;
+ mTotalNumIters += mNumIters;
+ mResult.add(elapsed, mNumIters, pc);
+ if (0 != mBench.epochIterations()) {
+ mNumIters = mBench.epochIterations();
+ } else {
+ mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
+ }
+ break;
+
+ case State::endless:
+ mNumIters = (std::numeric_limits<uint64_t>::max)();
+ break;
+ }
+
+ if (static_cast<uint64_t>(mResult.size()) == mBench.epochs()) {
+ // we got all the results that we need, finish it
+ showResult("");
+ mNumIters = 0;
+ }
+
+ ANKERL_NANOBENCH_LOG(mBench.name() << ": " << detail::fmt::Number(20, 3, static_cast<double>(elapsed.count())) << " elapsed, "
+ << detail::fmt::Number(20, 3, static_cast<double>(mTargetRuntimePerEpoch.count()))
+ << " target. oldIters=" << oldIters << ", mNumIters=" << mNumIters
+ << ", mState=" << static_cast<int>(mState));
+ }
+
+ void showResult(std::string const& errorMessage) const {
+ ANKERL_NANOBENCH_LOG(errorMessage);
+
+ if (mBench.output() != nullptr) {
+ // prepare column data ///////
+ std::vector<fmt::MarkDownColumn> columns;
+
+ auto rMedian = mResult.median(Result::Measure::elapsed);
+
+ if (mBench.relative()) {
+ double d = 100.0;
+ if (!mBench.results().empty()) {
+ d = rMedian <= 0.0 ? 0.0 : mBench.results().front().median(Result::Measure::elapsed) / rMedian * 100.0;
+ }
+ columns.emplace_back(11, 1, "relative", "%", d);
+ }
+
+ if (mBench.complexityN() > 0) {
+ columns.emplace_back(14, 0, "complexityN", "", mBench.complexityN());
+ }
+
+ columns.emplace_back(22, 2, "ns/" + mBench.unit(), "", 1e9 * rMedian / mBench.batch());
+ columns.emplace_back(22, 2, mBench.unit() + "/s", "", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian);
+
+ double rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed);
+ columns.emplace_back(10, 1, "err%", "%", rErrorMedian * 100.0);
+
+ double rInsMedian = -1.0;
+ if (mResult.has(Result::Measure::instructions)) {
+ rInsMedian = mResult.median(Result::Measure::instructions);
+ columns.emplace_back(18, 2, "ins/" + mBench.unit(), "", rInsMedian / mBench.batch());
+ }
+
+ double rCycMedian = -1.0;
+ if (mResult.has(Result::Measure::cpucycles)) {
+ rCycMedian = mResult.median(Result::Measure::cpucycles);
+ columns.emplace_back(18, 2, "cyc/" + mBench.unit(), "", rCycMedian / mBench.batch());
+ }
+ if (rInsMedian > 0.0 && rCycMedian > 0.0) {
+ columns.emplace_back(9, 3, "IPC", "", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian);
+ }
+ if (mResult.has(Result::Measure::branchinstructions)) {
+ double rBraMedian = mResult.median(Result::Measure::branchinstructions);
+ columns.emplace_back(17, 2, "bra/" + mBench.unit(), "", rBraMedian / mBench.batch());
+ if (mResult.has(Result::Measure::branchmisses)) {
+ double p = 0.0;
+ if (rBraMedian >= 1e-9) {
+ p = 100.0 * mResult.median(Result::Measure::branchmisses) / rBraMedian;
+ }
+ columns.emplace_back(10, 1, "miss%", "%", p);
+ }
+ }
+
+ columns.emplace_back(12, 2, "total", "", mResult.sum(Result::Measure::elapsed));
+
+ // write everything
+ auto& os = *mBench.output();
+
+ uint64_t hash = 0;
+ hash = hash_combine(fnv1a(mBench.unit()), hash);
+ hash = hash_combine(fnv1a(mBench.title()), hash);
+ hash = hash_combine(mBench.relative(), hash);
+ hash = hash_combine(mBench.performanceCounters(), hash);
+
+ if (hash != singletonHeaderHash()) {
+ singletonHeaderHash() = hash;
+
+ // table settings changed (or nothing printed yet): print a new header
+ os << std::endl;
+ for (auto const& col : columns) {
+ os << col.title();
+ }
+ os << "| " << mBench.title() << std::endl;
+
+ for (auto const& col : columns) {
+ os << col.separator();
+ }
+ os << "|:" << std::string(mBench.title().size() + 1U, '-') << std::endl;
+ }
+
+ if (!errorMessage.empty()) {
+ for (auto const& col : columns) {
+ os << col.invalid();
+ }
+ os << "| :boom: " << fmt::MarkDownCode(mBench.name()) << " (" << errorMessage << ')' << std::endl;
+ } else {
+ for (auto const& col : columns) {
+ os << col.value();
+ }
+ os << "| ";
+ auto showUnstable = rErrorMedian >= 0.05;
+ if (showUnstable) {
+ os << ":wavy_dash: ";
+ }
+ os << fmt::MarkDownCode(mBench.name());
+ if (showUnstable) {
+ auto avgIters = static_cast<double>(mTotalNumIters) / static_cast<double>(mBench.epochs());
+ // NOLINTNEXTLINE(bugprone-incorrect-roundings)
+ auto suggestedIters = static_cast<uint64_t>(avgIters * 10 + 0.5);
+
+ os << " (Unstable with ~" << detail::fmt::Number(1, 1, avgIters)
+ << " iters. Increase `minEpochIterations` to e.g. " << suggestedIters << ")";
+ }
+ os << std::endl;
+ }
+ }
+ }
+
+ ANKERL_NANOBENCH(NODISCARD) bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed) const noexcept {
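+ // i.e. elapsed is at least two thirds of the target runtime per epoch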
+ return elapsed * 3 >= mTargetRuntimePerEpoch * 2;
+ }
+
+ uint64_t mNumIters = 1;
+ Bench const& mBench;
+ std::chrono::nanoseconds mTargetRuntimePerEpoch{};
+ Result mResult;
+ Rng mRng{123};
+ std::chrono::nanoseconds mTotalElapsed{};
+ uint64_t mTotalNumIters = 0;
+
+ State mState = State::upscaling_runtime;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+IterationLogic::IterationLogic(Bench const& bench) noexcept
+ : mPimpl(new Impl(bench)) {}
+
+IterationLogic::~IterationLogic() {
+ if (mPimpl) {
+ delete mPimpl;
+ }
+}
+
+uint64_t IterationLogic::numIters() const noexcept {
+ ANKERL_NANOBENCH_LOG(mPimpl->mBench.name() << ": mNumIters=" << mPimpl->mNumIters);
+ return mPimpl->mNumIters;
+}
+
+void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept {
+ mPimpl->add(elapsed, pc);
+}
+
+void IterationLogic::moveResultTo(std::vector<Result>& results) noexcept {
+ results.emplace_back(std::move(mPimpl->mResult));
+}
+
+# if ANKERL_NANOBENCH(PERF_COUNTERS)
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class LinuxPerformanceCounters {
+public:
+ struct Target {
+ Target(uint64_t* targetValue_, bool correctMeasuringOverhead_, bool correctLoopOverhead_)
+ : targetValue(targetValue_)
+ , correctMeasuringOverhead(correctMeasuringOverhead_)
+ , correctLoopOverhead(correctLoopOverhead_) {}
+
+ uint64_t* targetValue{};
+ bool correctMeasuringOverhead{};
+ bool correctLoopOverhead{};
+ };
+
+ ~LinuxPerformanceCounters();
+
+ // quick operation
+ inline void start() {}
+
+ inline void stop() {}
+
+ bool monitor(perf_sw_ids swId, Target target);
+ bool monitor(perf_hw_id hwId, Target target);
+
+ bool hasError() const noexcept {
+ return mHasError;
+ }
+
+ // Just reading data is faster than enabling & disabling the counters;
+ // we subtract the calibrated overhead ourselves.
+ inline void beginMeasure() {
+ if (mHasError) {
+ return;
+ }
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
+ if (mHasError) {
+ return;
+ }
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
+ }
+
+ inline void endMeasure() {
+ if (mHasError) {
+ return;
+ }
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP));
+ if (mHasError) {
+ return;
+ }
+
+ auto const numBytes = sizeof(uint64_t) * mCounters.size();
+ auto ret = read(mFd, mCounters.data(), numBytes);
+ mHasError = ret != static_cast<ssize_t>(numBytes);
+ }
+
+ void updateResults(uint64_t numIters);
+
+ // rounded integer division
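+ // e.g. divRounded(7, 2) == 4 and divRounded(5, 3) == 2, where plain integer division would give 3 and 1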
+ template <typename T>
+ static inline T divRounded(T a, T divisor) {
+ return (a + divisor / 2) / divisor;
+ }
+
+ template <typename Op>
+ ANKERL_NANOBENCH_NO_SANITIZE("integer")
+ void calibrate(Op&& op) {
+ // clear current calibration data
+ for (auto& v : mCalibratedOverhead) {
+ v = UINT64_C(0);
+ }
+
+ // create new calibration data
+ auto newCalibration = mCalibratedOverhead;
+ for (auto& v : newCalibration) {
+ v = (std::numeric_limits<uint64_t>::max)();
+ }
+ for (size_t iter = 0; iter < 100; ++iter) {
+ beginMeasure();
+ op();
+ endMeasure();
+ if (mHasError) {
+ return;
+ }
+
+ for (size_t i = 0; i < newCalibration.size(); ++i) {
+ auto diff = mCounters[i];
+ if (newCalibration[i] > diff) {
+ newCalibration[i] = diff;
+ }
+ }
+ }
+
+ mCalibratedOverhead = std::move(newCalibration);
+
+ {
+ // calibrate loop overhead. For branches & instructions this makes sense, not so much for everything else like cycles.
+ // Marsaglia's xorshift: mov, sal/shr, xor. Times 3.
+ // This has the nice property that the compiler doesn't seem to be able to optimize multiple calls any further.
+ // see https://godbolt.org/z/49RVQ5
+ uint64_t const numIters = 100000U + (std::random_device{}() & 3);
+ uint64_t n = numIters;
+ uint32_t x = 1234567;
+ auto fn = [&]() {
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ };
+
+ beginMeasure();
+ while (n-- > 0) {
+ fn();
+ }
+ endMeasure();
+ detail::doNotOptimizeAway(x);
+ auto measure1 = mCounters;
+
+ n = numIters;
+ beginMeasure();
+ while (n-- > 0) {
+ // we now run *twice* so we can easily calculate the overhead
+ fn();
+ fn();
+ }
+ endMeasure();
+ detail::doNotOptimizeAway(x);
+ auto measure2 = mCounters;
+
+ for (size_t i = 0; i < mCounters.size(); ++i) {
+ // factor 2 because the second loop calls fn() twice per iteration, so 2*m1 - m2 leaves just the loop overhead
+ auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0;
+ auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0;
+ auto overhead = m1 * 2 > m2 ? m1 * 2 - m2 : 0;
+
+ mLoopOverhead[i] = divRounded(overhead, numIters);
+ }
+ }
+ }
+
+private:
+ bool monitor(uint32_t type, uint64_t eventid, Target target);
+
+ std::map<uint64_t, Target> mIdToTarget{};
+
+ // start with minimum size of 3 for read_format
+ std::vector<uint64_t> mCounters{3};
+ std::vector<uint64_t> mCalibratedOverhead{3};
+ std::vector<uint64_t> mLoopOverhead{3};
+
+ uint64_t mTimeEnabledNanos = 0;
+ uint64_t mTimeRunningNanos = 0;
+ int mFd = -1;
+ bool mHasError = false;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+LinuxPerformanceCounters::~LinuxPerformanceCounters() {
+ if (-1 != mFd) {
+ close(mFd);
+ }
+}
+
+bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) {
+ return monitor(PERF_TYPE_SOFTWARE, swId, target);
+}
+
+bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) {
+ return monitor(PERF_TYPE_HARDWARE, hwId, target);
+}
+
+// overflow is ok, it's checked
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+void LinuxPerformanceCounters::updateResults(uint64_t numIters) {
+ // clear old data
+ for (auto& id_value : mIdToTarget) {
+ *id_value.second.targetValue = UINT64_C(0);
+ }
+
+ if (mHasError) {
+ return;
+ }
+
+ mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1];
+ mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2];
+
+ for (uint64_t i = 0; i < mCounters[0]; ++i) {
+ auto idx = static_cast<size_t>(3 + i * 2 + 0);
+ auto id = mCounters[idx + 1U];
+
+ auto it = mIdToTarget.find(id);
+ if (it != mIdToTarget.end()) {
+
+ auto& tgt = it->second;
+ *tgt.targetValue = mCounters[idx];
+ if (tgt.correctMeasuringOverhead) {
+ if (*tgt.targetValue >= mCalibratedOverhead[idx]) {
+ *tgt.targetValue -= mCalibratedOverhead[idx];
+ } else {
+ *tgt.targetValue = 0U;
+ }
+ }
+ if (tgt.correctLoopOverhead) {
+ auto correctionVal = mLoopOverhead[idx] * numIters;
+ if (*tgt.targetValue >= correctionVal) {
+ *tgt.targetValue -= correctionVal;
+ } else {
+ *tgt.targetValue = 0U;
+ }
+ }
+ }
+ }
+}
+
+bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) {
+ *target.targetValue = (std::numeric_limits<uint64_t>::max)();
+ if (mHasError) {
+ return false;
+ }
+
+ auto pea = perf_event_attr();
+ std::memset(&pea, 0, sizeof(perf_event_attr));
+ pea.type = type;
+ pea.size = sizeof(perf_event_attr);
+ pea.config = eventid;
+ pea.disabled = 1; // start counter as disabled
+ pea.exclude_kernel = 1;
+ pea.exclude_hv = 1;
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+ const int pid = 0; // the current process
+ const int cpu = -1; // all CPUs
+# if defined(PERF_FLAG_FD_CLOEXEC) // since Linux 3.14
+ const unsigned long flags = PERF_FLAG_FD_CLOEXEC;
+# else
+ const unsigned long flags = 0;
+# endif
+
+ auto fd = static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd, flags));
+ if (-1 == fd) {
+ return false;
+ }
+ if (-1 == mFd) {
+ // first call: set to fd, and use this from now on
+ mFd = fd;
+ }
+ uint64_t id = 0;
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &id)) {
+ // couldn't get id
+ return false;
+ }
+
+ // insert into map, rely on the fact that map's references remain stable across later inserts.
+ mIdToTarget.emplace(id, target);
+
+ // prepare readformat with the correct size (after the insert)
+ auto size = 3 + 2 * mIdToTarget.size();
+ mCounters.resize(size);
+ mCalibratedOverhead.resize(size);
+ mLoopOverhead.resize(size);
+
+ return true;
+}
+
+PerformanceCounters::PerformanceCounters()
+ : mPc(new LinuxPerformanceCounters())
+ , mVal()
+ , mHas() {
+
+ mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults, true, false));
+ mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles, true, false));
+ mHas.contextSwitches =
+ mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches, true, false));
+ mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions, true, true));
+ mHas.branchInstructions =
+ mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions, true, false));
+ mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses, true, false));
+ // mHas.branchMisses = false;
+
+ mPc->start();
+ mPc->calibrate([] {
+ auto before = ankerl::nanobench::Clock::now();
+ auto after = ankerl::nanobench::Clock::now();
+ (void)before;
+ (void)after;
+ });
+
+ if (mPc->hasError()) {
+ // something failed, don't monitor anything.
+ mHas = PerfCountSet<bool>{};
+ }
+}
+
+PerformanceCounters::~PerformanceCounters() {
+ if (nullptr != mPc) {
+ delete mPc;
+ }
+}
+
+void PerformanceCounters::beginMeasure() {
+ mPc->beginMeasure();
+}
+
+void PerformanceCounters::endMeasure() {
+ mPc->endMeasure();
+}
+
+void PerformanceCounters::updateResults(uint64_t numIters) {
+ mPc->updateResults(numIters);
+}
+
+# else
+
+PerformanceCounters::PerformanceCounters() = default;
+PerformanceCounters::~PerformanceCounters() = default;
+void PerformanceCounters::beginMeasure() {}
+void PerformanceCounters::endMeasure() {}
+void PerformanceCounters::updateResults(uint64_t) {}
+
+# endif
+
+ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& PerformanceCounters::val() const noexcept {
+ return mVal;
+}
+ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& PerformanceCounters::has() const noexcept {
+ return mHas;
+}
+
+// formatting utilities
+namespace fmt {
+
+// adds thousands separator to numbers
+NumSep::NumSep(char sep)
+ : mSep(sep) {}
+
+char NumSep::do_thousands_sep() const {
+ return mSep;
+}
+
+std::string NumSep::do_grouping() const {
+ return "\003";
+}
+
+// RAII to save & restore a stream's state
+StreamStateRestorer::StreamStateRestorer(std::ostream& s)
+ : mStream(s)
+ , mLocale(s.getloc())
+ , mPrecision(s.precision())
+ , mWidth(s.width())
+ , mFill(s.fill())
+ , mFmtFlags(s.flags()) {}
+
+StreamStateRestorer::~StreamStateRestorer() {
+ restore();
+}
+
+// sets back all stream info that we remembered at construction
+void StreamStateRestorer::restore() {
+ mStream.imbue(mLocale);
+ mStream.precision(mPrecision);
+ mStream.width(mWidth);
+ mStream.fill(mFill);
+ mStream.flags(mFmtFlags);
+}
+
+Number::Number(int width, int precision, int64_t value)
+ : mWidth(width)
+ , mPrecision(precision)
+ , mValue(static_cast<double>(value)) {}
+
+Number::Number(int width, int precision, double value)
+ : mWidth(width)
+ , mPrecision(precision)
+ , mValue(value) {}
+
+std::ostream& Number::write(std::ostream& os) const {
+ StreamStateRestorer restorer(os);
+ os.imbue(std::locale(os.getloc(), new NumSep(',')));
+ os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue;
+ return os;
+}
+
+std::string Number::to_s() const {
+ std::stringstream ss;
+ write(ss);
+ return ss.str();
+}
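+
+// Illustrative example: Number(10, 2, 1234.5).to_s() should render as "  1,234.50"
+// (fixed notation, precision 2, ',' thousands separator, right-aligned to width 10).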
+
+std::string to_s(uint64_t n) {
+ std::string str;
+ do {
+ str += static_cast<char>('0' + static_cast<char>(n % 10));
+ n /= 10;
+ } while (n != 0);
+ std::reverse(str.begin(), str.end());
+ return str;
+}
+
+std::ostream& operator<<(std::ostream& os, Number const& n) {
+ return n.write(os);
+}
+
+MarkDownColumn::MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val)
+ : mWidth(w)
+ , mPrecision(prec)
+ , mTitle(tit)
+ , mSuffix(suff)
+ , mValue(val) {}
+
+std::string MarkDownColumn::title() const {
+ std::stringstream ss;
+ ss << '|' << std::setw(mWidth - 2) << std::right << mTitle << ' ';
+ return ss.str();
+}
+
+std::string MarkDownColumn::separator() const {
+ std::string sep(static_cast<size_t>(mWidth), '-');
+ sep.front() = '|';
+ sep.back() = ':';
+ return sep;
+}
+
+std::string MarkDownColumn::invalid() const {
+ std::string sep(static_cast<size_t>(mWidth), ' ');
+ sep.front() = '|';
+ sep[sep.size() - 2] = '-';
+ return sep;
+}
+
+std::string MarkDownColumn::value() const {
+ std::stringstream ss;
+ auto width = mWidth - 2 - static_cast<int>(mSuffix.size());
+ ss << '|' << Number(width, mPrecision, mValue) << mSuffix << ' ';
+ return ss.str();
+}
+
+// Formats any text as markdown code, escaping backticks.
+MarkDownCode::MarkDownCode(std::string const& what) {
+ mWhat.reserve(what.size() + 2);
+ mWhat.push_back('`');
+ for (char c : what) {
+ mWhat.push_back(c);
+ if ('`' == c) {
+ mWhat.push_back('`');
+ }
+ }
+ mWhat.push_back('`');
+}
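+
+// e.g. MarkDownCode("foo`bar") stores `foo``bar` - the text is wrapped in backticks and any
+// backtick inside it is doubled.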
+
+std::ostream& MarkDownCode::write(std::ostream& os) const {
+ return os << mWhat;
+}
+
+std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode) {
+ return mdCode.write(os);
+}
+} // namespace fmt
+} // namespace detail
+
+// provide implementation here so it's only generated once
+Config::Config() = default;
+Config::~Config() = default;
+Config& Config::operator=(Config const&) = default;
+Config& Config::operator=(Config&&) = default;
+Config::Config(Config const&) = default;
+Config::Config(Config&&) noexcept = default;
+
+// provide implementation here so it's only generated once
+Result::~Result() = default;
+Result& Result::operator=(Result const&) = default;
+Result& Result::operator=(Result&&) = default;
+Result::Result(Result const&) = default;
+Result::Result(Result&&) noexcept = default;
+
+namespace detail {
+template <typename T>
+inline constexpr typename std::underlying_type<T>::type u(T val) noexcept {
+ return static_cast<typename std::underlying_type<T>::type>(val);
+}
+} // namespace detail
+
+// Result returned after a benchmark has finished. Can be used as a baseline for relative().
+Result::Result(Config const& benchmarkConfig)
+ : mConfig(benchmarkConfig)
+ , mNameToMeasurements{detail::u(Result::Measure::_size)} {}
+
+void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc) {
+ using detail::d;
+ using detail::u;
+
+ double dIters = d(iters);
+ mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters);
+
+ mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters);
+ if (pc.has().pageFaults) {
+ mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters);
+ }
+ if (pc.has().cpuCycles) {
+ mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters);
+ }
+ if (pc.has().contextSwitches) {
+ mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters);
+ }
+ if (pc.has().instructions) {
+ mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters);
+ }
+ if (pc.has().branchInstructions) {
+ double branchInstructions = 0.0;
+ // correcting branches: remove branch introduced by the while (...) loop for each iteration.
+ if (pc.val().branchInstructions > iters + 1U) {
+ branchInstructions = d(pc.val().branchInstructions - (iters + 1U));
+ }
+ mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters);
+
+ if (pc.has().branchMisses) {
+ // correcting branch misses
+ double branchMisses = d(pc.val().branchMisses);
+ if (branchMisses > branchInstructions) {
+ // can't have branch misses when there were branches...
+ branchMisses = branchInstructions;
+ }
+
+ // assuming at least one missed branch for the loop
+ branchMisses -= 1.0;
+ if (branchMisses < 1.0) {
+ branchMisses = 1.0;
+ }
+ mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters);
+ }
+ }
+}
+
+Config const& Result::config() const noexcept {
+ return mConfig;
+}
+
+inline double calcMedian(std::vector<double>& data) {
+ if (data.empty()) {
+ return 0.0;
+ }
+ std::sort(data.begin(), data.end());
+
+ auto midIdx = data.size() / 2U;
+ if (1U == (data.size() & 1U)) {
+ return data[midIdx];
+ }
+ return (data[midIdx - 1U] + data[midIdx]) / 2U;
+}
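+
+// e.g. calcMedian on {3.0, 1.0, 2.0} yields 2.0, and on {4.0, 1.0, 3.0, 2.0} yields 2.5.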
+
+double Result::median(Measure m) const {
+ // create a copy so we can sort
+ auto data = mNameToMeasurements[detail::u(m)];
+ return calcMedian(data);
+}
+
+double Result::average(Measure m) const {
+ using detail::d;
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ if (data.empty()) {
+ return 0.0;
+ }
+
+ // arithmetic mean: total sum divided by the number of measurements
+ return sum(m) / d(data.size());
+}
+
+double Result::medianAbsolutePercentError(Measure m) const {
+ // create copy
+ auto data = mNameToMeasurements[detail::u(m)];
+
+ // calculates MdAPE which is the median of percentage error
+ // see https://www.spiderfinancial.com/support/documentation/numxl/reference-manual/forecasting-performance/mdape
+ auto med = calcMedian(data);
+
+ // transform the data to absolute percentage errors
+ for (auto& x : data) {
+ x = (x - med) / x;
+ if (x < 0) {
+ x = -x;
+ }
+ }
+ return calcMedian(data);
+}
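+
+// Worked example: for measurements {10, 12, 8} the median is 10, the absolute percentage
+// errors are {0, 2/12, 2/8}, and their median - the MdAPE - is 2/12, i.e. roughly 16.7%.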
+
+double Result::sum(Measure m) const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ return std::accumulate(data.begin(), data.end(), 0.0);
+}
+
+double Result::sumProduct(Measure m1, Measure m2) const noexcept {
+ auto const& data1 = mNameToMeasurements[detail::u(m1)];
+ auto const& data2 = mNameToMeasurements[detail::u(m2)];
+
+ if (data1.size() != data2.size()) {
+ return 0.0;
+ }
+
+ double result = 0.0;
+ for (size_t i = 0, s = data1.size(); i != s; ++i) {
+ result += data1[i] * data2[i];
+ }
+ return result;
+}
+
+bool Result::has(Measure m) const noexcept {
+ return !mNameToMeasurements[detail::u(m)].empty();
+}
+
+double Result::get(size_t idx, Measure m) const {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ return data.at(idx);
+}
+
+bool Result::empty() const noexcept {
+ return 0U == size();
+}
+
+size_t Result::size() const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(Measure::elapsed)];
+ return data.size();
+}
+
+double Result::minimum(Measure m) const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ if (data.empty()) {
+ return 0.0;
+ }
+
+ // here it's safe to assume that at least one element is there
+ return *std::min_element(data.begin(), data.end());
+}
+
+double Result::maximum(Measure m) const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ if (data.empty()) {
+ return 0.0;
+ }
+
+ // here it's safe to assume that at least one element is there
+ return *std::max_element(data.begin(), data.end());
+}
+
+Result::Measure Result::fromString(std::string const& str) {
+ if (str == "elapsed") {
+ return Measure::elapsed;
+ } else if (str == "iterations") {
+ return Measure::iterations;
+ } else if (str == "pagefaults") {
+ return Measure::pagefaults;
+ } else if (str == "cpucycles") {
+ return Measure::cpucycles;
+ } else if (str == "contextswitches") {
+ return Measure::contextswitches;
+ } else if (str == "instructions") {
+ return Measure::instructions;
+ } else if (str == "branchinstructions") {
+ return Measure::branchinstructions;
+ } else if (str == "branchmisses") {
+ return Measure::branchmisses;
+ } else {
+ // not found, return _size
+ return Measure::_size;
+ }
+}
+
+// Configuration of a microbenchmark.
+Bench::Bench() {
+ mConfig.mOut = &std::cout;
+}
+
+Bench::Bench(Bench&&) = default;
+Bench& Bench::operator=(Bench&&) = default;
+Bench::Bench(Bench const&) = default;
+Bench& Bench::operator=(Bench const&) = default;
+Bench::~Bench() noexcept = default;
+
+double Bench::batch() const noexcept {
+ return mConfig.mBatch;
+}
+
+double Bench::complexityN() const noexcept {
+ return mConfig.mComplexityN;
+}
+
+// Set a baseline to compare against. At 100% it is exactly as fast as the baseline, >100% means it is
+// faster than the baseline, <100% means it is slower than the baseline.
+Bench& Bench::relative(bool isRelativeEnabled) noexcept {
+ mConfig.mIsRelative = isRelativeEnabled;
+ return *this;
+}
+bool Bench::relative() const noexcept {
+ return mConfig.mIsRelative;
+}
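+
+// Illustrative usage sketch (names are placeholders): the first completed benchmark becomes
+// the baseline for the "relative" column printed by IterationLogic::Impl::showResult().
+//
+//   bench.relative(true);
+//   bench.name("baseline").run([&] { /* reference workload */ });
+//   bench.name("candidate").run([&] { /* workload under test */ });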
+
+Bench& Bench::performanceCounters(bool showPerformanceCounters) noexcept {
+ mConfig.mShowPerformanceCounters = showPerformanceCounters;
+ return *this;
+}
+bool Bench::performanceCounters() const noexcept {
+ return mConfig.mShowPerformanceCounters;
+}
+
+// Operation unit. Defaults to "op", could be e.g. "byte" for string processing.
+// If u differs from currently set unit, the stored results will be cleared.
+// Use singular (byte, not bytes).
+Bench& Bench::unit(char const* u) {
+ if (u != mConfig.mUnit) {
+ mResults.clear();
+ }
+ mConfig.mUnit = u;
+ return *this;
+}
+
+Bench& Bench::unit(std::string const& u) {
+ return unit(u.c_str());
+}
+
+std::string const& Bench::unit() const noexcept {
+ return mConfig.mUnit;
+}
+
+// If benchmarkTitle differs from currently set title, the stored results will be cleared.
+Bench& Bench::title(const char* benchmarkTitle) {
+ if (benchmarkTitle != mConfig.mBenchmarkTitle) {
+ mResults.clear();
+ }
+ mConfig.mBenchmarkTitle = benchmarkTitle;
+ return *this;
+}
+Bench& Bench::title(std::string const& benchmarkTitle) {
+ if (benchmarkTitle != mConfig.mBenchmarkTitle) {
+ mResults.clear();
+ }
+ mConfig.mBenchmarkTitle = benchmarkTitle;
+ return *this;
+}
+
+std::string const& Bench::title() const noexcept {
+ return mConfig.mBenchmarkTitle;
+}
+
+Bench& Bench::name(const char* benchmarkName) {
+ mConfig.mBenchmarkName = benchmarkName;
+ return *this;
+}
+
+Bench& Bench::name(std::string const& benchmarkName) {
+ mConfig.mBenchmarkName = benchmarkName;
+ return *this;
+}
+
+std::string const& Bench::name() const noexcept {
+ return mConfig.mBenchmarkName;
+}
+
+// Number of epochs to evaluate. The reported result will be the median of evaluation of each epoch.
+Bench& Bench::epochs(size_t numEpochs) noexcept {
+ mConfig.mNumEpochs = numEpochs;
+ return *this;
+}
+size_t Bench::epochs() const noexcept {
+ return mConfig.mNumEpochs;
+}
+
+// Desired evaluation time per epoch is a multiple of the clock resolution. Default is 1000 times the measured clock resolution.
+Bench& Bench::clockResolutionMultiple(size_t multiple) noexcept {
+ mConfig.mClockResolutionMultiple = multiple;
+ return *this;
+}
+size_t Bench::clockResolutionMultiple() const noexcept {
+ return mConfig.mClockResolutionMultiple;
+}
+
+// Sets the maximum time each epoch should take. Default is 100ms.
+Bench& Bench::maxEpochTime(std::chrono::nanoseconds t) noexcept {
+ mConfig.mMaxEpochTime = t;
+ return *this;
+}
+std::chrono::nanoseconds Bench::maxEpochTime() const noexcept {
+ return mConfig.mMaxEpochTime;
+}
+
+// Sets the minimum time each epoch should take.
+Bench& Bench::minEpochTime(std::chrono::nanoseconds t) noexcept {
+ mConfig.mMinEpochTime = t;
+ return *this;
+}
+std::chrono::nanoseconds Bench::minEpochTime() const noexcept {
+ return mConfig.mMinEpochTime;
+}
+
+Bench& Bench::minEpochIterations(uint64_t numIters) noexcept {
+ mConfig.mMinEpochIterations = (numIters == 0) ? 1 : numIters;
+ return *this;
+}
+uint64_t Bench::minEpochIterations() const noexcept {
+ return mConfig.mMinEpochIterations;
+}
+
+Bench& Bench::epochIterations(uint64_t numIters) noexcept {
+ mConfig.mEpochIterations = numIters;
+ return *this;
+}
+uint64_t Bench::epochIterations() const noexcept {
+ return mConfig.mEpochIterations;
+}
+
+Bench& Bench::warmup(uint64_t numWarmupIters) noexcept {
+ mConfig.mWarmup = numWarmupIters;
+ return *this;
+}
+uint64_t Bench::warmup() const noexcept {
+ return mConfig.mWarmup;
+}
+
+Bench& Bench::config(Config const& benchmarkConfig) {
+ mConfig = benchmarkConfig;
+ return *this;
+}
+Config const& Bench::config() const noexcept {
+ return mConfig;
+}
+
+Bench& Bench::output(std::ostream* outstream) noexcept {
+ mConfig.mOut = outstream;
+ return *this;
+}
+
+ANKERL_NANOBENCH(NODISCARD) std::ostream* Bench::output() const noexcept {
+ return mConfig.mOut;
+}
+
+std::vector<Result> const& Bench::results() const noexcept {
+ return mResults;
+}
+
+Bench& Bench::render(char const* templateContent, std::ostream& os) {
+ ::ankerl::nanobench::render(templateContent, *this, os);
+ return *this;
+}
+
+std::vector<BigO> Bench::complexityBigO() const {
+ std::vector<BigO> bigOs;
+ auto rangeMeasure = BigO::collectRangeMeasure(mResults);
+ bigOs.emplace_back("O(1)", rangeMeasure, [](double) {
+ return 1.0;
+ });
+ bigOs.emplace_back("O(n)", rangeMeasure, [](double n) {
+ return n;
+ });
+ bigOs.emplace_back("O(log n)", rangeMeasure, [](double n) {
+ return std::log2(n);
+ });
+ bigOs.emplace_back("O(n log n)", rangeMeasure, [](double n) {
+ return n * std::log2(n);
+ });
+ bigOs.emplace_back("O(n^2)", rangeMeasure, [](double n) {
+ return n * n;
+ });
+ bigOs.emplace_back("O(n^3)", rangeMeasure, [](double n) {
+ return n * n * n;
+ });
+ std::sort(bigOs.begin(), bigOs.end());
+ return bigOs;
+}
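+
+// Illustrative usage sketch (assumes the complexityN(n) setter declared earlier in this header,
+// matching the complexityN() getter above): run the benchmark for several problem sizes, then
+// print the fitted complexity candidates via the operator<< overloads below.
+//
+//   for (size_t n : {100U, 200U, 400U, 800U}) {
+//       bench.complexityN(n).run([&] { workload(n); });   // workload() is a placeholder
+//   }
+//   std::cout << bench.complexityBigO() << std::endl;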
+
+Rng::Rng()
+ : mX(0)
+ , mY(0) {
+ std::random_device rd;
+ std::uniform_int_distribution<uint64_t> dist;
+ do {
+ mX = dist(rd);
+ mY = dist(rd);
+ } while (mX == 0 && mY == 0);
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+uint64_t splitMix64(uint64_t& state) noexcept {
+ uint64_t z = (state += UINT64_C(0x9e3779b97f4a7c15));
+ z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9);
+ z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb);
+ return z ^ (z >> 31U);
+}
+
+// Seeded as described in the Romu paper (April 2020 update)
+Rng::Rng(uint64_t seed) noexcept
+ : mX(splitMix64(seed))
+ , mY(splitMix64(seed)) {
+ for (size_t i = 0; i < 10; ++i) {
+ operator()();
+ }
+}
+
+// only internally used to copy the RNG.
+Rng::Rng(uint64_t x, uint64_t y) noexcept
+ : mX(x)
+ , mY(y) {}
+
+Rng Rng::copy() const noexcept {
+ return Rng{mX, mY};
+}
+
+BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result> const& results) {
+ BigO::RangeMeasure rangeMeasure;
+ for (auto const& result : results) {
+ if (result.config().mComplexityN > 0.0) {
+ rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed));
+ }
+ }
+ return rangeMeasure;
+}
+
+BigO::BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure)
+ : mName(bigOName) {
+
+ // estimate the constant factor
+ double sumRangeMeasure = 0.0;
+ double sumRangeRange = 0.0;
+
+ for (size_t i = 0; i < rangeMeasure.size(); ++i) {
+ sumRangeMeasure += rangeMeasure[i].first * rangeMeasure[i].second;
+ sumRangeRange += rangeMeasure[i].first * rangeMeasure[i].first;
+ }
+ mConstant = sumRangeMeasure / sumRangeRange;
+
+ // calculate root mean square
+ double err = 0.0;
+ double sumMeasure = 0.0;
+ for (size_t i = 0; i < rangeMeasure.size(); ++i) {
+ auto diff = mConstant * rangeMeasure[i].first - rangeMeasure[i].second;
+ err += diff * diff;
+
+ sumMeasure += rangeMeasure[i].second;
+ }
+
+ auto n = static_cast<double>(rangeMeasure.size());
+ auto mean = sumMeasure / n;
+ mNormalizedRootMeanSquare = std::sqrt(err / n) / mean;
+}
+
+BigO::BigO(const char* bigOName, RangeMeasure const& rangeMeasure)
+ : BigO(std::string(bigOName), rangeMeasure) {}
+
+std::string const& BigO::name() const noexcept {
+ return mName;
+}
+
+double BigO::constant() const noexcept {
+ return mConstant;
+}
+
+double BigO::normalizedRootMeanSquare() const noexcept {
+ return mNormalizedRootMeanSquare;
+}
+
+bool BigO::operator<(BigO const& other) const noexcept {
+ return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName);
+}
+
+std::ostream& operator<<(std::ostream& os, BigO const& bigO) {
+ return os << bigO.constant() << " * " << bigO.name() << ", rms=" << bigO.normalizedRootMeanSquare();
+}
+
+std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs) {
+ detail::fmt::StreamStateRestorer restorer(os);
+ os << std::endl << "| coefficient | err% | complexity" << std::endl << "|--------------:|-------:|------------" << std::endl;
+ for (auto const& bigO : bigOs) {
+ os << "|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() << " ";
+ os << "|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) << "% ";
+ os << "| " << bigO.name();
+ os << std::endl;
+ }
+ return os;
+}
+
+} // namespace nanobench
+} // namespace ankerl
+
+#endif // ANKERL_NANOBENCH_IMPLEMENT
+#endif // ANKERL_NANOBENCH_H_INCLUDED
diff --git a/src/bench/poly1305.cpp b/src/bench/poly1305.cpp
index 02e5fecc0d..d8db99e7d4 100644
--- a/src/bench/poly1305.cpp
+++ b/src/bench/poly1305.cpp
@@ -11,30 +11,31 @@ static constexpr uint64_t BUFFER_SIZE_TINY = 64;
static constexpr uint64_t BUFFER_SIZE_SMALL = 256;
static constexpr uint64_t BUFFER_SIZE_LARGE = 1024*1024;
-static void POLY1305(benchmark::State& state, size_t buffersize)
+static void POLY1305(benchmark::Bench& bench, size_t buffersize)
{
std::vector<unsigned char> tag(POLY1305_TAGLEN, 0);
std::vector<unsigned char> key(POLY1305_KEYLEN, 0);
std::vector<unsigned char> in(buffersize, 0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
poly1305_auth(tag.data(), in.data(), in.size(), key.data());
+ });
}
-static void POLY1305_64BYTES(benchmark::State& state)
+static void POLY1305_64BYTES(benchmark::Bench& bench)
{
- POLY1305(state, BUFFER_SIZE_TINY);
+ POLY1305(bench, BUFFER_SIZE_TINY);
}
-static void POLY1305_256BYTES(benchmark::State& state)
+static void POLY1305_256BYTES(benchmark::Bench& bench)
{
- POLY1305(state, BUFFER_SIZE_SMALL);
+ POLY1305(bench, BUFFER_SIZE_SMALL);
}
-static void POLY1305_1MB(benchmark::State& state)
+static void POLY1305_1MB(benchmark::Bench& bench)
{
- POLY1305(state, BUFFER_SIZE_LARGE);
+ POLY1305(bench, BUFFER_SIZE_LARGE);
}
-BENCHMARK(POLY1305_64BYTES, 500000);
-BENCHMARK(POLY1305_256BYTES, 250000);
-BENCHMARK(POLY1305_1MB, 340);
+BENCHMARK(POLY1305_64BYTES);
+BENCHMARK(POLY1305_256BYTES);
+BENCHMARK(POLY1305_1MB);
diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp
index 42b351a72d..a2dbefa54a 100644
--- a/src/bench/prevector.cpp
+++ b/src/bench/prevector.cpp
@@ -30,51 +30,44 @@ static_assert(IS_TRIVIALLY_CONSTRUCTIBLE<trivial_t>::value,
"expected trivial_t to be trivially constructible");
template <typename T>
-static void PrevectorDestructor(benchmark::State& state)
+static void PrevectorDestructor(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
- for (auto x = 0; x < 1000; ++x) {
- prevector<28, T> t0;
- prevector<28, T> t1;
- t0.resize(28);
- t1.resize(29);
- }
- }
+ bench.batch(2).run([&] {
+ prevector<28, T> t0;
+ prevector<28, T> t1;
+ t0.resize(28);
+ t1.resize(29);
+ });
}
template <typename T>
-static void PrevectorClear(benchmark::State& state)
+static void PrevectorClear(benchmark::Bench& bench)
{
-
- while (state.KeepRunning()) {
- for (auto x = 0; x < 1000; ++x) {
- prevector<28, T> t0;
- prevector<28, T> t1;
- t0.resize(28);
- t0.clear();
- t1.resize(29);
- t1.clear();
- }
- }
+ prevector<28, T> t0;
+ prevector<28, T> t1;
+ bench.batch(2).run([&] {
+ t0.resize(28);
+ t0.clear();
+ t1.resize(29);
+ t1.clear();
+ });
}
template <typename T>
-static void PrevectorResize(benchmark::State& state)
+static void PrevectorResize(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
- prevector<28, T> t0;
- prevector<28, T> t1;
- for (auto x = 0; x < 1000; ++x) {
- t0.resize(28);
- t0.resize(0);
- t1.resize(29);
- t1.resize(0);
- }
- }
+ prevector<28, T> t0;
+ prevector<28, T> t1;
+ bench.batch(4).run([&] {
+ t0.resize(28);
+ t0.resize(0);
+ t1.resize(29);
+ t1.resize(0);
+ });
}
template <typename T>
-static void PrevectorDeserialize(benchmark::State& state)
+static void PrevectorDeserialize(benchmark::Bench& bench)
{
CDataStream s0(SER_NETWORK, 0);
prevector<28, T> t0;
@@ -86,26 +79,28 @@ static void PrevectorDeserialize(benchmark::State& state)
for (auto x = 0; x < 101; ++x) {
s0 << t0;
}
- while (state.KeepRunning()) {
+ bench.batch(1000).run([&] {
prevector<28, T> t1;
for (auto x = 0; x < 1000; ++x) {
s0 >> t1;
}
s0.Init(SER_NETWORK, 0);
- }
+ });
}
-#define PREVECTOR_TEST(name, nontrivops, trivops) \
- static void Prevector ## name ## Nontrivial(benchmark::State& state) { \
- Prevector ## name<nontrivial_t>(state); \
- } \
- BENCHMARK(Prevector ## name ## Nontrivial, nontrivops); \
- static void Prevector ## name ## Trivial(benchmark::State& state) { \
- Prevector ## name<trivial_t>(state); \
- } \
- BENCHMARK(Prevector ## name ## Trivial, trivops);
+#define PREVECTOR_TEST(name) \
+ static void Prevector##name##Nontrivial(benchmark::Bench& bench) \
+ { \
+ Prevector##name<nontrivial_t>(bench); \
+ } \
+ BENCHMARK(Prevector##name##Nontrivial); \
+ static void Prevector##name##Trivial(benchmark::Bench& bench) \
+ { \
+ Prevector##name<trivial_t>(bench); \
+ } \
+ BENCHMARK(Prevector##name##Trivial);
-PREVECTOR_TEST(Clear, 28300, 88600)
-PREVECTOR_TEST(Destructor, 28800, 88900)
-PREVECTOR_TEST(Resize, 28900, 90300)
-PREVECTOR_TEST(Deserialize, 6800, 52000)
+PREVECTOR_TEST(Clear)
+PREVECTOR_TEST(Destructor)
+PREVECTOR_TEST(Resize)
+PREVECTOR_TEST(Deserialize)
diff --git a/src/bench/rollingbloom.cpp b/src/bench/rollingbloom.cpp
index 6cdb4ff0a7..9b43951e6e 100644
--- a/src/bench/rollingbloom.cpp
+++ b/src/bench/rollingbloom.cpp
@@ -6,12 +6,12 @@
#include <bench/bench.h>
#include <bloom.h>
-static void RollingBloom(benchmark::State& state)
+static void RollingBloom(benchmark::Bench& bench)
{
CRollingBloomFilter filter(120000, 0.000001);
std::vector<unsigned char> data(32);
uint32_t count = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
count++;
data[0] = count;
data[1] = count >> 8;
@@ -24,16 +24,16 @@ static void RollingBloom(benchmark::State& state)
data[2] = count >> 8;
data[3] = count;
filter.contains(data);
- }
+ });
}
-static void RollingBloomReset(benchmark::State& state)
+static void RollingBloomReset(benchmark::Bench& bench)
{
CRollingBloomFilter filter(120000, 0.000001);
- while (state.KeepRunning()) {
+ bench.run([&] {
filter.reset();
- }
+ });
}
-BENCHMARK(RollingBloom, 1500 * 1000);
-BENCHMARK(RollingBloomReset, 20000);
+BENCHMARK(RollingBloom);
+BENCHMARK(RollingBloomReset);
diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp
index 511573abac..4b45264a3c 100644
--- a/src/bench/rpc_blockchain.cpp
+++ b/src/bench/rpc_blockchain.cpp
@@ -11,7 +11,8 @@
#include <univalue.h>
-static void BlockToJsonVerbose(benchmark::State& state) {
+static void BlockToJsonVerbose(benchmark::Bench& bench)
+{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
char a = '\0';
stream.write(&a, 1); // Prevent compaction
@@ -24,9 +25,9 @@ static void BlockToJsonVerbose(benchmark::State& state) {
blockindex.phashBlock = &blockHash;
blockindex.nBits = 403014710;
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)blockToJSON(block, &blockindex, &blockindex, /*verbose*/ true);
- }
+ });
}
-BENCHMARK(BlockToJsonVerbose, 10);
+BENCHMARK(BlockToJsonVerbose);
diff --git a/src/bench/rpc_mempool.cpp b/src/bench/rpc_mempool.cpp
index bf63cccf09..1ff41765cf 100644
--- a/src/bench/rpc_mempool.cpp
+++ b/src/bench/rpc_mempool.cpp
@@ -15,7 +15,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& poo
pool.addUnchecked(CTxMemPoolEntry(tx, fee, /* time */ 0, /* height */ 1, /* spendsCoinbase */ false, /* sigOpCost */ 4, lp));
}
-static void RpcMempool(benchmark::State& state)
+static void RpcMempool(benchmark::Bench& bench)
{
CTxMemPool pool;
LOCK2(cs_main, pool.cs);
@@ -32,9 +32,9 @@ static void RpcMempool(benchmark::State& state)
AddTx(tx_r, /* fee */ i, pool);
}
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)MempoolToJSON(pool, /*verbose*/ true);
- }
+ });
}
-BENCHMARK(RpcMempool, 40);
+BENCHMARK(RpcMempool);
diff --git a/src/bench/util_time.cpp b/src/bench/util_time.cpp
index 72d97354aa..fad179eb87 100644
--- a/src/bench/util_time.cpp
+++ b/src/bench/util_time.cpp
@@ -6,37 +6,37 @@
#include <util/time.h>
-static void BenchTimeDeprecated(benchmark::State& state)
+static void BenchTimeDeprecated(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTime();
- }
+ });
}
-static void BenchTimeMock(benchmark::State& state)
+static void BenchTimeMock(benchmark::Bench& bench)
{
SetMockTime(111);
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTime<std::chrono::seconds>();
- }
+ });
SetMockTime(0);
}
-static void BenchTimeMillis(benchmark::State& state)
+static void BenchTimeMillis(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTime<std::chrono::milliseconds>();
- }
+ });
}
-static void BenchTimeMillisSys(benchmark::State& state)
+static void BenchTimeMillisSys(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTimeMillis();
- }
+ });
}
-BENCHMARK(BenchTimeDeprecated, 100000000);
-BENCHMARK(BenchTimeMillis, 6000000);
-BENCHMARK(BenchTimeMillisSys, 6000000);
-BENCHMARK(BenchTimeMock, 300000000);
+BENCHMARK(BenchTimeDeprecated);
+BENCHMARK(BenchTimeMillis);
+BENCHMARK(BenchTimeMillisSys);
+BENCHMARK(BenchTimeMock);
diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp
index 14bca5f7d1..9af0b502eb 100644
--- a/src/bench/verify_script.cpp
+++ b/src/bench/verify_script.cpp
@@ -16,7 +16,7 @@
// Microbenchmark for verification of a basic P2WPKH script. Can be easily
// modified to measure performance of other types of scripts.
-static void VerifyScriptBench(benchmark::State& state)
+static void VerifyScriptBench(benchmark::Bench& bench)
{
const ECCVerifyHandle verify_handle;
ECC_Start();
@@ -34,7 +34,7 @@ static void VerifyScriptBench(benchmark::State& state)
key.Set(vchKey.begin(), vchKey.end(), false);
CPubKey pubkey = key.GetPubKey();
uint160 pubkeyHash;
- CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(pubkeyHash.begin());
+ CHash160().Write(pubkey).Finalize(pubkeyHash);
// Script.
CScript scriptPubKey = CScript() << witnessversion << ToByteVector(pubkeyHash);
@@ -49,7 +49,7 @@ static void VerifyScriptBench(benchmark::State& state)
witness.stack.push_back(ToByteVector(pubkey));
// Benchmark.
- while (state.KeepRunning()) {
+ bench.run([&] {
ScriptError err;
bool success = VerifyScript(
txSpend.vin[0].scriptSig,
@@ -71,11 +71,12 @@ static void VerifyScriptBench(benchmark::State& state)
(const unsigned char*)stream.data(), stream.size(), 0, flags, nullptr);
assert(csuccess == 1);
#endif
- }
+ });
ECC_Stop();
}
-static void VerifyNestedIfScript(benchmark::State& state) {
+static void VerifyNestedIfScript(benchmark::Bench& bench)
+{
std::vector<std::vector<unsigned char>> stack;
CScript script;
for (int i = 0; i < 100; ++i) {
@@ -87,15 +88,13 @@ static void VerifyNestedIfScript(benchmark::State& state) {
for (int i = 0; i < 100; ++i) {
script << OP_ENDIF;
}
- while (state.KeepRunning()) {
+ bench.run([&] {
auto stack_copy = stack;
ScriptError error;
bool ret = EvalScript(stack_copy, script, 0, BaseSignatureChecker(), SigVersion::BASE, &error);
assert(ret);
- }
+ });
}
-
-BENCHMARK(VerifyScriptBench, 6300);
-
-BENCHMARK(VerifyNestedIfScript, 100);
+BENCHMARK(VerifyScriptBench);
+BENCHMARK(VerifyNestedIfScript);
diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp
index 05cfb3438e..e16182b48e 100644
--- a/src/bench/wallet_balance.cpp
+++ b/src/bench/wallet_balance.cpp
@@ -12,7 +12,7 @@
#include <validationinterface.h>
#include <wallet/wallet.h>
-static void WalletBalance(benchmark::State& state, const bool set_dirty, const bool add_watchonly, const bool add_mine)
+static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const bool add_watchonly, const bool add_mine)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -45,20 +45,20 @@ static void WalletBalance(benchmark::State& state, const bool set_dirty, const b
auto bal = wallet.GetBalance(); // Cache
- while (state.KeepRunning()) {
+ bench.run([&] {
if (set_dirty) wallet.MarkDirty();
bal = wallet.GetBalance();
if (add_mine) assert(bal.m_mine_trusted > 0);
if (add_watchonly) assert(bal.m_watchonly_trusted > 0);
- }
+ });
}
-static void WalletBalanceDirty(benchmark::State& state) { WalletBalance(state, /* set_dirty */ true, /* add_watchonly */ true, /* add_mine */ true); }
-static void WalletBalanceClean(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ true); }
-static void WalletBalanceMine(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ false, /* add_mine */ true); }
-static void WalletBalanceWatch(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ false); }
+static void WalletBalanceDirty(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ true, /* add_watchonly */ true, /* add_mine */ true); }
+static void WalletBalanceClean(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ true); }
+static void WalletBalanceMine(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ false, /* add_mine */ true); }
+static void WalletBalanceWatch(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ false); }
-BENCHMARK(WalletBalanceDirty, 2500);
-BENCHMARK(WalletBalanceClean, 8000);
-BENCHMARK(WalletBalanceMine, 16000);
-BENCHMARK(WalletBalanceWatch, 8000);
+BENCHMARK(WalletBalanceDirty);
+BENCHMARK(WalletBalanceClean);
+BENCHMARK(WalletBalanceMine);
+BENCHMARK(WalletBalanceWatch);
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 9afcda4578..cf52b710cb 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -43,32 +43,32 @@ static const int CONTINUE_EXECUTION=-1;
/** Default number of blocks to generate for RPC generatetoaddress. */
static const std::string DEFAULT_NBLOCKS = "1";
-static void SetupCliArgs()
+static void SetupCliArgs(ArgsManager& argsman)
{
- SetupHelpOptions(gArgs);
+ SetupHelpOptions(argsman);
const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN);
const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET);
const auto regtestBaseParams = CreateBaseChainParams(CBaseChainParams::REGTEST);
- gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-generate", strprintf("Generate blocks immediately, equivalent to RPC generatenewaddress followed by RPC generatetoaddress. Optional positional integer arguments are number of blocks to generate (default: %s) and maximum iterations to try (default: %s), equivalent to RPC generatetoaddress nblocks and maxtries arguments. Example: bitcoin-cli -generate 4 1000", DEFAULT_NBLOCKS, DEFAULT_MAX_TRIES), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the results of -getinfo is the result of multiple non-atomic requests. Some entries in the result may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- SetupChainParamsBaseOptions();
- gArgs.AddArg("-named", strprintf("Pass named instead of positional arguments (default: %s)", DEFAULT_NAMED), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcclienttimeout=<n>", strprintf("Timeout in seconds during HTTP requests, or 0 for no timeout. (default: %d)", DEFAULT_HTTP_CLIENT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcconnect=<ip>", strprintf("Send commands to node running on <ip> (default: %s)", DEFAULT_RPCCONNECT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcport=<port>", strprintf("Connect to JSON-RPC on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcwait", "Wait for RPC server to start", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcwallet=<walletname>", "Send RPC for non-default wallet on RPC server (needs to exactly match corresponding -wallet option passed to bitcoind). This changes the RPC endpoint used, e.g. http://127.0.0.1:8332/wallet/<walletname>", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-stdin", "Read extra arguments from standard input, one per line until EOF/Ctrl-D (recommended for sensitive information such as passphrases). When combined with -stdinrpcpass, the first line from standard input is used for the RPC password.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-stdinrpcpass", "Read RPC password from standard input as a single line. When combined with -stdin, the first line from standard input is used for the RPC password. When combined with -stdinwalletpassphrase, -stdinrpcpass consumes the first line, and -stdinwalletpassphrase consumes the second.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-stdinwalletpassphrase", "Read wallet passphrase from standard input as a single line. When combined with -stdin, the first line from standard input is used for the wallet passphrase.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-generate", strprintf("Generate blocks immediately, equivalent to RPC generatenewaddress followed by RPC generatetoaddress. Optional positional integer arguments are number of blocks to generate (default: %s) and maximum iterations to try (default: %s), equivalent to RPC generatetoaddress nblocks and maxtries arguments. Example: bitcoin-cli -generate 4 1000", DEFAULT_NBLOCKS, DEFAULT_MAX_TRIES), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the results of -getinfo is the result of multiple non-atomic requests. Some entries in the result may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ SetupChainParamsBaseOptions(argsman);
+ argsman.AddArg("-named", strprintf("Pass named instead of positional arguments (default: %s)", DEFAULT_NAMED), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcclienttimeout=<n>", strprintf("Timeout in seconds during HTTP requests, or 0 for no timeout. (default: %d)", DEFAULT_HTTP_CLIENT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcconnect=<ip>", strprintf("Send commands to node running on <ip> (default: %s)", DEFAULT_RPCCONNECT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcport=<port>", strprintf("Connect to JSON-RPC on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcwait", "Wait for RPC server to start", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcwallet=<walletname>", "Send RPC for non-default wallet on RPC server (needs to exactly match corresponding -wallet option passed to bitcoind). This changes the RPC endpoint used, e.g. http://127.0.0.1:8332/wallet/<walletname>", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-stdin", "Read extra arguments from standard input, one per line until EOF/Ctrl-D (recommended for sensitive information such as passphrases). When combined with -stdinrpcpass, the first line from standard input is used for the RPC password.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-stdinrpcpass", "Read RPC password from standard input as a single line. When combined with -stdin, the first line from standard input is used for the RPC password. When combined with -stdinwalletpassphrase, -stdinrpcpass consumes the first line, and -stdinwalletpassphrase consumes the second.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-stdinwalletpassphrase", "Read wallet passphrase from standard input as a single line. When combined with -stdin, the first line from standard input is used for the wallet passphrase.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
}
/** libevent event log callback */
@@ -111,7 +111,7 @@ static int AppInitRPC(int argc, char* argv[])
//
// Parameters
//
- SetupCliArgs();
+ SetupCliArgs(gArgs);
std::string error;
if (!gArgs.ParseParameters(argc, argv, error)) {
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error);
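
The bitcoin-cli change illustrates the argument-handling pattern repeated in bitcoin-tx, bitcoin-wallet, chainparamsbase and init below: each Setup*Args helper now takes an explicit ArgsManager& instead of reaching for the global gArgs, and the global is injected exactly once at the AppInit* call site. A sketch of that shape, with hypothetical names (SetupExampleArgs, AppInitExample, -example) and assuming the same includes bitcoin-cli.cpp already has:

    // Illustration of the ArgsManager-injection pattern used in this diff.
    static void SetupExampleArgs(ArgsManager& argsman)
    {
        SetupHelpOptions(argsman);
        argsman.AddArg("-example=<n>", "An illustrative option", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
    }

    static int AppInitExample(int argc, char* argv[])
    {
        SetupExampleArgs(gArgs); // the only place the global is mentioned
        std::string error;
        if (!gArgs.ParseParameters(argc, argv, error)) {
            tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error);
            return EXIT_FAILURE;
        }
        return CONTINUE_EXECUTION;
    }

Because the helper no longer touches global state, it can also be exercised against a locally constructed ArgsManager (for example in tests) without resetting gArgs.
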
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index f54a299a36..56afcb6ded 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -36,40 +36,40 @@ static const int CONTINUE_EXECUTION=-1;
const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr;
-static void SetupBitcoinTxArgs()
+static void SetupBitcoinTxArgs(ArgsManager &argsman)
{
- SetupHelpOptions(gArgs);
-
- gArgs.AddArg("-create", "Create new, empty TX.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-json", "Select JSON output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-txid", "Output only the hex-encoded transaction id of the resultant transaction.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- SetupChainParamsBaseOptions();
-
- gArgs.AddArg("delin=N", "Delete input N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("delout=N", "Delete output N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("in=TXID:VOUT(:SEQUENCE_NUMBER)", "Add input to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("locktime=N", "Set TX lock time to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("nversion=N", "Set TX version to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outaddr=VALUE:ADDRESS", "Add address-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outdata=[VALUE:]DATA", "Add data-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", "Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS. "
+ SetupHelpOptions(argsman);
+
+ argsman.AddArg("-create", "Create new, empty TX.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-json", "Select JSON output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-txid", "Output only the hex-encoded transaction id of the resultant transaction.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ SetupChainParamsBaseOptions(argsman);
+
+ argsman.AddArg("delin=N", "Delete input N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("delout=N", "Delete output N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("in=TXID:VOUT(:SEQUENCE_NUMBER)", "Add input to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("locktime=N", "Set TX lock time to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("nversion=N", "Set TX version to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("outaddr=VALUE:ADDRESS", "Add address-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("outdata=[VALUE:]DATA", "Add data-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", "Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS. "
"Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output. "
"Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outpubkey=VALUE:PUBKEY[:FLAGS]", "Add pay-to-pubkey output to TX. "
+ argsman.AddArg("outpubkey=VALUE:PUBKEY[:FLAGS]", "Add pay-to-pubkey output to TX. "
"Optionally add the \"W\" flag to produce a pay-to-witness-pubkey-hash output. "
"Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outscript=VALUE:SCRIPT[:FLAGS]", "Add raw script output to TX. "
+ argsman.AddArg("outscript=VALUE:SCRIPT[:FLAGS]", "Add raw script output to TX. "
"Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output. "
"Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("replaceable(=N)", "Set RBF opt-in sequence number for input N (if not provided, opt-in all available inputs)", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("sign=SIGHASH-FLAGS", "Add zero or more signatures to transaction. "
+ argsman.AddArg("replaceable(=N)", "Set RBF opt-in sequence number for input N (if not provided, opt-in all available inputs)", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("sign=SIGHASH-FLAGS", "Add zero or more signatures to transaction. "
"This command requires JSON registers:"
"prevtxs=JSON object, "
"privatekeys=JSON object. "
"See signrawtransactionwithkey docs for format of sighash flags, JSON objects.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("load=NAME:FILENAME", "Load JSON file FILENAME into register NAME", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
- gArgs.AddArg("set=NAME:JSON-STRING", "Set register NAME to given JSON-STRING", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
+ argsman.AddArg("load=NAME:FILENAME", "Load JSON file FILENAME into register NAME", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
+ argsman.AddArg("set=NAME:JSON-STRING", "Set register NAME to given JSON-STRING", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
}
//
@@ -81,7 +81,7 @@ static int AppInitRawTx(int argc, char* argv[])
//
// Parameters
//
- SetupBitcoinTxArgs();
+ SetupBitcoinTxArgs(gArgs);
std::string error;
if (!gArgs.ParseParameters(argc, argv, error)) {
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error);
diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp
index b420463c00..06b0c86476 100644
--- a/src/bitcoin-wallet.cpp
+++ b/src/bitcoin-wallet.cpp
@@ -19,24 +19,24 @@
const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr;
UrlDecodeFn* const URL_DECODE = nullptr;
-static void SetupWalletToolArgs()
+static void SetupWalletToolArgs(ArgsManager& argsman)
{
- SetupHelpOptions(gArgs);
- SetupChainParamsBaseOptions();
+ SetupHelpOptions(argsman);
+ SetupChainParamsBaseOptions(argsman);
- gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-wallet=<wallet-name>", "Specify wallet name", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-debug=<category>", "Output debugging information (default: 0).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-wallet=<wallet-name>", "Specify wallet name", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-debug=<category>", "Output debugging information (default: 0).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
}
static bool WalletAppInit(int argc, char* argv[])
{
- SetupWalletToolArgs();
+ SetupWalletToolArgs(gArgs);
std::string error_message;
if (!gArgs.ParseParameters(argc, argv, error_message)) {
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error_message);
diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp
index 5f5bed5bda..9a6fb4abd0 100644
--- a/src/blockfilter.cpp
+++ b/src/blockfilter.cpp
@@ -291,7 +291,7 @@ uint256 BlockFilter::GetHash() const
const std::vector<unsigned char>& data = GetEncodedFilter();
uint256 result;
- CHash256().Write(data.data(), data.size()).Finalize(result.begin());
+ CHash256().Write(data).Finalize(result);
return result;
}
@@ -301,8 +301,8 @@ uint256 BlockFilter::ComputeHeader(const uint256& prev_header) const
uint256 result;
CHash256()
- .Write(filter_hash.begin(), filter_hash.size())
- .Write(prev_header.begin(), prev_header.size())
- .Finalize(result.begin());
+ .Write(filter_hash)
+ .Write(prev_header)
+ .Finalize(result);
return result;
}
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 092c45e4ce..a7c9e33f07 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -341,8 +341,8 @@ public:
void CRegTestParams::UpdateActivationParametersFromArgs(const ArgsManager& args)
{
- if (gArgs.IsArgSet("-segwitheight")) {
- int64_t height = gArgs.GetArg("-segwitheight", consensus.SegwitHeight);
+ if (args.IsArgSet("-segwitheight")) {
+ int64_t height = args.GetArg("-segwitheight", consensus.SegwitHeight);
if (height < -1 || height >= std::numeric_limits<int>::max()) {
throw std::runtime_error(strprintf("Activation height %ld for segwit is out of valid range. Use -1 to disable segwit.", height));
} else if (height == -1) {
diff --git a/src/chainparamsbase.cpp b/src/chainparamsbase.cpp
index 894b8553c4..1825ced640 100644
--- a/src/chainparamsbase.cpp
+++ b/src/chainparamsbase.cpp
@@ -15,14 +15,14 @@ const std::string CBaseChainParams::MAIN = "main";
const std::string CBaseChainParams::TESTNET = "test";
const std::string CBaseChainParams::REGTEST = "regtest";
-void SetupChainParamsBaseOptions()
+void SetupChainParamsBaseOptions(ArgsManager& argsman)
{
- gArgs.AddArg("-chain=<chain>", "Use the chain <chain> (default: main). Allowed values: main, test, regtest", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
- gArgs.AddArg("-regtest", "Enter regression test mode, which uses a special chain in which blocks can be solved instantly. "
+ argsman.AddArg("-chain=<chain>", "Use the chain <chain> (default: main). Allowed values: main, test, regtest", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
+ argsman.AddArg("-regtest", "Enter regression test mode, which uses a special chain in which blocks can be solved instantly. "
"This is intended for regression testing tools and app development. Equivalent to -chain=regtest.", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
- gArgs.AddArg("-segwitheight=<n>", "Set the activation height of segwit. -1 to disable. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-testnet", "Use the test chain. Equivalent to -chain=test.", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
- gArgs.AddArg("-vbparams=deployment:start:end", "Use given start/end times for specified version bits deployment (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
+ argsman.AddArg("-segwitheight=<n>", "Set the activation height of segwit. -1 to disable. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-testnet", "Use the test chain. Equivalent to -chain=test.", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
+ argsman.AddArg("-vbparams=deployment:start:end", "Use given start/end times for specified version bits deployment (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
}
static std::unique_ptr<CBaseChainParams> globalChainBaseParams;
diff --git a/src/chainparamsbase.h b/src/chainparamsbase.h
index 3c139931ea..1c52d0ea97 100644
--- a/src/chainparamsbase.h
+++ b/src/chainparamsbase.h
@@ -8,6 +8,8 @@
#include <memory>
#include <string>
+class ArgsManager;
+
/**
* CBaseChainParams defines the base parameters (shared between bitcoin-cli and bitcoind)
* of a given instance of the Bitcoin system.
@@ -43,7 +45,7 @@ std::unique_ptr<CBaseChainParams> CreateBaseChainParams(const std::string& chain
/**
*Set the arguments for chainparams
*/
-void SetupChainParamsBaseOptions();
+void SetupChainParamsBaseOptions(ArgsManager& argsman);
/**
* Return the currently selected parameters. This won't change after app
diff --git a/src/coins.cpp b/src/coins.cpp
index 7b76c13f98..5de2ed7810 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -245,6 +245,14 @@ bool CCoinsViewCache::HaveInputs(const CTransaction& tx) const
return true;
}
+void CCoinsViewCache::ReallocateCache()
+{
+ // Cache should be empty when we're calling this.
+ assert(cacheCoins.size() == 0);
+ cacheCoins.~CCoinsMap();
+ ::new (&cacheCoins) CCoinsMap();
+}
+
static const size_t MIN_TRANSACTION_OUTPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxOut(), PROTOCOL_VERSION);
static const size_t MAX_OUTPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_OUTPUT_WEIGHT;
diff --git a/src/coins.h b/src/coins.h
index a3f34bb0ee..a3e241ac90 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -318,6 +318,13 @@ public:
//! Check whether all prevouts of the transaction are present in the UTXO set represented by this view
bool HaveInputs(const CTransaction& tx) const;
+ //! Force a reallocation of the cache map. This is required when downsizing
+ //! the cache because the map's allocator may be hanging onto a lot of
+ //! memory despite having called .clear().
+ //!
+ //! See: https://stackoverflow.com/questions/42114044/how-to-release-unordered-map-memory
+ void ReallocateCache();
+
private:
/**
* @note this is marked const, but may actually append to `cacheCoins`, increasing
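
The new CCoinsViewCache::ReallocateCache() relies on the fact that clearing a node-based hash map does not necessarily hand its memory back: as the coins.h comment above notes, the map (or its allocator) may keep holding a lot of it after .clear(). Destroying the map in place and placement-new constructing an empty one does release it. A standalone sketch of the same trick on a plain std::unordered_map (the production code applies it to CCoinsMap):

    // Standalone illustration of the reallocation trick used by ReallocateCache().
    #include <cassert>
    #include <new>
    #include <unordered_map>

    using ExampleMap = std::unordered_map<int, int>;

    void ReallocateExampleMap(ExampleMap& m)
    {
        assert(m.empty());        // as in the diff, callers empty the map first
        m.~ExampleMap();          // run the destructor, releasing all held memory
        ::new (&m) ExampleMap();  // reconstruct a fresh, minimally sized map
    }

Swapping the member with a default-constructed temporary achieves the same effect; the destroy-and-placement-new form mirrors what the diff does for cacheCoins.
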
diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp
index 0f7848bae1..18dc7a69e2 100644
--- a/src/dummywallet.cpp
+++ b/src/dummywallet.cpp
@@ -20,14 +20,14 @@ class DummyWalletInit : public WalletInitInterface {
public:
bool HasWalletSupport() const override {return false;}
- void AddWalletOptions() const override;
+ void AddWalletOptions(ArgsManager& argsman) const override;
bool ParameterInteraction() const override {return true;}
void Construct(NodeContext& node) const override {LogPrintf("No wallet support compiled in!\n");}
};
-void DummyWalletInit::AddWalletOptions() const
+void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const
{
- gArgs.AddHiddenArgs({
+ argsman.AddHiddenArgs({
"-addresstype",
"-avoidpartialspends",
"-changetype",
diff --git a/src/hash.cpp b/src/hash.cpp
index 26150e5ca8..4c09f5f646 100644
--- a/src/hash.cpp
+++ b/src/hash.cpp
@@ -12,7 +12,7 @@ inline uint32_t ROTL32(uint32_t x, int8_t r)
return (x << r) | (x >> (32 - r));
}
-unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash)
+unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash)
{
// The following is MurmurHash3 (x86_32), see http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
uint32_t h1 = nHashSeed;
diff --git a/src/hash.h b/src/hash.h
index c295568a3e..71806483ff 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -25,14 +25,15 @@ private:
public:
static const size_t OUTPUT_SIZE = CSHA256::OUTPUT_SIZE;
- void Finalize(unsigned char hash[OUTPUT_SIZE]) {
+ void Finalize(Span<unsigned char> output) {
+ assert(output.size() == OUTPUT_SIZE);
unsigned char buf[CSHA256::OUTPUT_SIZE];
sha.Finalize(buf);
- sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash);
+ sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data());
}
- CHash256& Write(const unsigned char *data, size_t len) {
- sha.Write(data, len);
+ CHash256& Write(Span<const unsigned char> input) {
+ sha.Write(input.data(), input.size());
return *this;
}
@@ -49,14 +50,15 @@ private:
public:
static const size_t OUTPUT_SIZE = CRIPEMD160::OUTPUT_SIZE;
- void Finalize(unsigned char hash[OUTPUT_SIZE]) {
+ void Finalize(Span<unsigned char> output) {
+ assert(output.size() == OUTPUT_SIZE);
unsigned char buf[CSHA256::OUTPUT_SIZE];
sha.Finalize(buf);
- CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash);
+ CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data());
}
- CHash160& Write(const unsigned char *data, size_t len) {
- sha.Write(data, len);
+ CHash160& Write(Span<const unsigned char> input) {
+ sha.Write(input.data(), input.size());
return *this;
}
@@ -67,52 +69,31 @@ public:
};
/** Compute the 256-bit hash of an object. */
-template<typename T1>
-inline uint256 Hash(const T1 pbegin, const T1 pend)
+template<typename T>
+inline uint256 Hash(const T& in1)
{
- static const unsigned char pblank[1] = {};
uint256 result;
- CHash256().Write(pbegin == pend ? pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0]))
- .Finalize((unsigned char*)&result);
+ CHash256().Write(MakeUCharSpan(in1)).Finalize(result);
return result;
}
/** Compute the 256-bit hash of the concatenation of two objects. */
template<typename T1, typename T2>
-inline uint256 Hash(const T1 p1begin, const T1 p1end,
- const T2 p2begin, const T2 p2end) {
- static const unsigned char pblank[1] = {};
+inline uint256 Hash(const T1& in1, const T2& in2) {
uint256 result;
- CHash256().Write(p1begin == p1end ? pblank : (const unsigned char*)&p1begin[0], (p1end - p1begin) * sizeof(p1begin[0]))
- .Write(p2begin == p2end ? pblank : (const unsigned char*)&p2begin[0], (p2end - p2begin) * sizeof(p2begin[0]))
- .Finalize((unsigned char*)&result);
+ CHash256().Write(MakeUCharSpan(in1)).Write(MakeUCharSpan(in2)).Finalize(result);
return result;
}
/** Compute the 160-bit hash of an object. */
template<typename T1>
-inline uint160 Hash160(const T1 pbegin, const T1 pend)
+inline uint160 Hash160(const T1& in1)
{
- static unsigned char pblank[1] = {};
uint160 result;
- CHash160().Write(pbegin == pend ? pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0]))
- .Finalize((unsigned char*)&result);
+ CHash160().Write(MakeUCharSpan(in1)).Finalize(result);
return result;
}
-/** Compute the 160-bit hash of a vector. */
-inline uint160 Hash160(const std::vector<unsigned char>& vch)
-{
- return Hash160(vch.begin(), vch.end());
-}
-
-/** Compute the 160-bit hash of a vector. */
-template<unsigned int N>
-inline uint160 Hash160(const prevector<N, unsigned char>& vch)
-{
- return Hash160(vch.begin(), vch.end());
-}
-
/** A writer stream (for serialization) that computes a 256-bit hash. */
class CHashWriter
{
@@ -129,13 +110,13 @@ public:
int GetVersion() const { return nVersion; }
void write(const char *pch, size_t size) {
- ctx.Write((const unsigned char*)pch, size);
+ ctx.Write({(const unsigned char*)pch, size});
}
// invalidates the object
uint256 GetHash() {
uint256 result;
- ctx.Finalize((unsigned char*)&result);
+ ctx.Finalize(result);
return result;
}
@@ -200,7 +181,7 @@ uint256 SerializeHash(const T& obj, int nType=SER_GETHASH, int nVersion=PROTOCOL
return ss.GetHash();
}
-unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash);
+unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash);
void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char header, const unsigned char data[32], unsigned char output[64]);
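
The hash.h changes above replace iterator-pair and pointer/length arguments with Spans: CHash256/CHash160::Write() take Span<const unsigned char>, Finalize() takes a Span<unsigned char> that must be exactly OUTPUT_SIZE bytes, and the templated Hash()/Hash160() helpers wrap any byte-like object via MakeUCharSpan. A short usage sketch under those signatures (function and variable names are illustrative only):

    // Usage sketch for the Span-based hashing interface introduced above.
    #include <hash.h>
    #include <uint256.h>

    #include <vector>

    uint256 ExampleHashChain()
    {
        const std::vector<unsigned char> payload{0x01, 0x02, 0x03};

        // One-shot helpers: the argument is wrapped with MakeUCharSpan internally.
        const uint256 single = Hash(payload);
        const uint160 short_hash = Hash160(payload);
        (void)short_hash;

        // Incremental form: byte vectors and uint256 values convert to Spans,
        // and Finalize() asserts the output span is OUTPUT_SIZE bytes long.
        uint256 result;
        CHash256().Write(payload).Write(single).Finalize(result);
        return result;
    }

Call sites that previously passed (begin, end) pairs or pointer/length arguments, such as the verify_script and blockfilter hunks earlier in this diff, now simply pass the object itself.
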
diff --git a/src/init.cpp b/src/init.cpp
index acf9f8bd91..6cca21f375 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -369,9 +369,10 @@ void SetupServerArgs(NodeContext& node)
{
assert(!node.args);
node.args = &gArgs;
+ ArgsManager& argsman = *node.args;
SetupHelpOptions(gArgs);
- gArgs.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now
+ argsman.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now
const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN);
const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET);
@@ -386,109 +387,109 @@ void SetupServerArgs(NodeContext& node)
// GUI args. These will be overwritten by SetupUIArgs for the GUI
"-choosedatadir", "-lang=<lang>", "-min", "-resetguisettings", "-splash", "-uiplatform"};
- gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#if HAVE_SYSTEM
- gArgs.AddArg("-alertnotify=<cmd>", "Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-alertnotify=<cmd>", "Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#endif
- gArgs.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#if HAVE_SYSTEM
- gArgs.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#endif
- gArgs.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-debuglogfile=<file>", strprintf("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (-nodebuglogfile to disable; default: %s)", DEFAULT_DEBUGLOGFILE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-par=<n>", strprintf("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)",
+ argsman.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-debuglogfile=<file>", strprintf("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (-nodebuglogfile to disable; default: %s)", DEFAULT_DEBUGLOGFILE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-par=<n>", strprintf("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)",
-GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-persistmempool", strprintf("Whether to save the mempool on shutdown and load on restart (default: %u)", DEFAULT_PERSIST_MEMPOOL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-pid=<file>", strprintf("Specify pid file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)", BITCOIN_PID_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. "
+ argsman.AddArg("-persistmempool", strprintf("Whether to save the mempool on shutdown and load on restart (default: %u)", DEFAULT_PERSIST_MEMPOOL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-pid=<file>", strprintf("Specify pid file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)", BITCOIN_PID_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. "
"Warning: Reverting this setting requires re-downloading the entire blockchain. "
"(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)", MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#ifndef WIN32
- gArgs.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#else
hidden_args.emplace_back("-sysperms");
#endif
- gArgs.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-blockfilterindex=<type>",
+ argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blockfilterindex=<type>",
strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
" If <type> is not supplied or if <type> = 1, indexes for all known types are enabled.",
ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-bantime=<n>", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-bind=<addr>", "Bind to given address and always listen on it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-listenonion", strprintf("Automatically create Tor hidden service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-port=<port>", strprintf("Listen for connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
- gArgs.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION);
+ argsman.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-bantime=<n>", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-bind=<addr>", "Bind to given address and always listen on it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-listenonion", strprintf("Automatically create Tor hidden service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-port=<port>", strprintf("Listen for connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
+ argsman.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION);
#ifdef USE_UPNP
#if USE_UPNP
- gArgs.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
#else
- gArgs.AddArg("-upnp", strprintf("Use UPnP to map the listening port (default: %u)", 0), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-upnp", strprintf("Use UPnP to map the listening port (default: %u)", 0), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
#endif
#else
hidden_args.emplace_back("-upnp");
#endif
- gArgs.AddArg("-whitebind=<[permissions@]addr>", "Bind to the given address and add permission flags to the peers connecting to it. "
+ argsman.AddArg("-whitebind=<[permissions@]addr>", "Bind to the given address and add permission flags to the peers connecting to it. "
"Use [host]:port notation for IPv6. Allowed permissions: " + Join(NET_PERMISSIONS_DOC, ", ") + ". "
"Specify multiple permissions separated by commas (default: download,noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-whitelist=<[permissions@]IP address or network>", "Add permission flags to the peers connecting from the given IP address (e.g. 1.2.3.4) or "
+ argsman.AddArg("-whitelist=<[permissions@]IP address or network>", "Add permission flags to the peers connecting from the given IP address (e.g. 1.2.3.4) or "
"CIDR-notated network (e.g. 1.2.3.0/24). Uses the same permissions as "
"-whitebind. Can be specified multiple times." , ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- g_wallet_init_interface.AddWalletOptions();
+ g_wallet_init_interface.AddWalletOptions(argsman);
#if ENABLE_ZMQ
- gArgs.AddArg("-zmqpubhashblock=<address>", "Enable publish hash block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawtx=<address>", "Enable publish raw transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubhashblockhwm=<n>", strprintf("Set publish hash block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubhashtxhwm=<n>", strprintf("Set publish hash transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawblockhwm=<n>", strprintf("Set publish raw block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawtxhwm=<n>", strprintf("Set publish raw transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashblock=<address>", "Enable publish hash block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawtx=<address>", "Enable publish raw transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashblockhwm=<n>", strprintf("Set publish hash block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashtxhwm=<n>", strprintf("Set publish hash transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawblockhwm=<n>", strprintf("Set publish raw block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawtxhwm=<n>", strprintf("Set publish raw transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
#else
hidden_args.emplace_back("-zmqpubhashblock=<address>");
hidden_args.emplace_back("-zmqpubhashtx=<address>");
@@ -500,82 +501,82 @@ void SetupServerArgs(NodeContext& node)
hidden_args.emplace_back("-zmqpubrawtxhwm=<n>");
#endif
- gArgs.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checklevel=<n>", strprintf("How thorough the block verification of -checkblocks is: %s (0-4, default: %u)", Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checkblockindex", strprintf("Do a consistency check for the block tree, chainstate, and other validation data structures occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block 295000 (default: %u)", DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). "
+ argsman.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checklevel=<n>", strprintf("How thorough the block verification of -checkblocks is: %s (0-4, default: %u)", Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkblockindex", strprintf("Do a consistency check for the block tree, chainstate, and other validation data structures occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block 295000 (default: %u)", DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). "
"If <category> is not supplied or if <category> = 1, output all debugging information. <category> can be: " + LogInstance().LogCategoriesString() + ".",
ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
#ifdef HAVE_THREAD_LOCAL
- gArgs.AddArg("-logthreadnames", strprintf("Prepend debug output with name of the originating thread (only available on platforms supporting thread_local) (default: %u)", DEFAULT_LOGTHREADNAMES), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-logthreadnames", strprintf("Prepend debug output with name of the originating thread (only available on platforms supporting thread_local) (default: %u)", DEFAULT_LOGTHREADNAMES), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
#else
hidden_args.emplace_back("-logthreadnames");
#endif
- gArgs.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
-
- SetupChainParamsBaseOptions();
-
- gArgs.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
+ argsman.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+
+ SetupChainParamsBaseOptions(argsman);
+
+ argsman.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
-
-
- gArgs.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
- gArgs.AddArg("-blockmintxfee=<amt>", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
- gArgs.AddArg("-blockversion=<n>", "Override block version to test forking scenarios", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::BLOCK_CREATION);
-
- gArgs.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
- gArgs.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
- gArgs.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+
+
+ argsman.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
+ argsman.AddArg("-blockmintxfee=<amt>", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
+ argsman.AddArg("-blockversion=<n>", "Override block version to test forking scenarios", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::BLOCK_CREATION);
+
+ argsman.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
+ argsman.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
+ argsman.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
#if HAVE_DECL_DAEMON
- gArgs.AddArg("-daemon", "Run in the background as a daemon and accept commands", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-daemon", "Run in the background as a daemon and accept commands", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#else
hidden_args.emplace_back("-daemon");
#endif
// Add the hidden options
- gArgs.AddHiddenArgs(hidden_args);
+ argsman.AddHiddenArgs(hidden_args);
}
std::string LicenseInfo()
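
The hunks above are a mechanical migration: every option registration moves from the global gArgs to the argsman reference that SetupServerArgs now receives, so the option registry becomes an explicit dependency. A minimal sketch of the same pattern is below; OptionsRegistry, AddArg, AddHiddenArgs and SetupArgs are illustrative stand-ins, not the real ArgsManager API.

// Simplified stand-in for ArgsManager: options are registered against an
// instance passed by reference instead of a process-wide global.
#include <iostream>
#include <map>
#include <string>
#include <vector>

class OptionsRegistry {
public:
    void AddArg(const std::string& name, const std::string& help) { m_help[name] = help; }
    void AddHiddenArgs(const std::vector<std::string>& names) {
        for (const auto& n : names) m_help[n] = ""; // registered but not documented
    }
    void PrintHelp() const {
        for (const auto& [name, help] : m_help)
            if (!help.empty()) std::cout << name << "\n    " << help << "\n";
    }
private:
    std::map<std::string, std::string> m_help;
};

// Mirrors SetupServerArgs's new shape: it mutates only the registry it is
// handed, so tests can construct a fresh registry per case.
void SetupArgs(OptionsRegistry& argsman)
{
    std::vector<std::string> hidden_args;
    argsman.AddArg("-maxconnections=<n>", "Maintain at most <n> connections to peers");
    hidden_args.emplace_back("-upnp"); // option compiled out in this build
    argsman.AddHiddenArgs(hidden_args);
}

int main()
{
    OptionsRegistry argsman;
    SetupArgs(argsman);
    argsman.PrintHelp();
}
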
@@ -1534,7 +1535,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // use 25%-50% of the remainder for disk cache
nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); // cap total coins db cache
nTotalCache -= nCoinDBCache;
- nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache
+ int64_t nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache
int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
LogPrintf("Cache configuration:\n");
LogPrintf("* Using %.1f MiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024));
@@ -1563,7 +1564,10 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
try {
LOCK(cs_main);
chainman.InitializeChainstate();
- UnloadBlockIndex();
+ chainman.m_total_coinstip_cache = nCoinCacheUsage;
+ chainman.m_total_coinsdb_cache = nCoinDBCache;
+
+ UnloadBlockIndex(node.mempool);
// new CBlockTreeDB tries to delete the existing file, which
// fails if it's still open from the previous loop. Close it first:
@@ -1646,7 +1650,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
}
// The on-disk coinsdb is now in a good state, create the cache
- chainstate->InitCoinsCache();
+ chainstate->InitCoinsCache(nCoinCacheUsage);
assert(chainstate->CanFlushToDisk());
if (!is_coinsview_empty(chainstate)) {
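
The init.cpp hunks above also stop relying on a global coins-cache size: the remainder of the -dbcache budget becomes a local nCoinCacheUsage that is handed to the ChainstateManager and to InitCoinsCache. The following worked sketch reproduces only the split arithmetic; SplitCoinsCache and its inputs are hypothetical, not the real init.cpp code.

// Assumes the block-index share has already been deducted from the budget.
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct CacheSizes {
    int64_t coins_db;  // LevelDB cache for the on-disk coins database
    int64_t coins_tip; // in-memory coins cache for the chain tip
};

CacheSizes SplitCoinsCache(int64_t total_cache, int64_t max_coinsdb_cache_mib)
{
    // 25%-50% of the remainder goes to the coins database cache...
    int64_t coins_db = std::min(total_cache / 2, (total_cache / 4) + (int64_t{1} << 23));
    // ...capped by the per-database limit (given in MiB).
    coins_db = std::min(coins_db, max_coinsdb_cache_mib << 20);
    // Whatever is left is the in-memory coins-tip budget that used to live in
    // the global nCoinCacheUsage and is now passed down explicitly.
    return {coins_db, total_cache - coins_db};
}

int main()
{
    const CacheSizes sizes = SplitCoinsCache(int64_t{300} << 20, /*max_coinsdb_cache_mib=*/8192);
    std::printf("coins db: %.1f MiB, coins tip: %.1f MiB\n",
                sizes.coins_db * (1.0 / 1024 / 1024), sizes.coins_tip * (1.0 / 1024 / 1024));
}
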
diff --git a/src/key.cpp b/src/key.cpp
index 7eecc6e083..4ed74a39b1 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -237,7 +237,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const {
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd, sizeof(rnd));
uint256 hash;
- CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin());
+ CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
std::vector<unsigned char> vchSig;
Sign(hash, vchSig);
return pubkey.Verify(hash, vchSig);
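
The key.cpp change illustrates the new span-based hashing interface: callers chain Write() over whole objects instead of passing (pointer, length) pairs, and Finalize() writes into an output object rather than a raw pointer. The toy below copies only the calling convention; ByteSpan and ToyHasher are stand-ins for Bitcoin Core's Span and CHash256, and the FNV-1a mix is a placeholder, not double-SHA256.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct ByteSpan {
    const unsigned char* data;
    size_t size;
    ByteSpan(const std::string& s) : data(reinterpret_cast<const unsigned char*>(s.data())), size(s.size()) {}
    ByteSpan(const std::vector<unsigned char>& v) : data(v.data()), size(v.size()) {}
    template <size_t N>
    ByteSpan(const unsigned char (&a)[N]) : data(a), size(N) {}
};

class ToyHasher {
    uint64_t state = 1469598103934665603ull; // FNV-1a offset basis
public:
    ToyHasher& Write(ByteSpan in) {
        for (size_t i = 0; i < in.size; ++i) { state ^= in.data[i]; state *= 1099511628211ull; }
        return *this;
    }
    uint64_t Finalize() const { return state; }
};

int main()
{
    const std::string str = "Bitcoin key verification\n";
    unsigned char rnd[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    // Same shape as CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash):
    const uint64_t digest = ToyHasher().Write(str).Write(rnd).Finalize();
    std::printf("digest: %016llx\n", static_cast<unsigned long long>(digest));
}
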
diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp
index 8072b12119..b571d463c9 100644
--- a/src/merkleblock.cpp
+++ b/src/merkleblock.cpp
@@ -70,7 +70,7 @@ uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::ve
else
right = left;
// combine subhashes
- return Hash(left.begin(), left.end(), right.begin(), right.end());
+ return Hash(left, right);
}
}
@@ -126,7 +126,7 @@ uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, uns
right = left;
}
// and combine them before returning
- return Hash(left.begin(), left.end(), right.begin(), right.end());
+ return Hash(left, right);
}
}
diff --git a/src/net.cpp b/src/net.cpp
index 0c56cddbdc..9c72c62df9 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -685,7 +685,7 @@ int V1TransportDeserializer::readData(const char *pch, unsigned int nBytes)
vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
}
- hasher.Write((const unsigned char*)pch, nCopy);
+ hasher.Write({(const unsigned char*)pch, nCopy});
memcpy(&vRecv[nDataPos], pch, nCopy);
nDataPos += nCopy;
@@ -696,7 +696,7 @@ const uint256& V1TransportDeserializer::GetMessageHash() const
{
assert(Complete());
if (data_hash.IsNull())
- hasher.Finalize(data_hash.begin());
+ hasher.Finalize(data_hash);
return data_hash;
}
@@ -736,7 +736,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta
void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) {
// create dbl-sha256 checksum
- uint256 hash = Hash(msg.data.begin(), msg.data.end());
+ uint256 hash = Hash(msg.data);
// create header
CMessageHeader hdr(Params().MessageStart(), msg.m_type.c_str(), msg.data.size());
@@ -2530,7 +2530,24 @@ void CConnman::AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddres
std::vector<CAddress> CConnman::GetAddresses()
{
- return addrman.GetAddr();
+ std::vector<CAddress> addresses = addrman.GetAddr();
+ if (m_banman) {
+ addresses.erase(std::remove_if(addresses.begin(), addresses.end(),
+ [this](const CAddress& addr){return m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr);}),
+ addresses.end());
+ }
+ return addresses;
+}
+
+std::vector<CAddress> CConnman::GetAddresses(Network requestor_network)
+{
+ const auto current_time = GetTime<std::chrono::microseconds>();
+ if (m_addr_response_caches.find(requestor_network) == m_addr_response_caches.end() ||
+ m_addr_response_caches[requestor_network].m_update_addr_response < current_time) {
+ m_addr_response_caches[requestor_network].m_addrs_response_cache = GetAddresses();
+ m_addr_response_caches[requestor_network].m_update_addr_response = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6));
+ }
+ return m_addr_response_caches[requestor_network].m_addrs_response_cache;
}
bool CConnman::AddNode(const std::string& strNode)
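
The new CConnman::GetAddresses(Network) above serves GETADDR requests from a per-network snapshot that is rebuilt only after roughly 21-27 hours. A minimal model of that mechanism follows; AddrCache, the string addresses, and the whole-hour jitter are simplifications for illustration, not the real CConnman code.

#include <chrono>
#include <cstdio>
#include <map>
#include <random>
#include <string>
#include <vector>

enum class Net { IPV4, IPV6, ONION };

class AddrCache {
public:
    std::vector<std::string> GetAddresses(Net requestor_network)
    {
        using namespace std::chrono;
        const auto now = steady_clock::now();
        auto it = m_caches.find(requestor_network);
        if (it == m_caches.end() || it->second.valid_until < now) {
            // (Re)build the cached answer; every requestor from this network
            // sees the same snapshot until it expires, limiting what a
            // scraper can learn about our current connections.
            Cached& entry = m_caches[requestor_network];
            entry.addrs = FetchFreshAddresses();
            std::uniform_int_distribution<int> jitter_h(0, 6);
            entry.valid_until = now + hours(21) + hours(jitter_h(m_rng));
            return entry.addrs;
        }
        return it->second.addrs;
    }

private:
    struct Cached {
        std::vector<std::string> addrs;
        std::chrono::steady_clock::time_point valid_until;
    };
    std::vector<std::string> FetchFreshAddresses() const
    {
        return {"203.0.113.7:8333", "198.51.100.2:8333"}; // placeholder records
    }
    std::map<Net, Cached> m_caches;
    std::mt19937 m_rng{std::random_device{}()};
};

int main()
{
    AddrCache cache;
    const auto a = cache.GetAddresses(Net::IPV4);
    const auto b = cache.GetAddresses(Net::IPV4); // served from cache
    std::printf("first: %zu addrs, second: %zu addrs (cached)\n", a.size(), b.size());
}
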
diff --git a/src/net.h b/src/net.h
index 17d8fda372..1c558ee810 100644
--- a/src/net.h
+++ b/src/net.h
@@ -27,6 +27,7 @@
#include <atomic>
#include <cstdint>
#include <deque>
+#include <map>
#include <thread>
#include <memory>
#include <condition_variable>
@@ -52,6 +53,9 @@ static const int TIMEOUT_INTERVAL = 20 * 60;
static const int FEELER_INTERVAL = 120;
/** The maximum number of new addresses to accumulate before announcing. */
static const unsigned int MAX_ADDR_TO_SEND = 1000;
+// TODO: remove ADDRMAN_GETADDR_MAX and let the caller specify this limit with MAX_ADDR_TO_SEND.
+static_assert(MAX_ADDR_TO_SEND == ADDRMAN_GETADDR_MAX,
+ "Max allowed ADDR message size should be equal to the max number of records returned from AddrMan.");
/** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 4 * 1000 * 1000;
/** Maximum length of the user agent string in `version` message */
@@ -251,6 +255,13 @@ public:
void MarkAddressGood(const CAddress& addr);
void AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0);
std::vector<CAddress> GetAddresses();
+ /**
+ * The cache is used to minimize topology leaks, so it should
+ * be used for all untrusted calls, for example over p2p.
+ * A trusted caller (RPC, or a peer with the addr permission) should
+ * use the parameterless overload above to avoid the cache.
+ */
+ std::vector<CAddress> GetAddresses(Network requestor_network);
// This allows temporarily exceeding m_max_outbound_full_relay, with the goal of finding
// a peer that is better than all our current peers.
@@ -416,6 +427,29 @@ private:
unsigned int nPrevNodeCount{0};
/**
+ * Cache responses to addr requests to minimize the privacy leak.
+ * Attack example: scraping addrs in real-time may allow an attacker
+ * to infer new connections of the victim by detecting new records
+ * with fresh timestamps (per self-announcement).
+ */
+ struct CachedAddrResponse {
+ std::vector<CAddress> m_addrs_response_cache;
+ std::chrono::microseconds m_update_addr_response{0};
+ };
+
+ /**
+ * Addr responses stored in different caches
+ * per network prevent cross-network node identification.
+ * If a node is, for example, multi-homed under Tor and IPv6,
+ * a single cache (or no cache at all) would let an attacker
+ * easily detect that it is the same node by comparing responses.
+ * The memory used is 1000 CAddress records (roughly 32 bytes each) per
+ * distinct Network (up to 5) we have/had an inbound peer from,
+ * resulting in at most ~160 KB.
+ */
+ std::map<Network, CachedAddrResponse> m_addr_response_caches;
+
+ /**
* Services this instance offers.
*
* This data is replicated in each CNode instance we create during peer
diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp
index a75838307c..53648deb40 100644
--- a/src/net_permissions.cpp
+++ b/src/net_permissions.cpp
@@ -15,6 +15,7 @@ const std::vector<std::string> NET_PERMISSIONS_DOC{
"relay (relay even in -blocksonly mode)",
"mempool (allow requesting BIP35 mempool contents)",
"download (allow getheaders during IBD, no disconnect after maxuploadtarget limit)",
+ "addr (responses to GETADDR avoid hitting the cache and contain random records with the most up-to-date info)"
};
namespace {
@@ -50,6 +51,7 @@ bool TryParsePermissionFlags(const std::string str, NetPermissionFlags& output,
else if (permission == "download") NetPermissions::AddFlag(flags, PF_DOWNLOAD);
else if (permission == "all") NetPermissions::AddFlag(flags, PF_ALL);
else if (permission == "relay") NetPermissions::AddFlag(flags, PF_RELAY);
+ else if (permission == "addr") NetPermissions::AddFlag(flags, PF_ADDR);
else if (permission.length() == 0); // Allow empty entries
else {
error = strprintf(_("Invalid P2P permission: '%s'"), permission);
@@ -75,6 +77,7 @@ std::vector<std::string> NetPermissions::ToStrings(NetPermissionFlags flags)
if (NetPermissions::HasFlag(flags, PF_RELAY)) strings.push_back("relay");
if (NetPermissions::HasFlag(flags, PF_MEMPOOL)) strings.push_back("mempool");
if (NetPermissions::HasFlag(flags, PF_DOWNLOAD)) strings.push_back("download");
+ if (NetPermissions::HasFlag(flags, PF_ADDR)) strings.push_back("addr");
return strings;
}
diff --git a/src/net_permissions.h b/src/net_permissions.h
index a9633ee2ae..5b68f635a7 100644
--- a/src/net_permissions.h
+++ b/src/net_permissions.h
@@ -29,10 +29,12 @@ enum NetPermissionFlags {
PF_NOBAN = (1U << 4) | PF_DOWNLOAD,
// Can query the mempool
PF_MEMPOOL = (1U << 5),
+ // Can request addrs without hitting a privacy-preserving cache
+ PF_ADDR = (1U << 7),
// True if the user did not specifically set fine grained permissions
PF_ISIMPLICIT = (1U << 31),
- PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL | PF_DOWNLOAD,
+ PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL | PF_DOWNLOAD | PF_ADDR,
};
class NetPermissions
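
The net_permissions changes above add one bit (PF_ADDR), fold it into PF_ALL, and wire it into the parse and to-string paths. The toy below shows that shape end to end; the enum values and helper names imitate NetPermissionFlags but this is a self-contained sketch, not the real net_permissions.h.

#include <cstdint>
#include <cstdio>
#include <string>

enum PermFlags : uint32_t {
    PF_NONE    = 0,
    PF_RELAY   = 1U << 3,
    PF_MEMPOOL = 1U << 5,
    PF_ADDR    = 1U << 7,                      // bypass the GETADDR cache
    PF_ALL     = PF_RELAY | PF_MEMPOOL | PF_ADDR,
};

static bool HasFlag(uint32_t flags, PermFlags f) { return (flags & f) == f; }

static bool AddPermission(const std::string& name, uint32_t& flags)
{
    if (name == "relay")        flags |= PF_RELAY;
    else if (name == "mempool") flags |= PF_MEMPOOL;
    else if (name == "addr")    flags |= PF_ADDR; // the newly accepted token
    else if (name == "all")     flags |= PF_ALL;
    else if (name.empty())      {}                // tolerate empty entries
    else return false;                            // unknown permission
    return true;
}

int main()
{
    uint32_t flags = PF_NONE;
    for (const std::string& tok : {std::string("relay"), std::string("addr")}) {
        if (!AddPermission(tok, flags)) std::printf("invalid permission: %s\n", tok.c_str());
    }
    std::printf("addr permission granted: %s\n", HasFlag(flags, PF_ADDR) ? "yes" : "no");
}
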
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index ef7f486e0b..72c8e65c6b 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -242,7 +242,7 @@ namespace {
/** When our tip was last updated. */
std::atomic<int64_t> g_last_tip_update(0);
- /** Relay map */
+ /** Relay map (txid or wtxid -> CTransactionRef) */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay GUARDED_BY(cs_main);
/** Expiration-time ordered list of (expire time, relay map entry) pairs. */
@@ -404,7 +404,7 @@ struct CNodeState {
/* Track when to attempt download of announced transactions (process
* time in micros -> txid)
*/
- std::multimap<std::chrono::microseconds, uint256> m_tx_process_time;
+ std::multimap<std::chrono::microseconds, GenTxid> m_tx_process_time;
//! Store all the transactions a peer has recently announced
std::set<uint256> m_tx_announced;
@@ -757,34 +757,34 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
}
}
-void EraseTxRequest(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void EraseTxRequest(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- g_already_asked_for.erase(txid);
+ g_already_asked_for.erase(gtxid.GetHash());
}
-std::chrono::microseconds GetTxRequestTime(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+std::chrono::microseconds GetTxRequestTime(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- auto it = g_already_asked_for.find(txid);
+ auto it = g_already_asked_for.find(gtxid.GetHash());
if (it != g_already_asked_for.end()) {
return it->second;
}
return {};
}
-void UpdateTxRequestTime(const uint256& txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void UpdateTxRequestTime(const GenTxid& gtxid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- auto it = g_already_asked_for.find(txid);
+ auto it = g_already_asked_for.find(gtxid.GetHash());
if (it == g_already_asked_for.end()) {
- g_already_asked_for.insert(std::make_pair(txid, request_time));
+ g_already_asked_for.insert(std::make_pair(gtxid.GetHash(), request_time));
} else {
g_already_asked_for.update(it, request_time);
}
}
-std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+std::chrono::microseconds CalculateTxGetDataTime(const GenTxid& gtxid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
std::chrono::microseconds process_time;
- const auto last_request_time = GetTxRequestTime(txid);
+ const auto last_request_time = GetTxRequestTime(gtxid);
// First time requesting this tx
if (last_request_time.count() == 0) {
process_time = current_time;
@@ -803,23 +803,23 @@ std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chron
return process_time;
}
-void RequestTx(CNodeState* state, const uint256& txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void RequestTx(CNodeState* state, const GenTxid& gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
CNodeState::TxDownloadState& peer_download_state = state->m_tx_download;
if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
- peer_download_state.m_tx_announced.count(txid)) {
+ peer_download_state.m_tx_announced.count(gtxid.GetHash())) {
// Too many queued announcements from this peer, or we already have
// this announcement
return;
}
- peer_download_state.m_tx_announced.insert(txid);
+ peer_download_state.m_tx_announced.insert(gtxid.GetHash());
// Calculate the time to try requesting this transaction. Use
// fPreferredDownload as a proxy for outbound peers.
- const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0);
+ const auto process_time = CalculateTxGetDataTime(gtxid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0);
- peer_download_state.m_tx_process_time.emplace(process_time, txid);
+ peer_download_state.m_tx_process_time.emplace(process_time, gtxid);
}
} // namespace
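
The RequestTx/GetTxRequestTime rework above re-keys the per-peer download bookkeeping on GenTxid, a (is_wtxid, hash) pair, instead of a bare txid. The sketch below models only that bookkeeping; GenTx, PeerDownloadState, the announcement limit and the 2-second inbound delay are illustrative, not the real net_processing state.

#include <chrono>
#include <cstdio>
#include <map>
#include <set>
#include <string>

struct GenTx {
    bool is_wtxid;    // true if `hash` is a wtxid, false for a txid
    std::string hash; // placeholder for uint256
};

struct PeerDownloadState {
    static constexpr size_t MAX_ANNOUNCEMENTS = 5000;
    std::set<std::string> announced;                                   // dedup by hash
    std::multimap<std::chrono::steady_clock::time_point, GenTx> queue; // when to request
};

void RequestTx(PeerDownloadState& peer, const GenTx& gtx,
               std::chrono::steady_clock::time_point now, bool inbound_peer)
{
    if (peer.announced.size() >= PeerDownloadState::MAX_ANNOUNCEMENTS ||
        peer.announced.count(gtx.hash)) {
        return; // too many queued announcements, or already announced
    }
    peer.announced.insert(gtx.hash);
    // Give outbound peers a head start, as the patch does via fPreferredDownload.
    const auto delay = inbound_peer ? std::chrono::seconds(2) : std::chrono::seconds(0);
    peer.queue.emplace(now + delay, gtx);
}

int main()
{
    PeerDownloadState peer;
    const auto now = std::chrono::steady_clock::now();
    RequestTx(peer, {/*is_wtxid=*/true, "ab..01"}, now, /*inbound_peer=*/true);
    RequestTx(peer, {/*is_wtxid=*/true, "ab..01"}, now, true); // duplicate, ignored
    std::printf("queued announcements: %zu\n", peer.queue.size());
}
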
@@ -1448,9 +1448,9 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO
{
LOCK(g_cs_orphans);
- if (inv.type != MSG_WTX && mapOrphanTransactions.count(inv.hash)) {
+ if (!inv.IsMsgWtx() && mapOrphanTransactions.count(inv.hash)) {
return true;
- } else if (inv.type == MSG_WTX && g_orphans_by_wtxid.count(inv.hash)) {
+ } else if (inv.IsMsgWtx() && g_orphans_by_wtxid.count(inv.hash)) {
return true;
}
}
@@ -1460,8 +1460,7 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO
if (g_recent_confirmed_transactions->contains(inv.hash)) return true;
}
- const bool by_wtxid = (inv.type == MSG_WTX);
- return recentRejects->contains(inv.hash) || mempool.exists(inv.hash, by_wtxid);
+ return recentRejects->contains(inv.hash) || mempool.exists(ToGenTxid(inv));
}
case MSG_BLOCK:
case MSG_WITNESS_BLOCK:
@@ -1679,9 +1678,9 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c
}
//! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed).
-CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid_or_wtxid, bool use_wtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
+CTransactionRef static FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
{
- auto txinfo = mempool.info(txid_or_wtxid, use_wtxid);
+ auto txinfo = mempool.info(gtxid);
if (txinfo.tx) {
// If a TX could have been INVed in reply to a MEMPOOL request,
// or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
@@ -1694,11 +1693,11 @@ CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid_o
{
LOCK(cs_main);
// Otherwise, the transaction must have been announced recently.
- if (State(peer.GetId())->m_recently_announced_invs.contains(txid_or_wtxid)) {
+ if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
// If it was, it can be relayed from either the mempool...
if (txinfo.tx) return std::move(txinfo.tx);
// ... or the relay pool.
- auto mi = mapRelay.find(txid_or_wtxid);
+ auto mi = mapRelay.find(gtxid.GetHash());
if (mi != mapRelay.end()) return mi->second;
}
}
@@ -1722,7 +1721,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
// Process as many TX items from the front of the getdata queue as
// possible, since they're common and it's efficient to batch process
// them.
- while (it != pfrom.vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX || it->type == MSG_WTX)) {
+ while (it != pfrom.vRecvGetData.end() && it->IsGenTxMsg()) {
if (interruptMsgProc) return;
// The send buffer provides backpressure. If there's no space in
// the buffer, pause processing until the next call.
@@ -1735,10 +1734,10 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
continue;
}
- CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, inv.type == MSG_WTX, mempool_req, now);
+ CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now);
if (tx) {
// WTX and WITNESS_TX imply we serialize with witness
- int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
+ int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
mempool.RemoveUnbroadcastTx(tx->GetHash());
// As we're going to send tx, make sure its unconfirmed parents are made requestable.
@@ -2562,7 +2561,7 @@ void ProcessMessage(
if (!pfrom.IsAddrRelayPeer()) {
return;
}
- if (vAddr.size() > 1000)
+ if (vAddr.size() > MAX_ADDR_TO_SEND)
{
LOCK(cs_main);
Misbehaving(pfrom.GetId(), 20, strprintf("addr message size = %u", vAddr.size()));
@@ -2668,17 +2667,19 @@ void ProcessMessage(
if (interruptMsgProc)
return;
- // ignore INVs that don't match wtxidrelay setting
+ // Ignore INVs that don't match wtxidrelay setting.
+ // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
+ // This is fine as no INV messages are involved in that process.
if (State(pfrom.GetId())->m_wtxid_relay) {
- if (inv.type == MSG_TX) continue;
+ if (inv.IsMsgTx()) continue;
} else {
- if (inv.type == MSG_WTX) continue;
+ if (inv.IsMsgWtx()) continue;
}
bool fAlreadyHave = AlreadyHave(inv, mempool);
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
- if (inv.type == MSG_TX) {
+ if (inv.IsMsgTx()) {
inv.type |= nFetchFlags;
}
@@ -2699,7 +2700,7 @@ void ProcessMessage(
pfrom.fDisconnect = true;
return;
} else if (!fAlreadyHave && !chainman.ActiveChainstate().IsInitialBlockDownload()) {
- RequestTx(State(pfrom.GetId()), inv.hash, current_time);
+ RequestTx(State(pfrom.GetId()), ToGenTxid(inv), current_time);
}
}
}
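
Much of the INV handling above leans on the new CInv predicates (IsMsgTx, IsMsgWtx, IsGenTxMsg) plus the per-peer wtxidrelay flag: a wtxid-relay peer's MSG_TX announcements are skipped, and non-wtxid-relay peers' MSG_WTX announcements are skipped. The toy below mirrors that filter; the type codes follow protocol.h but should be treated as illustrative here.

#include <cstdint>
#include <cstdio>
#include <vector>

enum InvType : uint32_t { MSG_TX = 1, MSG_BLOCK = 2, MSG_WTX = 5, MSG_WITNESS_TX = 0x40000001 };

struct Inv {
    uint32_t type;
    bool IsMsgTx() const { return type == MSG_TX; }
    bool IsMsgWtx() const { return type == MSG_WTX; }
    bool IsGenTxMsg() const { return type == MSG_TX || type == MSG_WTX || type == MSG_WITNESS_TX; }
};

// Returns true if the announcement should be considered for download.
bool AcceptTxInv(const Inv& inv, bool peer_wtxid_relay)
{
    if (!inv.IsGenTxMsg()) return false;                   // blocks are handled elsewhere
    if (peer_wtxid_relay && inv.IsMsgTx()) return false;   // expect MSG_WTX from this peer
    if (!peer_wtxid_relay && inv.IsMsgWtx()) return false; // and MSG_TX from this one
    return true;
}

int main()
{
    const std::vector<Inv> invs{{MSG_TX}, {MSG_WTX}, {MSG_BLOCK}};
    for (const Inv& inv : invs) {
        std::printf("type %u accepted from wtxid-relay peer: %s\n",
                    inv.type, AcceptTxInv(inv, /*peer_wtxid_relay=*/true) ? "yes" : "no");
    }
}
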
@@ -2952,9 +2953,11 @@ void ProcessMessage(
TxValidationState state;
- nodestate->m_tx_download.m_tx_announced.erase(hash);
- nodestate->m_tx_download.m_tx_in_flight.erase(hash);
- EraseTxRequest(hash);
+ for (const GenTxid& gtxid : {GenTxid(false, txid), GenTxid(true, wtxid)}) {
+ nodestate->m_tx_download.m_tx_announced.erase(gtxid.GetHash());
+ nodestate->m_tx_download.m_tx_in_flight.erase(gtxid.GetHash());
+ EraseTxRequest(gtxid);
+ }
std::list<CTransactionRef> lRemovedTxn;
@@ -3006,17 +3009,15 @@ void ProcessMessage(
uint32_t nFetchFlags = GetFetchFlags(pfrom);
const auto current_time = GetTime<std::chrono::microseconds>();
- if (!State(pfrom.GetId())->m_wtxid_relay) {
- for (const CTxIn& txin : tx.vin) {
- // Here, we only have the txid (and not wtxid) of the
- // inputs, so we only request parents from
- // non-wtxid-relay peers.
- // Eventually we should replace this with an improved
- // protocol for getting all unconfirmed parents.
- CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
- pfrom.AddKnownTx(txin.prevout.hash);
- if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), _inv.hash, current_time);
- }
+ for (const CTxIn& txin : tx.vin) {
+ // Here, we only have the txid (and not wtxid) of the
+ // inputs, so we only request in txid mode, even for
+ // wtxidrelay peers.
+ // Eventually we should replace this with an improved
+ // protocol for getting all unconfirmed parents.
+ CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
+ pfrom.AddKnownTx(txin.prevout.hash);
+ if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), ToGenTxid(_inv), current_time);
}
AddOrphanTx(ptx, pfrom.GetId());
@@ -3504,13 +3505,15 @@ void ProcessMessage(
pfrom.fSentAddr = true;
pfrom.vAddrToSend.clear();
- std::vector<CAddress> vAddr = connman.GetAddresses();
+ std::vector<CAddress> vAddr;
+ if (pfrom.HasPermission(PF_ADDR)) {
+ vAddr = connman.GetAddresses();
+ } else {
+ vAddr = connman.GetAddresses(pfrom.addr.GetNetwork());
+ }
FastRandomContext insecure_rand;
for (const CAddress &addr : vAddr) {
- bool banned_or_discouraged = banman && (banman->IsDiscouraged(addr) || banman->IsBanned(addr));
- if (!banned_or_discouraged) {
- pfrom.PushAddress(addr, insecure_rand);
- }
+ pfrom.PushAddress(addr, insecure_rand);
}
return;
}
@@ -3721,7 +3724,7 @@ void ProcessMessage(
vRecv >> vInv;
if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
for (CInv &inv : vInv) {
- if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX || inv.type == MSG_WTX) {
+ if (inv.IsGenTxMsg()) {
// If we receive a NOTFOUND message for a txid we requested, erase
// it from our data structures for this peer.
auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash);
@@ -4109,8 +4112,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
{
pto->m_addr_known->insert(addr.GetKey());
vAddr.push_back(addr);
- // receiver rejects addr messages larger than 1000
- if (vAddr.size() >= 1000)
+ // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
+ if (vAddr.size() >= MAX_ADDR_TO_SEND)
{
connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
vAddr.clear();
@@ -4388,6 +4391,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
std::set<uint256>::iterator it = vInvTx.back();
vInvTx.pop_back();
uint256 hash = *it;
+ CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
// Remove it from the to-be-sent set
pto->m_tx_relay->setInventoryTxToSend.erase(it);
// Check if not in the filter already
@@ -4395,7 +4399,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
continue;
}
// Not in the mempool anymore? don't bother sending it.
- auto txinfo = m_mempool.info(hash, state.m_wtxid_relay);
+ auto txinfo = m_mempool.info(ToGenTxid(inv));
if (!txinfo.tx) {
continue;
}
@@ -4408,7 +4412,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
// Send
State(pto->GetId())->m_recently_announced_invs.insert(hash);
- vInv.push_back(CInv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash));
+ vInv.push_back(inv);
nRelayedTransactions++;
{
// Expire old relay messages
@@ -4561,15 +4565,15 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
auto& tx_process_time = state.m_tx_download.m_tx_process_time;
while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) {
- const uint256 txid = tx_process_time.begin()->second;
+ const GenTxid gtxid = tx_process_time.begin()->second;
// Erase this entry from tx_process_time (it may be added back for
// processing at a later time, see below)
tx_process_time.erase(tx_process_time.begin());
- CInv inv(state.m_wtxid_relay ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), txid);
+ CInv inv(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
if (!AlreadyHave(inv, m_mempool)) {
// If this transaction was last requested more than 1 minute ago,
// then request.
- const auto last_request_time = GetTxRequestTime(inv.hash);
+ const auto last_request_time = GetTxRequestTime(gtxid);
if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
vGetData.push_back(inv);
@@ -4577,8 +4581,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
vGetData.clear();
}
- UpdateTxRequestTime(inv.hash, current_time);
- state.m_tx_download.m_tx_in_flight.emplace(inv.hash, current_time);
+ UpdateTxRequestTime(gtxid, current_time);
+ state.m_tx_download.m_tx_in_flight.emplace(gtxid.GetHash(), current_time);
} else {
// This transaction is in flight from someone else; queue
// up processing to happen after the download times out
@@ -4592,13 +4596,13 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// would open us up to an attacker using inbound
// wtxid-relay to prevent us from requesting transactions
// from outbound txid-relay peers).
- const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload, false);
- tx_process_time.emplace(next_process_time, txid);
+ const auto next_process_time = CalculateTxGetDataTime(gtxid, current_time, !state.fPreferredDownload, false);
+ tx_process_time.emplace(next_process_time, gtxid);
}
} else {
// We have already seen this transaction, no need to download.
- state.m_tx_download.m_tx_announced.erase(inv.hash);
- state.m_tx_download.m_tx_in_flight.erase(inv.hash);
+ state.m_tx_download.m_tx_announced.erase(gtxid.GetHash());
+ state.m_tx_download.m_tx_in_flight.erase(gtxid.GetHash());
}
}
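The hunks above thread a GenTxid (txid or wtxid) through the per-peer download scheduler instead of a bare uint256, so wtxid-relay peers are asked by wtxid and legacy peers by txid. A minimal sketch of that mapping, reusing the MSG_* constants and GetFetchFlags() assumed from this file; this is an illustration, not part of the patch:

// Sketch only: how a scheduled GenTxid becomes the CInv placed in a getdata message.
static CInv InvFromGenTxid(const GenTxid& gtxid, const CNode& peer)
{
    // wtxid-relay peers are asked by wtxid; legacy peers by txid, with the
    // witness fetch flag OR'ed in where the peer supports it.
    const int inv_type = gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(peer));
    return CInv(inv_type, gtxid.GetHash());
}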
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 0aaba440b8..d29aed6c8b 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -28,19 +28,35 @@ CNetAddr::CNetAddr()
void CNetAddr::SetIP(const CNetAddr& ipIn)
{
+ m_net = ipIn.m_net;
memcpy(ip, ipIn.ip, sizeof(ip));
}
+void CNetAddr::SetLegacyIPv6(const uint8_t ipv6[16])
+{
+ if (memcmp(ipv6, pchIPv4, sizeof(pchIPv4)) == 0) {
+ m_net = NET_IPV4;
+ } else if (memcmp(ipv6, pchOnionCat, sizeof(pchOnionCat)) == 0) {
+ m_net = NET_ONION;
+ } else if (memcmp(ipv6, g_internal_prefix, sizeof(g_internal_prefix)) == 0) {
+ m_net = NET_INTERNAL;
+ } else {
+ m_net = NET_IPV6;
+ }
+ memcpy(ip, ipv6, 16);
+}
+
void CNetAddr::SetRaw(Network network, const uint8_t *ip_in)
{
switch(network)
{
case NET_IPV4:
+ m_net = NET_IPV4;
memcpy(ip, pchIPv4, 12);
memcpy(ip+12, ip_in, 4);
break;
case NET_IPV6:
- memcpy(ip, ip_in, 16);
+ SetLegacyIPv6(ip_in);
break;
default:
assert(!"invalid network");
@@ -66,6 +82,7 @@ bool CNetAddr::SetInternal(const std::string &name)
if (name.empty()) {
return false;
}
+ m_net = NET_INTERNAL;
unsigned char hash[32] = {};
CSHA256().Write((const unsigned char*)name.data(), name.size()).Finalize(hash);
memcpy(ip, g_internal_prefix, sizeof(g_internal_prefix));
@@ -89,6 +106,7 @@ bool CNetAddr::SetSpecial(const std::string &strName)
std::vector<unsigned char> vchAddr = DecodeBase32(strName.substr(0, strName.size() - 6).c_str());
if (vchAddr.size() != 16-sizeof(pchOnionCat))
return false;
+ m_net = NET_ONION;
memcpy(ip, pchOnionCat, sizeof(pchOnionCat));
for (unsigned int i=0; i<16-sizeof(pchOnionCat); i++)
ip[i + sizeof(pchOnionCat)] = vchAddr[i];
@@ -123,15 +141,9 @@ bool CNetAddr::IsBindAny() const
return true;
}
-bool CNetAddr::IsIPv4() const
-{
- return (memcmp(ip, pchIPv4, sizeof(pchIPv4)) == 0);
-}
+bool CNetAddr::IsIPv4() const { return m_net == NET_IPV4; }
-bool CNetAddr::IsIPv6() const
-{
- return (!IsIPv4() && !IsTor() && !IsInternal());
-}
+bool CNetAddr::IsIPv6() const { return m_net == NET_IPV6; }
bool CNetAddr::IsRFC1918() const
{
@@ -165,50 +177,54 @@ bool CNetAddr::IsRFC5737() const
bool CNetAddr::IsRFC3849() const
{
- return GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x0D && GetByte(12) == 0xB8;
+ return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 &&
+ GetByte(13) == 0x0D && GetByte(12) == 0xB8;
}
bool CNetAddr::IsRFC3964() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x02);
+ return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x02;
}
bool CNetAddr::IsRFC6052() const
{
static const unsigned char pchRFC6052[] = {0,0x64,0xFF,0x9B,0,0,0,0,0,0,0,0};
- return (memcmp(ip, pchRFC6052, sizeof(pchRFC6052)) == 0);
+ return IsIPv6() && memcmp(ip, pchRFC6052, sizeof(pchRFC6052)) == 0;
}
bool CNetAddr::IsRFC4380() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0 && GetByte(12) == 0);
+ return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0 &&
+ GetByte(12) == 0;
}
bool CNetAddr::IsRFC4862() const
{
static const unsigned char pchRFC4862[] = {0xFE,0x80,0,0,0,0,0,0};
- return (memcmp(ip, pchRFC4862, sizeof(pchRFC4862)) == 0);
+ return IsIPv6() && memcmp(ip, pchRFC4862, sizeof(pchRFC4862)) == 0;
}
bool CNetAddr::IsRFC4193() const
{
- return ((GetByte(15) & 0xFE) == 0xFC);
+ return IsIPv6() && (GetByte(15) & 0xFE) == 0xFC;
}
bool CNetAddr::IsRFC6145() const
{
static const unsigned char pchRFC6145[] = {0,0,0,0,0,0,0,0,0xFF,0xFF,0,0};
- return (memcmp(ip, pchRFC6145, sizeof(pchRFC6145)) == 0);
+ return IsIPv6() && memcmp(ip, pchRFC6145, sizeof(pchRFC6145)) == 0;
}
bool CNetAddr::IsRFC4843() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x10);
+ return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 &&
+ GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x10;
}
bool CNetAddr::IsRFC7343() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20);
+ return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 &&
+ GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20;
}
bool CNetAddr::IsHeNet() const
@@ -222,10 +238,7 @@ bool CNetAddr::IsHeNet() const
*
* @see CNetAddr::SetSpecial(const std::string &)
*/
-bool CNetAddr::IsTor() const
-{
- return (memcmp(ip, pchOnionCat, sizeof(pchOnionCat)) == 0);
-}
+bool CNetAddr::IsTor() const { return m_net == NET_ONION; }
bool CNetAddr::IsLocal() const
{
@@ -235,7 +248,7 @@ bool CNetAddr::IsLocal() const
// IPv6 loopback (::1/128)
static const unsigned char pchLocal[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1};
- if (memcmp(ip, pchLocal, 16) == 0)
+ if (IsIPv6() && memcmp(ip, pchLocal, 16) == 0)
return true;
return false;
@@ -259,12 +272,12 @@ bool CNetAddr::IsValid() const
// header20 vectorlen3 addr26 addr26 addr26 header20 vectorlen3 addr26 addr26 addr26...
// so if the first length field is garbled, it reads the second batch
// of addr misaligned by 3 bytes.
- if (memcmp(ip, pchIPv4+3, sizeof(pchIPv4)-3) == 0)
+ if (IsIPv6() && memcmp(ip, pchIPv4+3, sizeof(pchIPv4)-3) == 0)
return false;
// unspecified IPv6 address (::/128)
unsigned char ipNone6[16] = {};
- if (memcmp(ip, ipNone6, 16) == 0)
+ if (IsIPv6() && memcmp(ip, ipNone6, 16) == 0)
return false;
// documentation IPv6 address
@@ -311,7 +324,7 @@ bool CNetAddr::IsRoutable() const
*/
bool CNetAddr::IsInternal() const
{
- return memcmp(ip, g_internal_prefix, sizeof(g_internal_prefix)) == 0;
+ return m_net == NET_INTERNAL;
}
enum Network CNetAddr::GetNetwork() const
@@ -322,13 +335,7 @@ enum Network CNetAddr::GetNetwork() const
if (!IsRoutable())
return NET_UNROUTABLE;
- if (IsIPv4())
- return NET_IPV4;
-
- if (IsTor())
- return NET_ONION;
-
- return NET_IPV6;
+ return m_net;
}
std::string CNetAddr::ToStringIP() const
@@ -362,12 +369,12 @@ std::string CNetAddr::ToString() const
bool operator==(const CNetAddr& a, const CNetAddr& b)
{
- return (memcmp(a.ip, b.ip, 16) == 0);
+ return a.m_net == b.m_net && memcmp(a.ip, b.ip, 16) == 0;
}
bool operator<(const CNetAddr& a, const CNetAddr& b)
{
- return (memcmp(a.ip, b.ip, 16) < 0);
+ return a.m_net < b.m_net || (a.m_net == b.m_net && memcmp(a.ip, b.ip, 16) < 0);
}
/**
@@ -546,7 +553,7 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co
uint64_t CNetAddr::GetHash() const
{
- uint256 hash = Hash(&ip[0], &ip[16]);
+ uint256 hash = Hash(ip);
uint64_t nRet;
memcpy(&nRet, &hash, sizeof(nRet));
return nRet;
@@ -813,7 +820,7 @@ CSubNet::CSubNet(const CNetAddr &addr):
*/
bool CSubNet::Match(const CNetAddr &addr) const
{
- if (!valid || !addr.IsValid())
+ if (!valid || !addr.IsValid() || network.m_net != addr.m_net)
return false;
for(int x=0; x<16; ++x)
if ((addr.ip[x] & netmask[x]) != network.ip[x])
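SetLegacyIPv6() above is now the single place where the 16-byte legacy encoding is classified into a network by prefix. A standalone sketch of that classification; the prefix values are reproduced here for illustration, while the real code uses the pchIPv4, pchOnionCat and g_internal_prefix constants from this file:

#include <cstdint>
#include <cstring>

enum SketchNet { SKETCH_IPV4, SKETCH_IPV6, SKETCH_ONION, SKETCH_INTERNAL };

SketchNet ClassifyLegacyIPv6(const uint8_t ipv6[16])
{
    static const uint8_t ipv4_prefix[12] = {0,0,0,0,0,0,0,0,0,0,0xFF,0xFF}; // ::FFFF:0:0/96
    static const uint8_t onioncat[6] = {0xFD,0x87,0xD8,0x7E,0xEB,0x43};     // OnionCat (TORv2) range
    static const uint8_t internal[6] = {0xFD,0x6B,0x88,0xC0,0x87,0x24};     // NET_INTERNAL dummy range
    if (std::memcmp(ipv6, ipv4_prefix, sizeof(ipv4_prefix)) == 0) return SKETCH_IPV4;
    if (std::memcmp(ipv6, onioncat, sizeof(onioncat)) == 0) return SKETCH_ONION;
    if (std::memcmp(ipv6, internal, sizeof(internal)) == 0) return SKETCH_INTERNAL;
    return SKETCH_IPV6;
}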
diff --git a/src/netaddress.h b/src/netaddress.h
index f2daad7fb6..0365907d44 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -16,21 +16,50 @@
#include <string>
#include <vector>
+/**
+ * A network type.
+ * @note An address may belong to more than one network, for example `10.0.0.1`
+ * belongs to both `NET_UNROUTABLE` and `NET_IPV4`.
+ * Keep these sequential starting from 0, with `NET_MAX` as the last entry.
+ * We have loops like `for (int i = 0; i < NET_MAX; i++)` that expect to iterate
+ * over all enum values, and `GetExtNetwork()` "extends" this enum by
+ * introducing standalone constants starting from `NET_MAX`.
+ */
enum Network
{
+ /// Addresses from these networks are not publicly routable on the global Internet.
NET_UNROUTABLE = 0,
+
+ /// IPv4
NET_IPV4,
+
+ /// IPv6
NET_IPV6,
+
+ /// TORv2
NET_ONION,
+
+ /// A set of dummy addresses that map a name to an IPv6 address. These
+ /// addresses belong to RFC4193's fc00::/7 subnet (unique-local addresses).
+ /// We use them to map a string or FQDN to an IPv6 address in CAddrMan to
+ /// keep track of which DNS seeds were used.
NET_INTERNAL,
+ /// Dummy value to indicate the number of NET_* constants.
NET_MAX,
};
-/** IP address (IPv6, or IPv4 using mapped IPv6 range (::FFFF:0:0/96)) */
+/**
+ * Network address.
+ */
class CNetAddr
{
protected:
+ /**
+ * Network to which this address belongs.
+ */
+ Network m_net{NET_IPV6};
+
unsigned char ip[16]; // in network byte order
uint32_t scopeId{0}; // for scoped/link-local ipv6 addresses
@@ -40,6 +69,14 @@ class CNetAddr
void SetIP(const CNetAddr& ip);
/**
+ * Set from a legacy IPv6 address.
+ * A legacy IPv6 address may be a normal IPv6 address, or another address
+ * (e.g. IPv4) disguised as IPv6. This encoding is used in the legacy
+ * `addr` message format.
+ */
+ void SetLegacyIPv6(const uint8_t ipv6[16]);
+
+ /**
* Set raw IPv4 or IPv6 address (in network byte order)
* @note Only NET_IPV4 and NET_IPV6 are allowed for network.
*/
@@ -100,7 +137,27 @@ class CNetAddr
friend bool operator!=(const CNetAddr& a, const CNetAddr& b) { return !(a == b); }
friend bool operator<(const CNetAddr& a, const CNetAddr& b);
- SERIALIZE_METHODS(CNetAddr, obj) { READWRITE(obj.ip); }
+ /**
+ * Serialize to a stream.
+ */
+ template <typename Stream>
+ void Serialize(Stream& s) const
+ {
+ s << ip;
+ }
+
+ /**
+ * Unserialize from a stream.
+ */
+ template <typename Stream>
+ void Unserialize(Stream& s)
+ {
+ unsigned char ip_temp[sizeof(ip)];
+ s >> ip_temp;
+ // Use SetLegacyIPv6() so that m_net is set correctly. For example
+ // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4).
+ SetLegacyIPv6(ip_temp);
+ }
friend class CSubNet;
};
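Splitting SERIALIZE_METHODS into explicit Serialize()/Unserialize() keeps the wire format at the legacy 16 bytes while recovering m_net on read. A sketch of the intended round trip, assuming the existing CDataStream and LookupNumeric() helpers (names and signatures may differ slightly per version); shown only to illustrate the behaviour:

void LegacyRoundTripSketch()
{
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
    const CNetAddr in{LookupNumeric("1.2.3.4", 8333)}; // stored internally as ::FFFF:1.2.3.4
    ss << in;                                          // writes the legacy 16-byte form
    CNetAddr out;
    ss >> out;                                         // Unserialize() routes through SetLegacyIPv6()
    assert(out.IsIPv4());                              // network type recovered from the prefix
    assert(in == out);                                 // operator== now also compares m_net
}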
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 4514db578a..544bab6d9b 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -12,6 +12,8 @@
#include <serialize.h>
#include <uint256.h>
+#include <tuple>
+
static const int SERIALIZE_TRANSACTION_NO_WITNESS = 0x40000000;
/** An outpoint - a combination of a transaction hash and an index n into its vout */
@@ -388,4 +390,17 @@ typedef std::shared_ptr<const CTransaction> CTransactionRef;
static inline CTransactionRef MakeTransactionRef() { return std::make_shared<const CTransaction>(); }
template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txIn) { return std::make_shared<const CTransaction>(std::forward<Tx>(txIn)); }
+/** A generic txid reference (txid or wtxid). */
+class GenTxid
+{
+ const bool m_is_wtxid;
+ const uint256 m_hash;
+public:
+ GenTxid(bool is_wtxid, const uint256& hash) : m_is_wtxid(is_wtxid), m_hash(hash) {}
+ bool IsWtxid() const { return m_is_wtxid; }
+ const uint256& GetHash() const { return m_hash; }
+ friend bool operator==(const GenTxid& a, const GenTxid& b) { return a.m_is_wtxid == b.m_is_wtxid && a.m_hash == b.m_hash; }
+ friend bool operator<(const GenTxid& a, const GenTxid& b) { return std::tie(a.m_is_wtxid, a.m_hash) < std::tie(b.m_is_wtxid, b.m_hash); }
+};
+
#endif // BITCOIN_PRIMITIVES_TRANSACTION_H
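GenTxid tags a hash as txid or wtxid, and operator< compares the flag first via std::tie, so both kinds of identifier can share one ordered container. A small sketch assuming the headers from this diff; illustrative only:

#include <primitives/transaction.h>
#include <set>

// Sketch only: record both identifiers of a transaction without collisions.
void AddAnnouncements(const CTransaction& tx, std::set<GenTxid>& announced)
{
    announced.emplace(/* is_wtxid */ false, tx.GetHash());
    announced.emplace(/* is_wtxid */ true, tx.GetWitnessHash());
    // Even when txid == wtxid (no witness data), the entries stay distinct
    // because the is_wtxid flag participates in the comparison.
}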
diff --git a/src/protocol.cpp b/src/protocol.cpp
index ee77ca3b94..5a91acee0f 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -241,3 +241,9 @@ std::vector<std::string> serviceFlagsToStr(uint64_t flags)
return str_flags;
}
+
+GenTxid ToGenTxid(const CInv& inv)
+{
+ assert(inv.IsGenTxMsg());
+ return {inv.IsMsgWtx(), inv.hash};
+}
diff --git a/src/protocol.h b/src/protocol.h
index d83da2034a..1d0adaae6e 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -11,6 +11,7 @@
#define BITCOIN_PROTOCOL_H
#include <netaddress.h>
+#include <primitives/transaction.h>
#include <serialize.h>
#include <uint256.h>
#include <version.h>
@@ -63,100 +64,84 @@ namespace NetMsgType {
/**
* The version message provides information about the transmitting node to the
* receiving node at the beginning of a connection.
- * @see https://bitcoin.org/en/developer-reference#version
*/
extern const char* VERSION;
/**
* The verack message acknowledges a previously-received version message,
* informing the connecting node that it can begin to send other messages.
- * @see https://bitcoin.org/en/developer-reference#verack
*/
extern const char* VERACK;
/**
* The addr (IP address) message relays connection information for peers on the
* network.
- * @see https://bitcoin.org/en/developer-reference#addr
*/
extern const char* ADDR;
/**
* The inv message (inventory message) transmits one or more inventories of
* objects known to the transmitting peer.
- * @see https://bitcoin.org/en/developer-reference#inv
*/
extern const char* INV;
/**
* The getdata message requests one or more data objects from another node.
- * @see https://bitcoin.org/en/developer-reference#getdata
*/
extern const char* GETDATA;
/**
* The merkleblock message is a reply to a getdata message which requested a
* block using the inventory type MSG_MERKLEBLOCK.
* @since protocol version 70001 as described by BIP37.
- * @see https://bitcoin.org/en/developer-reference#merkleblock
*/
extern const char* MERKLEBLOCK;
/**
* The getblocks message requests an inv message that provides block header
* hashes starting from a particular point in the block chain.
- * @see https://bitcoin.org/en/developer-reference#getblocks
*/
extern const char* GETBLOCKS;
/**
* The getheaders message requests a headers message that provides block
* headers starting from a particular point in the block chain.
* @since protocol version 31800.
- * @see https://bitcoin.org/en/developer-reference#getheaders
*/
extern const char* GETHEADERS;
/**
* The tx message transmits a single transaction.
- * @see https://bitcoin.org/en/developer-reference#tx
*/
extern const char* TX;
/**
* The headers message sends one or more block headers to a node which
* previously requested certain headers with a getheaders message.
* @since protocol version 31800.
- * @see https://bitcoin.org/en/developer-reference#headers
*/
extern const char* HEADERS;
/**
* The block message transmits a single serialized block.
- * @see https://bitcoin.org/en/developer-reference#block
*/
extern const char* BLOCK;
/**
* The getaddr message requests an addr message from the receiving node,
* preferably one with lots of IP addresses of other receiving nodes.
- * @see https://bitcoin.org/en/developer-reference#getaddr
*/
extern const char* GETADDR;
/**
* The mempool message requests the TXIDs of transactions that the receiving
* node has verified as valid but which have not yet appeared in a block.
* @since protocol version 60002.
- * @see https://bitcoin.org/en/developer-reference#mempool
*/
extern const char* MEMPOOL;
/**
* The ping message is sent periodically to help confirm that the receiving
* peer is still connected.
- * @see https://bitcoin.org/en/developer-reference#ping
*/
extern const char* PING;
/**
* The pong message replies to a ping message, proving to the pinging node that
* the ponging node is still alive.
* @since protocol version 60001 as described by BIP31.
- * @see https://bitcoin.org/en/developer-reference#pong
*/
extern const char* PONG;
/**
* The notfound message is a reply to a getdata message which requested an
* object the receiving node does not have available for relay.
* @since protocol version 70001.
- * @see https://bitcoin.org/en/developer-reference#notfound
*/
extern const char* NOTFOUND;
/**
@@ -165,7 +150,6 @@ extern const char* NOTFOUND;
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
- * @see https://bitcoin.org/en/developer-reference#filterload
*/
extern const char* FILTERLOAD;
/**
@@ -174,7 +158,6 @@ extern const char* FILTERLOAD;
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
- * @see https://bitcoin.org/en/developer-reference#filteradd
*/
extern const char* FILTERADD;
/**
@@ -183,14 +166,12 @@ extern const char* FILTERADD;
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
- * @see https://bitcoin.org/en/developer-reference#filterclear
*/
extern const char* FILTERCLEAR;
/**
* Indicates that a node prefers to receive new block announcements via a
* "headers" message rather than an "inv".
* @since protocol version 70012 as described by BIP130.
- * @see https://bitcoin.org/en/developer-reference#sendheaders
*/
extern const char* SENDHEADERS;
/**
@@ -430,8 +411,19 @@ public:
std::string GetCommand() const;
std::string ToString() const;
+ // Single-message helper methods
+ bool IsMsgTx() const { return type == MSG_TX; }
+ bool IsMsgWtx() const { return type == MSG_WTX; }
+ bool IsMsgWitnessTx() const { return type == MSG_WITNESS_TX; }
+
+ // Combined-message helper methods
+ bool IsGenTxMsg() const { return type == MSG_TX || type == MSG_WTX || type == MSG_WITNESS_TX; }
+
int type;
uint256 hash;
};
+/** Convert a TX/WITNESS_TX/WTX CInv to a GenTxid. */
+GenTxid ToGenTxid(const CInv& inv);
+
#endif // BITCOIN_PROTOCOL_H
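The new CInv helpers plus ToGenTxid() concentrate the per-call-site type checks previously spelled out in net_processing. A sketch of typical use, assuming the CTxMemPool::info(GenTxid) overload used elsewhere in this diff; not a definitive implementation:

// Sketch only, not part of the patch.
void HandleTxInv(const CInv& inv, const CTxMemPool& pool)
{
    if (!inv.IsGenTxMsg()) return;                 // MSG_TX, MSG_WTX or MSG_WITNESS_TX
    const auto txinfo = pool.info(ToGenTxid(inv)); // wtxid lookup for MSG_WTX, txid otherwise
    if (txinfo.tx) {
        // Transaction is available in the mempool for relay.
    }
}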
diff --git a/src/pubkey.h b/src/pubkey.h
index 4c28af4a4d..fcbc7e8416 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -157,13 +157,13 @@ public:
//! Get the KeyID of this public key (hash of its serialization)
CKeyID GetID() const
{
- return CKeyID(Hash160(vch, vch + size()));
+ return CKeyID(Hash160(MakeSpan(vch).first(size())));
}
//! Get the 256-bit hash of this public key.
uint256 GetHash() const
{
- return Hash(vch, vch + size());
+ return Hash(MakeSpan(vch).first(size()));
}
/*
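As elsewhere in this diff (see hash.h in the file list), Hash() and Hash160() now take a single Span-convertible argument instead of a begin/end pointer pair, and a two-argument Hash() hashes the concatenation. A brief sketch of the updated call style, based on the interface assumed by the surrounding changes:

#include <hash.h>
#include <vector>

void HashCallStyleSketch()
{
    const std::vector<unsigned char> payload{0x01, 0x02, 0x03};
    const uint256 single = Hash(payload);          // was: Hash(payload.begin(), payload.end())
    const uint160 short_id = Hash160(payload);     // was: Hash160(payload.begin(), payload.end())
    const uint256 joined = Hash(payload, payload); // two-argument form hashes the concatenation
    (void)single; (void)short_id; (void)joined;
}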
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index ecb753a306..523f5c429b 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -411,14 +411,14 @@ WId BitcoinApplication::getMainWinId() const
return window->winId();
}
-static void SetupUIArgs()
+static void SetupUIArgs(ArgsManager& argsman)
{
- gArgs.AddArg("-choosedatadir", strprintf("Choose data directory on startup (default: %u)", DEFAULT_CHOOSE_DATADIR), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-lang=<lang>", "Set language, for example \"de_DE\" (default: system locale)", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-min", "Start minimized", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-resetguisettings", "Reset all settings changed in the GUI", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-splash", strprintf("Show splash screen on startup (default: %u)", DEFAULT_SPLASHSCREEN), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-uiplatform", strprintf("Select platform to customize UI for (one of windows, macosx, other; default: %s)", BitcoinGUI::DEFAULT_UIPLATFORM), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::GUI);
+ argsman.AddArg("-choosedatadir", strprintf("Choose data directory on startup (default: %u)", DEFAULT_CHOOSE_DATADIR), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-lang=<lang>", "Set language, for example \"de_DE\" (default: system locale)", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-min", "Start minimized", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-resetguisettings", "Reset all settings changed in the GUI", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-splash", strprintf("Show splash screen on startup (default: %u)", DEFAULT_SPLASHSCREEN), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-uiplatform", strprintf("Select platform to customize UI for (one of windows, macosx, other; default: %s)", BitcoinGUI::DEFAULT_UIPLATFORM), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::GUI);
}
int GuiMain(int argc, char* argv[])
@@ -454,7 +454,7 @@ int GuiMain(int argc, char* argv[])
/// 2. Parse command-line options. We do this after qt in order to show an error if there are problems parsing these
// Command-line options take precedence:
node->setupServerArgs();
- SetupUIArgs();
+ SetupUIArgs(gArgs);
std::string error;
if (!node->parseParameters(argc, argv, error)) {
node->initError(strprintf(Untranslated("Error parsing command line arguments: %s\n"), error));
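Passing the ArgsManager explicitly instead of reaching for the gArgs global makes the GUI argument registration easier to exercise in isolation. A hypothetical sketch; SetupUIArgs() is file-local, so a call like this only applies where the function is visible:

void RegisterGuiArgsForTestSketch()
{
    ArgsManager local_args;
    SetupUIArgs(local_args); // hypothetical call site
    // local_args now carries -choosedatadir, -lang, -min, -resetguisettings,
    // -splash and -uiplatform without touching global state.
}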
diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp
index 443e2d047d..b880a99baf 100644
--- a/src/qt/test/apptests.cpp
+++ b/src/qt/test/apptests.cpp
@@ -83,7 +83,7 @@ void AppTests::appTests()
// Reset global state to avoid interfering with later tests.
LogInstance().DisconnectTestLogger();
AbortShutdown();
- UnloadBlockIndex();
+ UnloadBlockIndex(/* mempool */ nullptr);
WITH_LOCK(::cs_main, g_chainman.Reset());
}
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 2afc9a3d4a..f27373b57c 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -525,9 +525,9 @@ static UniValue getrawmempool(const JSONRPCRequest& request)
{RPCResult::Type::STR_HEX, "", "The transaction id"},
}},
RPCResult{"for verbose = true",
- RPCResult::Type::OBJ, "", "",
+ RPCResult::Type::OBJ_DYN, "", "",
{
- {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()},
}},
},
RPCExamples{
@@ -556,7 +556,7 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request)
RPCResult::Type::ARR, "", "",
{{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool ancestor transaction"}}},
RPCResult{"for verbose = true",
- RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()},
},
RPCExamples{
HelpExampleCli("getmempoolancestors", "\"mytxid\"")
@@ -616,9 +616,9 @@ static UniValue getmempooldescendants(const JSONRPCRequest& request)
RPCResult::Type::ARR, "", "",
{{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool descendant transaction"}}},
RPCResult{"for verbose = true",
- RPCResult::Type::OBJ, "", "",
+ RPCResult::Type::OBJ_DYN, "", "",
{
- {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()},
}},
},
RPCExamples{
@@ -674,7 +674,7 @@ static UniValue getmempoolentry(const JSONRPCRequest& request)
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id (must be in mempool)"},
},
RPCResult{
- RPCResult::Type::OBJ_DYN, "", "", MempoolEntryDescription()},
+ RPCResult::Type::OBJ, "", "", MempoolEntryDescription()},
RPCExamples{
HelpExampleCli("getmempoolentry", "\"mytxid\"")
+ HelpExampleRpc("getmempoolentry", "\"mytxid\"")
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 70caf6009a..cf856af6e9 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -601,7 +601,7 @@ static UniValue decodescript(const JSONRPCRequest& request)
UniValue sr(UniValue::VOBJ);
CScript segwitScr;
if (which_type == TxoutType::PUBKEY) {
- segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0].begin(), solutions_data[0].end())));
+ segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0])));
} else if (which_type == TxoutType::PUBKEYHASH) {
segwitScr = GetScriptForDestination(WitnessV0KeyHash(uint160{solutions_data[0]}));
} else {
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 9415bba585..39feb4ccc9 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -986,9 +986,9 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript&
else if (opcode == OP_SHA256)
CSHA256().Write(vch.data(), vch.size()).Finalize(vchHash.data());
else if (opcode == OP_HASH160)
- CHash160().Write(vch.data(), vch.size()).Finalize(vchHash.data());
+ CHash160().Write(vch).Finalize(vchHash);
else if (opcode == OP_HASH256)
- CHash256().Write(vch.data(), vch.size()).Finalize(vchHash.data());
+ CHash256().Write(vch).Finalize(vchHash);
popstack(stack);
stack.push_back(vchHash);
}
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 1c4990791c..3a4882f280 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -16,10 +16,10 @@ typedef std::vector<unsigned char> valtype;
bool fAcceptDatacarrier = DEFAULT_ACCEPT_DATACARRIER;
unsigned nMaxDatacarrierBytes = MAX_OP_RETURN_RELAY;
-CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {}
+CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in)) {}
CScriptID::CScriptID(const ScriptHash& in) : BaseHash(static_cast<uint160>(in)) {}
-ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {}
+ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in)) {}
ScriptHash::ScriptHash(const CScriptID& in) : BaseHash(static_cast<uint160>(in)) {}
PKHash::PKHash(const CPubKey& pubkey) : BaseHash(pubkey.GetID()) {}
@@ -318,7 +318,7 @@ CScript GetScriptForWitness(const CScript& redeemscript)
std::vector<std::vector<unsigned char> > vSolutions;
TxoutType typ = Solver(redeemscript, vSolutions);
if (typ == TxoutType::PUBKEY) {
- return GetScriptForDestination(WitnessV0KeyHash(Hash160(vSolutions[0].begin(), vSolutions[0].end())));
+ return GetScriptForDestination(WitnessV0KeyHash(Hash160(vSolutions[0])));
} else if (typ == TxoutType::PUBKEYHASH) {
return GetScriptForDestination(WitnessV0KeyHash(uint160{vSolutions[0]}));
}
diff --git a/src/script/standard.h b/src/script/standard.h
index fd29353886..992e37675f 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -79,6 +79,9 @@ public:
{
return m_hash.size();
}
+
+ unsigned char* data() { return m_hash.data(); }
+ const unsigned char* data() const { return m_hash.data(); }
};
/** A reference to a CScript: the Hash160 of its serialization (see script.h) */
diff --git a/src/span.h b/src/span.h
index 79f13c9203..4afb383a59 100644
--- a/src/span.h
+++ b/src/span.h
@@ -207,4 +207,16 @@ T& SpanPopBack(Span<T>& span)
return back;
}
+// Helper functions to safely cast to unsigned char pointers.
+inline unsigned char* UCharCast(char* c) { return (unsigned char*)c; }
+inline unsigned char* UCharCast(unsigned char* c) { return c; }
+inline const unsigned char* UCharCast(const char* c) { return (unsigned char*)c; }
+inline const unsigned char* UCharCast(const unsigned char* c) { return c; }
+
+// Helper function to safely convert a Span to a Span<[const] unsigned char>.
+template <typename T> constexpr auto UCharSpanCast(Span<T> s) -> Span<typename std::remove_pointer<decltype(UCharCast(s.data()))>::type> { return {UCharCast(s.data()), s.size()}; }
+
+/** Like MakeSpan, but returns a Span over (const) unsigned char. Only works for (un)signed char containers. */
+template <typename V> constexpr auto MakeUCharSpan(V&& v) -> decltype(UCharSpanCast(MakeSpan(std::forward<V>(v)))) { return UCharSpanCast(MakeSpan(std::forward<V>(v))); }
+
#endif
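MakeUCharSpan() is what lets (signed) char containers such as std::string feed the unsigned-char spans the hash writers now expect (see the key_tests and settings_tests changes below). A minimal sketch, assuming the hashing interface introduced elsewhere in this diff:

#include <hash.h>
#include <span.h>
#include <string>

uint256 DigestOfString(const std::string& msg)
{
    uint256 digest;
    CHash256().Write(MakeUCharSpan(msg)).Finalize(digest); // uint256 converts to Span<unsigned char>
    return digest;
}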
diff --git a/src/sync.cpp b/src/sync.cpp
index 10f0483189..4be13a3c48 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -149,12 +149,17 @@ static void push_lock(void* c, const CLockLocation& locklocation)
const LockPair p1 = std::make_pair(i.first, c);
if (lockdata.lockorders.count(p1))
continue;
- lockdata.lockorders.emplace(p1, lock_stack);
const LockPair p2 = std::make_pair(c, i.first);
+ if (lockdata.lockorders.count(p2)) {
+ auto lock_stack_copy = lock_stack;
+ lock_stack.pop_back();
+ potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
+ // potential_deadlock_detected() does not return.
+ }
+
+ lockdata.lockorders.emplace(p1, lock_stack);
lockdata.invlockorders.insert(p2);
- if (lockdata.lockorders.count(p2))
- potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
}
}
@@ -259,6 +264,17 @@ void DeleteLock(void* cs)
}
}
+bool LockStackEmpty()
+{
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+ const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
+ if (it == lockdata.m_lock_stacks.end()) {
+ return true;
+ }
+ return it->second.empty();
+}
+
bool g_debug_lockorder_abort = true;
#endif /* DEBUG_LOCKORDER */
diff --git a/src/sync.h b/src/sync.h
index 77327d8bfe..05ff2ee8a9 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -56,6 +56,7 @@ template <typename MutexType>
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void DeleteLock(void* cs);
+bool LockStackEmpty();
/**
* Call abort() if a potential lock order deadlock bug is detected, instead of
@@ -64,13 +65,14 @@ void DeleteLock(void* cs);
*/
extern bool g_debug_lockorder_abort;
#else
-void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
-void static inline LeaveCritical() {}
-void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
+inline void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
+inline void LeaveCritical() {}
+inline void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
template <typename MutexType>
-void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
-void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
-void static inline DeleteLock(void* cs) {}
+inline void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
+inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+inline void DeleteLock(void* cs) {}
+inline bool LockStackEmpty() { return true; }
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
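LockStackEmpty() is a debug-only introspection helper; the stub above makes it always return true when DEBUG_LOCKORDER is off, so callers can assert cleanup unconditionally. A small sketch of the intended test-side use, assuming the sync.h primitives:

#include <sync.h>
#include <cassert>

void ExerciseAndCheckLocksSketch()
{
    Mutex test_mutex;
    {
        LOCK(test_mutex);
        // ... code under test that may take further locks ...
    }
    // Vacuously true without DEBUG_LOCKORDER, so the check can stay unconditional.
    assert(LockStackEmpty());
}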
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index f64251fe32..bf5c587774 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -743,7 +743,7 @@ BOOST_AUTO_TEST_CASE(sha256d64)
in[j] = InsecureRandBits(8);
}
for (int j = 0; j < i; ++j) {
- CHash256().Write(in + 64 * j, 64).Finalize(out1 + 32 * j);
+ CHash256().Write({in + 64 * j, 64}).Finalize({out1 + 32 * j, 32});
}
SHA256D64(out2, in, i);
BOOST_CHECK(memcmp(out1, out2, 32 * i) == 0);
diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp
index 595cdf9abb..3edcf96495 100644
--- a/src/test/fuzz/crypto.cpp
+++ b/src/test/fuzz/crypto.cpp
@@ -44,8 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
}
- (void)hash160.Write(data.data(), data.size());
- (void)hash256.Write(data.data(), data.size());
+ (void)hash160.Write(data);
+ (void)hash256.Write(data);
(void)hmac_sha256.Write(data.data(), data.size());
(void)hmac_sha512.Write(data.data(), data.size());
(void)ripemd160.Write(data.data(), data.size());
@@ -54,9 +54,8 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)sha512.Write(data.data(), data.size());
(void)sip_hasher.Write(data.data(), data.size());
- (void)Hash(data.begin(), data.end());
+ (void)Hash(data);
(void)Hash160(data);
- (void)Hash160(data.begin(), data.end());
(void)sha512.Size();
break;
}
@@ -73,12 +72,12 @@ void test_one_input(const std::vector<uint8_t>& buffer)
switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 8)) {
case 0: {
data.resize(CHash160::OUTPUT_SIZE);
- hash160.Finalize(data.data());
+ hash160.Finalize(data);
break;
}
case 1: {
data.resize(CHash256::OUTPUT_SIZE);
- hash256.Finalize(data.data());
+ hash256.Finalize(data);
break;
}
case 2: {
diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp
index c746374c61..955b954700 100644
--- a/src/test/fuzz/key.cpp
+++ b/src/test/fuzz/key.cpp
@@ -85,7 +85,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
assert(negated_key == key);
}
- const uint256 random_uint256 = Hash(buffer.begin(), buffer.end());
+ const uint256 random_uint256 = Hash(buffer);
{
CKey child_key;
diff --git a/src/test/fuzz/net_permissions.cpp b/src/test/fuzz/net_permissions.cpp
index ae531f4462..8a674ac1e9 100644
--- a/src/test/fuzz/net_permissions.cpp
+++ b/src/test/fuzz/net_permissions.cpp
@@ -24,6 +24,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
NetPermissionFlags::PF_FORCERELAY,
NetPermissionFlags::PF_NOBAN,
NetPermissionFlags::PF_MEMPOOL,
+ NetPermissionFlags::PF_ADDR,
NetPermissionFlags::PF_ISIMPLICIT,
NetPermissionFlags::PF_ALL,
}) :
diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp
index fd35537c77..4e4c44266a 100644
--- a/src/test/key_tests.cpp
+++ b/src/test/key_tests.cpp
@@ -77,7 +77,7 @@ BOOST_AUTO_TEST_CASE(key_test1)
for (int n=0; n<16; n++)
{
std::string strMsg = strprintf("Very secret message %i: 11", n);
- uint256 hashMsg = Hash(strMsg.begin(), strMsg.end());
+ uint256 hashMsg = Hash(strMsg);
// normal signatures
@@ -134,7 +134,7 @@ BOOST_AUTO_TEST_CASE(key_test1)
std::vector<unsigned char> detsig, detsigc;
std::string strMsg = "Very deterministic message";
- uint256 hashMsg = Hash(strMsg.begin(), strMsg.end());
+ uint256 hashMsg = Hash(strMsg);
BOOST_CHECK(key1.Sign(hashMsg, detsig));
BOOST_CHECK(key1C.Sign(hashMsg, detsigc));
BOOST_CHECK(detsig == detsigc);
@@ -158,7 +158,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests)
// When entropy is specified, we should see at least one high R signature within 20 signatures
CKey key = DecodeSecret(strSecret1);
std::string msg = "A message to be signed";
- uint256 msg_hash = Hash(msg.begin(), msg.end());
+ uint256 msg_hash = Hash(msg);
std::vector<unsigned char> sig;
bool found = false;
@@ -179,7 +179,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests)
for (int i = 0; i < 256; ++i) {
sig.clear();
std::string msg = "A message to be signed" + ToString(i);
- msg_hash = Hash(msg.begin(), msg.end());
+ msg_hash = Hash(msg);
BOOST_CHECK(key.Sign(msg_hash, sig));
found = sig[3] == 0x20;
BOOST_CHECK(sig.size() <= 70);
@@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation)
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd, sizeof(rnd));
uint256 hash;
- CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin());
+ CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
// import the static test key
CKey key = DecodeSecret(strSecret1C);
diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp
index 03dce552fc..9bc7cc5dab 100644
--- a/src/test/merkle_tests.cpp
+++ b/src/test/merkle_tests.cpp
@@ -13,9 +13,9 @@ static uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vecto
uint256 hash = leaf;
for (std::vector<uint256>::const_iterator it = vMerkleBranch.begin(); it != vMerkleBranch.end(); ++it) {
if (nIndex & 1) {
- hash = Hash(it->begin(), it->end(), hash.begin(), hash.end());
+ hash = Hash(*it, hash);
} else {
- hash = Hash(hash.begin(), hash.end(), it->begin(), it->end());
+ hash = Hash(hash, *it);
}
nIndex >>= 1;
}
@@ -60,7 +60,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
}
}
mutated |= (inner[level] == h);
- CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin());
+ CHash256().Write(inner[level]).Write(h).Finalize(h);
}
// Store the resulting hash at inner position level.
inner[level] = h;
@@ -86,7 +86,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
if (pbranch && matchh) {
pbranch->push_back(h);
}
- CHash256().Write(h.begin(), 32).Write(h.begin(), 32).Finalize(h.begin());
+ CHash256().Write(h).Write(h).Finalize(h);
// Increment count to the value it would have if two entries at this
// level had existed.
count += (((uint32_t)1) << level);
@@ -101,7 +101,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
matchh = true;
}
}
- CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin());
+ CHash256().Write(inner[level]).Write(h).Finalize(h);
level++;
}
}
@@ -144,8 +144,7 @@ static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::ve
// Two identical hashes at the end of the list at a particular level.
mutated = true;
}
- vMerkleTree.push_back(Hash(vMerkleTree[j+i].begin(), vMerkleTree[j+i].end(),
- vMerkleTree[j+i2].begin(), vMerkleTree[j+i2].end()));
+ vMerkleTree.push_back(Hash(vMerkleTree[j+i], vMerkleTree[j+i2]));
}
j += nSize;
}
diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp
index ea3e633cc2..49073ea657 100644
--- a/src/test/netbase_tests.cpp
+++ b/src/test/netbase_tests.cpp
@@ -138,6 +138,14 @@ BOOST_AUTO_TEST_CASE(onioncat_test)
}
+BOOST_AUTO_TEST_CASE(embedded_test)
+{
+ CNetAddr addr1(ResolveIP("1.2.3.4"));
+ CNetAddr addr2(ResolveIP("::FFFF:0102:0304"));
+ BOOST_CHECK(addr2.IsIPv4());
+ BOOST_CHECK_EQUAL(addr1.ToString(), addr2.ToString());
+}
+
BOOST_AUTO_TEST_CASE(subnet_test)
{
@@ -158,12 +166,13 @@ BOOST_AUTO_TEST_CASE(subnet_test)
BOOST_CHECK(ResolveSubNet("1.2.2.1/24").Match(ResolveIP("1.2.2.4")));
BOOST_CHECK(ResolveSubNet("1.2.2.110/31").Match(ResolveIP("1.2.2.111")));
BOOST_CHECK(ResolveSubNet("1.2.2.20/26").Match(ResolveIP("1.2.2.63")));
- // All-Matching IPv6 Matches arbitrary IPv4 and IPv6
+ // All-Matching IPv6 Matches arbitrary IPv6
BOOST_CHECK(ResolveSubNet("::/0").Match(ResolveIP("1:2:3:4:5:6:7:1234")));
// But not `::` or `0.0.0.0` because they are considered invalid addresses
BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("::")));
BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("0.0.0.0")));
- BOOST_CHECK(ResolveSubNet("::/0").Match(ResolveIP("1.2.3.4")));
+ // Addresses from one network (IPv4) don't belong to subnets of another network (IPv6)
+ BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("1.2.3.4")));
// All-Matching IPv4 does not Match IPv6
BOOST_CHECK(!ResolveSubNet("0.0.0.0/0").Match(ResolveIP("1:2:3:4:5:6:7:1234")));
// Invalid subnets Match nothing (not even invalid addresses)
@@ -397,13 +406,14 @@ BOOST_AUTO_TEST_CASE(netpermissions_test)
BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay,mempool@1.2.3.4/32", whitelistPermissions, error));
const auto strings = NetPermissions::ToStrings(PF_ALL);
- BOOST_CHECK_EQUAL(strings.size(), 6U);
+ BOOST_CHECK_EQUAL(strings.size(), 7U);
BOOST_CHECK(std::find(strings.begin(), strings.end(), "bloomfilter") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "forcerelay") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "relay") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "noban") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "mempool") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "download") != strings.end());
+ BOOST_CHECK(std::find(strings.begin(), strings.end(), "addr") != strings.end());
}
BOOST_AUTO_TEST_CASE(netbase_dont_resolve_strings_with_embedded_nul_characters)
diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp
index 77d748241b..87678af4d1 100644
--- a/src/test/script_standard_tests.cpp
+++ b/src/test/script_standard_tests.cpp
@@ -216,7 +216,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination)
s << OP_0 << ToByteVector(pubkey.GetID());
BOOST_CHECK(ExtractDestination(s, address));
WitnessV0KeyHash keyhash;
- CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(keyhash.begin());
+ CHash160().Write(pubkey).Finalize(keyhash);
BOOST_CHECK(boost::get<WitnessV0KeyHash>(&address) && *boost::get<WitnessV0KeyHash>(&address) == keyhash);
// TxoutType::WITNESS_V0_SCRIPTHASH
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index cb3ae290d1..0830743d61 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -282,7 +282,7 @@ public:
CScript scriptPubKey = script;
if (wm == WitnessMode::PKH) {
uint160 hash;
- CHash160().Write(&script[1], script.size() - 1).Finalize(hash.begin());
+ CHash160().Write(MakeSpan(script).subspan(1)).Finalize(hash);
script = CScript() << OP_DUP << OP_HASH160 << ToByteVector(hash) << OP_EQUALVERIFY << OP_CHECKSIG;
scriptPubKey = CScript() << witnessversion << ToByteVector(hash);
} else if (wm == WitnessMode::SH) {
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index c2328f931c..f625b67c2a 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -145,7 +145,7 @@ BOOST_AUTO_TEST_CASE(floats)
for (int i = 0; i < 1000; i++) {
ss << float(i);
}
- BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c"));
+ BOOST_CHECK(Hash(ss) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c"));
// decode
for (int i = 0; i < 1000; i++) {
@@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(doubles)
for (int i = 0; i < 1000; i++) {
ss << double(i);
}
- BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96"));
+ BOOST_CHECK(Hash(ss) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96"));
// decode
for (int i = 0; i < 1000; i++) {
diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp
index 1a2d775f49..91e039416c 100644
--- a/src/test/settings_tests.cpp
+++ b/src/test/settings_tests.cpp
@@ -228,7 +228,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup)
if (OnlyHasDefaultSectionSetting(settings, network, name)) desc += " ignored";
desc += "\n";
- out_sha.Write((const unsigned char*)desc.data(), desc.size());
+ out_sha.Write(MakeUCharSpan(desc));
if (out_file) {
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
}
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 3ea8714f3a..19029ebd3c 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -14,6 +14,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
{
LOCK2(mutex1, mutex2);
}
+ BOOST_CHECK(LockStackEmpty());
bool error_thrown = false;
try {
LOCK2(mutex2, mutex1);
@@ -21,6 +22,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected: mutex1 -> mutex2 -> mutex1");
error_thrown = true;
}
+ BOOST_CHECK(LockStackEmpty());
#ifdef DEBUG_LOCKORDER
BOOST_CHECK(error_thrown);
#else
@@ -40,9 +42,13 @@ BOOST_AUTO_TEST_CASE(potential_deadlock_detected)
RecursiveMutex rmutex1, rmutex2;
TestPotentialDeadLockDetected(rmutex1, rmutex2);
+ // The second test ensures that lock tracking data have not been broken by the exception.
+ TestPotentialDeadLockDetected(rmutex1, rmutex2);
Mutex mutex1, mutex2;
TestPotentialDeadLockDetected(mutex1, mutex2);
+ // The second test ensures that lock tracking data have not been broken by the exception.
+ TestPotentialDeadLockDetected(mutex1, mutex2);
#ifdef DEBUG_LOCKORDER
g_debug_lockorder_abort = prev;
diff --git a/src/test/system_tests.cpp b/src/test/system_tests.cpp
new file mode 100644
index 0000000000..a55145c738
--- /dev/null
+++ b/src/test/system_tests.cpp
@@ -0,0 +1,95 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include <test/util/setup_common.h>
+#include <util/system.h>
+#include <univalue.h>
+
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(system_tests, BasicTestingSetup)
+
+// At least one test is required (in case HAVE_BOOST_PROCESS is not defined).
+// Workaround for https://github.com/bitcoin/bitcoin/issues/19128
+BOOST_AUTO_TEST_CASE(dummy)
+{
+ BOOST_CHECK(true);
+}
+
+#ifdef HAVE_BOOST_PROCESS
+
+bool checkMessage(const std::runtime_error& ex)
+{
+ // On Linux & Mac: "No such file or directory"
+ // On Windows: "The system cannot find the file specified."
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("file") != std::string::npos);
+ return true;
+}
+
+bool checkMessageFalse(const std::runtime_error& ex)
+{
+ BOOST_CHECK_EQUAL(ex.what(), std::string("RunCommandParseJSON error: process(false) returned 1: \n"));
+ return true;
+}
+
+bool checkMessageStdErr(const std::runtime_error& ex)
+{
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("RunCommandParseJSON error:") != std::string::npos);
+ return checkMessage(ex);
+}
+
+BOOST_AUTO_TEST_CASE(run_command)
+{
+ {
+ const UniValue result = RunCommandParseJSON("");
+ BOOST_CHECK(result.isNull());
+ }
+ {
+#ifdef WIN32
+ // Windows requires single quotes to prevent escaping double quotes from the JSON...
+ const UniValue result = RunCommandParseJSON("echo '{\"success\": true}'");
+#else
+ // ... but Linux and macOS echo a single quote if it's used
+ const UniValue result = RunCommandParseJSON("echo \"{\"success\": true}\"");
+#endif
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+ {
+ // An invalid command is handled by Boost
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("invalid_command"), boost::process::process_error, checkMessage); // Command failed
+ }
+ {
+ // Return non-zero exit code, no output to stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("false"), std::runtime_error, checkMessageFalse);
+ }
+ {
+ // Return non-zero exit code, with error message for stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("ls nosuchfile"), std::runtime_error, checkMessageStdErr);
+ }
+ {
+ BOOST_REQUIRE_THROW(RunCommandParseJSON("echo \"{\""), std::runtime_error); // Unable to parse JSON
+ }
+ // Test std::in, except for Windows
+#ifndef WIN32
+ {
+ const UniValue result = RunCommandParseJSON("cat", "{\"success\": true}");
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+#endif
+}
+#endif // HAVE_BOOST_PROCESS
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 24c0d6382b..14f65dcb7c 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -142,7 +142,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
::ChainstateActive().InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
assert(!::ChainstateActive().CanFlushToDisk());
- ::ChainstateActive().InitCoinsCache();
+ ::ChainstateActive().InitCoinsCache(1 << 23);
assert(::ChainstateActive().CanFlushToDisk());
if (!LoadGenesisBlock(chainparams)) {
throw std::runtime_error("LoadGenesisBlock failed.");
@@ -182,9 +182,9 @@ TestingSetup::~TestingSetup()
m_node.connman.reset();
m_node.banman.reset();
m_node.args = nullptr;
+ UnloadBlockIndex(m_node.mempool);
m_node.mempool = nullptr;
m_node.scheduler.reset();
- UnloadBlockIndex();
m_node.chainman->Reset();
m_node.chainman = nullptr;
pblocktree.reset();
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index a30e366028..15a2c1e300 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -1009,7 +1009,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup)
desc += "\n";
- out_sha.Write((const unsigned char*)desc.data(), desc.size());
+ out_sha.Write(MakeUCharSpan(desc));
if (out_file) {
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
}
@@ -1112,7 +1112,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup)
}
desc += "\n";
- out_sha.Write((const unsigned char*)desc.data(), desc.size());
+ out_sha.Write(MakeUCharSpan(desc));
if (out_file) {
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
}
@@ -2186,8 +2186,8 @@ BOOST_AUTO_TEST_CASE(message_hash)
std::string(1, (char)unsigned_tx.length()) +
unsigned_tx;
- const uint256 signature_hash = Hash(unsigned_tx.begin(), unsigned_tx.end());
- const uint256 message_hash1 = Hash(prefixed_message.begin(), prefixed_message.end());
+ const uint256 signature_hash = Hash(unsigned_tx);
+ const uint256 message_hash1 = Hash(prefixed_message);
const uint256 message_hash2 = MessageHash(unsigned_tx);
BOOST_CHECK_EQUAL(message_hash1, message_hash2);
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
new file mode 100644
index 0000000000..2076a1096a
--- /dev/null
+++ b/src/test/validation_chainstate_tests.cpp
@@ -0,0 +1,74 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include <random.h>
+#include <uint256.h>
+#include <consensus/validation.h>
+#include <sync.h>
+#include <test/util/setup_common.h>
+#include <validation.h>
+
+#include <vector>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(validation_chainstate_tests, TestingSetup)
+
+//! Test resizing coins-related CChainState caches during runtime.
+//!
+BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
+{
+ ChainstateManager manager;
+
+ //! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view.
+ auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint {
+ Coin newcoin;
+ uint256 txid = InsecureRand256();
+ COutPoint outp{txid, 0};
+ newcoin.nHeight = 1;
+ newcoin.out.nValue = InsecureRand32();
+ newcoin.out.scriptPubKey.assign((uint32_t)56, 1);
+ coins_view.AddCoin(outp, std::move(newcoin), false);
+
+ return outp;
+ };
+
+ CChainState& c1 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate());
+ c1.InitCoinsDB(
+ /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
+ WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23));
+
+ // Add a coin to the in-memory cache, upsize once, then downsize.
+ {
+ LOCK(::cs_main);
+ auto outpoint = add_coin(c1.CoinsTip());
+
+ // Set a meaningless bestblock value in the coinsview cache - otherwise we won't
+ // flush during ResizeCoinsCaches() and will subsequently hit an assertion.
+ c1.CoinsTip().SetBestBlock(InsecureRand256());
+
+ BOOST_CHECK(c1.CoinsTip().HaveCoinInCache(outpoint));
+
+ c1.ResizeCoinsCaches(
+ 1 << 24, // upsizing the coinsview cache
+ 1 << 22 // downsizing the coinsdb cache
+ );
+
+ // View should still have the coin cached, since we haven't destructed the cache on upsize.
+ BOOST_CHECK(c1.CoinsTip().HaveCoinInCache(outpoint));
+
+ c1.ResizeCoinsCaches(
+ 1 << 22, // downsizing the coinsview cache
+ 1 << 23 // upsizing the coinsdb cache
+ );
+
+ // The view cache should be empty since we had to destruct to downsize.
+ BOOST_CHECK(!c1.CoinsTip().HaveCoinInCache(outpoint));
+ }
+
+ // Avoid triggering the address sanitizer.
+ WITH_LOCK(::cs_main, manager.Unload());
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 0d149285ad..887a48124f 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -28,13 +28,11 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
// Create a legacy (IBD) chainstate.
//
- ENTER_CRITICAL_SECTION(cs_main);
- CChainState& c1 = manager.InitializeChainstate();
- LEAVE_CRITICAL_SECTION(cs_main);
+ CChainState& c1 = *WITH_LOCK(::cs_main, return &manager.InitializeChainstate());
chainstates.push_back(&c1);
c1.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
- WITH_LOCK(::cs_main, c1.InitCoinsCache());
+ WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23));
BOOST_CHECK(!manager.IsSnapshotActive());
BOOST_CHECK(!manager.IsSnapshotValidated());
@@ -56,13 +54,11 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
// Create a snapshot-based chainstate.
//
- ENTER_CRITICAL_SECTION(cs_main);
- CChainState& c2 = manager.InitializeChainstate(GetRandHash());
- LEAVE_CRITICAL_SECTION(cs_main);
+ CChainState& c2 = *WITH_LOCK(::cs_main, return &manager.InitializeChainstate(GetRandHash()));
chainstates.push_back(&c2);
c2.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
- WITH_LOCK(::cs_main, c2.InitCoinsCache());
+ WITH_LOCK(::cs_main, c2.InitCoinsCache(1 << 23));
// Unlike c1, which doesn't have any blocks. Gets us different tip, height.
c2.LoadGenesisBlock(chainparams);
BlockValidationState _;
@@ -104,4 +100,54 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
WITH_LOCK(::cs_main, manager.Unload());
}
+//! Test rebalancing the caches associated with each chainstate.
+BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches)
+{
+ ChainstateManager manager;
+ size_t max_cache = 10000;
+ manager.m_total_coinsdb_cache = max_cache;
+ manager.m_total_coinstip_cache = max_cache;
+
+ std::vector<CChainState*> chainstates;
+
+ // Create a legacy (IBD) chainstate.
+ //
+ CChainState& c1 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate());
+ chainstates.push_back(&c1);
+ c1.InitCoinsDB(
+ /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
+
+ {
+ LOCK(::cs_main);
+ c1.InitCoinsCache(1 << 23);
+ c1.CoinsTip().SetBestBlock(InsecureRand256());
+ manager.MaybeRebalanceCaches();
+ }
+
+ BOOST_CHECK_EQUAL(c1.m_coinstip_cache_size_bytes, max_cache);
+ BOOST_CHECK_EQUAL(c1.m_coinsdb_cache_size_bytes, max_cache);
+
+ // Create a snapshot-based chainstate.
+ //
+ CChainState& c2 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate(GetRandHash()));
+ chainstates.push_back(&c2);
+ c2.InitCoinsDB(
+ /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
+
+ {
+ LOCK(::cs_main);
+ c2.InitCoinsCache(1 << 23);
+ c2.CoinsTip().SetBestBlock(InsecureRand256());
+ manager.MaybeRebalanceCaches();
+ }
+
+ // Since both chainstates are considered to be in initial block download,
+ // the snapshot chainstate should take priority.
+ BOOST_CHECK_CLOSE(c1.m_coinstip_cache_size_bytes, max_cache * 0.05, 1);
+ BOOST_CHECK_CLOSE(c1.m_coinsdb_cache_size_bytes, max_cache * 0.05, 1);
+ BOOST_CHECK_CLOSE(c2.m_coinstip_cache_size_bytes, max_cache * 0.95, 1);
+ BOOST_CHECK_CLOSE(c2.m_coinsdb_cache_size_bytes, max_cache * 0.95, 1);
+
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp
index a863e3a4d5..8bac914f05 100644
--- a/src/test/validation_flush_tests.cpp
+++ b/src/test/validation_flush_tests.cpp
@@ -21,7 +21,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
BlockManager blockman{};
CChainState chainstate{blockman};
chainstate.InitCoinsDB(/*cache_size_bytes*/ 1 << 10, /*in_memory*/ true, /*should_wipe*/ false);
- WITH_LOCK(::cs_main, chainstate.InitCoinsCache());
+ WITH_LOCK(::cs_main, chainstate.InitCoinsCache(1 << 10));
CTxMemPool tx_pool{};
constexpr bool is_64_bit = sizeof(void*) == 8;
@@ -56,7 +56,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// Without any coins in the cache, we shouldn't need to flush.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::OK);
// If the initial memory allocations of cacheCoins don't match these common
@@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
}
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::CRITICAL);
BOOST_TEST_MESSAGE("Exiting cache flush tests early due to unsupported arch");
@@ -92,7 +92,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::OK);
}
@@ -100,26 +100,26 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
for (int i{0}; i < 4; ++i) {
add_coin(view);
print_view_mem_usage(view);
- if (chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
+ if (chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
CoinsCacheSizeState::CRITICAL) {
break;
}
}
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::CRITICAL);
// Passing non-zero max mempool usage should allow us more headroom.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
CoinsCacheSizeState::OK);
for (int i{0}; i < 3; ++i) {
add_coin(view);
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
CoinsCacheSizeState::OK);
}
@@ -135,7 +135,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
BOOST_CHECK(usage_percentage >= 0.9);
BOOST_CHECK(usage_percentage < 1);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10),
CoinsCacheSizeState::LARGE);
}
@@ -143,7 +143,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
for (int i{0}; i < 1000; ++i) {
add_coin(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool),
+ chainstate.GetCoinsCacheSizeState(&tx_pool),
CoinsCacheSizeState::OK);
}
@@ -151,7 +151,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// preallocated memory that doesn't get reclaimed even after flush.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0),
CoinsCacheSizeState::CRITICAL);
view.SetBestBlock(InsecureRand256());
@@ -159,7 +159,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0),
CoinsCacheSizeState::CRITICAL);
}
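
The change from a CTxMemPool reference to a pointer in GetCoinsCacheSizeState() lets callers without a mempool query the cache state; a null pointer simply counts mempool usage as zero (see the validation.cpp hunk below). A minimal sketch reusing the objects from the test above; anything not shown in that test is an assumption:

    // Sketch only; cs_main is required by the lock annotation on the method.
    LOCK(::cs_main);
    // Null mempool: usage counts as zero, so only the coins cache is measured.
    const CoinsCacheSizeState no_pool_state =
        chainstate.GetCoinsCacheSizeState(nullptr, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0);
    // With a mempool, the headroom is MAX_COINS_CACHE_BYTES +
    //     max(max_mempool_size_bytes - tx_pool.DynamicMemoryUsage(), 0).
    const CoinsCacheSizeState with_pool_state =
        chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10);
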
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 047560f45d..72460e7c69 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -10,6 +10,7 @@
#include <random.h>
#include <shutdown.h>
#include <uint256.h>
+#include <util/memory.h>
#include <util/system.h>
#include <util/translation.h>
#include <util/vector.h>
@@ -39,35 +40,45 @@ struct CoinEntry {
}
-CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe) : db(ldb_path, nCacheSize, fMemory, fWipe, true)
+CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe) :
+ m_db(MakeUnique<CDBWrapper>(ldb_path, nCacheSize, fMemory, fWipe, true)),
+ m_ldb_path(ldb_path),
+ m_is_memory(fMemory) { }
+
+void CCoinsViewDB::ResizeCache(size_t new_cache_size)
{
+ // Have to do a reset first to get the original `m_db` state to release its
+ // filesystem lock.
+ m_db.reset();
+ m_db = MakeUnique<CDBWrapper>(
+ m_ldb_path, new_cache_size, m_is_memory, /*fWipe*/ false, /*obfuscate*/ true);
}
bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const {
- return db.Read(CoinEntry(&outpoint), coin);
+ return m_db->Read(CoinEntry(&outpoint), coin);
}
bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const {
- return db.Exists(CoinEntry(&outpoint));
+ return m_db->Exists(CoinEntry(&outpoint));
}
uint256 CCoinsViewDB::GetBestBlock() const {
uint256 hashBestChain;
- if (!db.Read(DB_BEST_BLOCK, hashBestChain))
+ if (!m_db->Read(DB_BEST_BLOCK, hashBestChain))
return uint256();
return hashBestChain;
}
std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
std::vector<uint256> vhashHeadBlocks;
- if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
+ if (!m_db->Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
return std::vector<uint256>();
}
return vhashHeadBlocks;
}
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
- CDBBatch batch(db);
+ CDBBatch batch(*m_db);
size_t count = 0;
size_t changed = 0;
size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize);
@@ -105,7 +116,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
mapCoins.erase(itOld);
if (batch.SizeEstimate() > batch_size) {
LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
- db.WriteBatch(batch);
+ m_db->WriteBatch(batch);
batch.Clear();
if (crash_simulate) {
static FastRandomContext rng;
@@ -122,14 +133,14 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
batch.Write(DB_BEST_BLOCK, hashBlock);
LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
- bool ret = db.WriteBatch(batch);
+ bool ret = m_db->WriteBatch(batch);
LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
return ret;
}
size_t CCoinsViewDB::EstimateSize() const
{
- return db.EstimateSize(DB_COIN, (char)(DB_COIN+1));
+ return m_db->EstimateSize(DB_COIN, (char)(DB_COIN+1));
}
CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {
@@ -156,7 +167,7 @@ bool CBlockTreeDB::ReadLastBlockFile(int &nFile) {
CCoinsViewCursor *CCoinsViewDB::Cursor() const
{
- CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(db).NewIterator(), GetBestBlock());
+ CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(*m_db).NewIterator(), GetBestBlock());
/* It seems that there are no "const iterators" for LevelDB. Since we
only need read operations on it, use a const-cast to get around
that restriction. */
@@ -335,7 +346,7 @@ public:
* Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout.
*/
bool CCoinsViewDB::Upgrade() {
- std::unique_ptr<CDBIterator> pcursor(db.NewIterator());
+ std::unique_ptr<CDBIterator> pcursor(m_db->NewIterator());
pcursor->Seek(std::make_pair(DB_COINS, uint256()));
if (!pcursor->Valid()) {
return true;
@@ -346,7 +357,7 @@ bool CCoinsViewDB::Upgrade() {
LogPrintf("[0%%]..."); /* Continued */
uiInterface.ShowProgress(_("Upgrading UTXO database").translated, 0, true);
size_t batch_size = 1 << 24;
- CDBBatch batch(db);
+ CDBBatch batch(*m_db);
int reportDone = 0;
std::pair<unsigned char, uint256> key;
std::pair<unsigned char, uint256> prev_key = {DB_COINS, uint256()};
@@ -380,9 +391,9 @@ bool CCoinsViewDB::Upgrade() {
}
batch.Erase(key);
if (batch.SizeEstimate() > batch_size) {
- db.WriteBatch(batch);
+ m_db->WriteBatch(batch);
batch.Clear();
- db.CompactRange(prev_key, key);
+ m_db->CompactRange(prev_key, key);
prev_key = key;
}
pcursor->Next();
@@ -390,8 +401,8 @@ bool CCoinsViewDB::Upgrade() {
break;
}
}
- db.WriteBatch(batch);
- db.CompactRange({DB_COINS, uint256()}, key);
+ m_db->WriteBatch(batch);
+ m_db->CompactRange({DB_COINS, uint256()}, key);
uiInterface.ShowProgress("", 100, false);
LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
return !ShutdownRequested();
diff --git a/src/txdb.h b/src/txdb.h
index 488c24f935..0cf7e2f1b8 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -39,11 +39,16 @@ static const int64_t max_filter_index_cache = 1024;
//! Max memory allocated to coin DB specific cache (MiB)
static const int64_t nMaxCoinsDBCache = 8;
+// Defined in validation.cpp; we can't include validation.h here because of a circular dependency.
+extern RecursiveMutex cs_main;
+
/** CCoinsView backed by the coin database (chainstate/) */
class CCoinsViewDB final : public CCoinsView
{
protected:
- CDBWrapper db;
+ std::unique_ptr<CDBWrapper> m_db;
+ fs::path m_ldb_path;
+ bool m_is_memory;
public:
/**
* @param[in] ldb_path Location in the filesystem where leveldb data will be stored.
@@ -60,6 +65,9 @@ public:
//! Attempt to update from an older database format. Returns whether an error occurred.
bool Upgrade();
size_t EstimateSize() const override;
+
+ //! Dynamically alter the underlying leveldb cache size.
+ void ResizeCache(size_t new_cache_size) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
};
/** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */
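
CCoinsViewDB::ResizeCache() above works by destroying and re-creating the underlying CDBWrapper (the reset releases leveldb's filesystem lock before the database is reopened with the new cache size), which is why the constructor now stores m_ldb_path and m_is_memory. A rough usage sketch, assuming an existing CCoinsViewDB named coins_db:

    // Sketch only; cs_main is required per the annotation in txdb.h above.
    LOCK(::cs_main);
    coins_db.ResizeCache(4 << 20);  // reopen the leveldb wrapper with a 4 MiB cache
    // The in-memory coins map is unaffected here; CChainState::ResizeCoinsCaches()
    // in validation.cpp below resizes that side and flushes when shrinking.
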
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 1d9f6a4a46..de1a3ec68f 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -811,15 +811,17 @@ CTransactionRef CTxMemPool::get(const uint256& hash) const
return i->GetSharedTx();
}
-TxMempoolInfo CTxMemPool::info(const uint256& hash, bool wtxid) const
+TxMempoolInfo CTxMemPool::info(const GenTxid& gtxid) const
{
LOCK(cs);
- indexed_transaction_set::const_iterator i = (wtxid ? get_iter_from_wtxid(hash) : mapTx.find(hash));
+ indexed_transaction_set::const_iterator i = (gtxid.IsWtxid() ? get_iter_from_wtxid(gtxid.GetHash()) : mapTx.find(gtxid.GetHash()));
if (i == mapTx.end())
return TxMempoolInfo();
return GetInfo(i);
}
+TxMempoolInfo CTxMemPool::info(const uint256& txid) const { return info(GenTxid{false, txid}); }
+
void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeDelta)
{
{
diff --git a/src/txmempool.h b/src/txmempool.h
index d4e9845942..4743e1b63a 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -716,14 +716,15 @@ public:
return totalTxSize;
}
- bool exists(const uint256& hash, bool wtxid=false) const
+ bool exists(const GenTxid& gtxid) const
{
LOCK(cs);
- if (wtxid) {
- return (mapTx.get<index_by_wtxid>().count(hash) != 0);
+ if (gtxid.IsWtxid()) {
+ return (mapTx.get<index_by_wtxid>().count(gtxid.GetHash()) != 0);
}
- return (mapTx.count(hash) != 0);
+ return (mapTx.count(gtxid.GetHash()) != 0);
}
+ bool exists(const uint256& txid) const { return exists(GenTxid{false, txid}); }
CTransactionRef get(const uint256& hash) const;
txiter get_iter_from_wtxid(const uint256& wtxid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
@@ -731,7 +732,8 @@ public:
AssertLockHeld(cs);
return mapTx.project<0>(mapTx.get<index_by_wtxid>().find(wtxid));
}
- TxMempoolInfo info(const uint256& hash, bool wtxid=false) const;
+ TxMempoolInfo info(const uint256& hash) const;
+ TxMempoolInfo info(const GenTxid& gtxid) const;
std::vector<TxMempoolInfo> infoAll() const;
size_t DynamicMemoryUsage() const;
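
With the GenTxid-based overloads above, callers can look a transaction up by either txid or wtxid through one code path, while the uint256 overloads remain as thin txid-only wrappers. A small usage sketch; mempool, txid and wtxid are assumed to be in scope, and GenTxid itself is expected to come from primitives/transaction.h (also touched by this change):

    // Sketch only.
    if (mempool.exists(GenTxid{/* is_wtxid */ true, wtxid})) {
        const TxMempoolInfo entry = mempool.info(GenTxid{true, wtxid});  // via the wtxid index
    }
    const bool have_txid = mempool.exists(txid);  // shorthand for exists(GenTxid{false, txid})
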
diff --git a/src/uint256.cpp b/src/uint256.cpp
index a943e71062..a5dfba41e2 100644
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -12,20 +12,20 @@
template <unsigned int BITS>
base_blob<BITS>::base_blob(const std::vector<unsigned char>& vch)
{
- assert(vch.size() == sizeof(data));
- memcpy(data, vch.data(), sizeof(data));
+ assert(vch.size() == sizeof(m_data));
+ memcpy(m_data, vch.data(), sizeof(m_data));
}
template <unsigned int BITS>
std::string base_blob<BITS>::GetHex() const
{
- return HexStr(std::reverse_iterator<const uint8_t*>(data + sizeof(data)), std::reverse_iterator<const uint8_t*>(data));
+ return HexStr(std::reverse_iterator<const uint8_t*>(m_data + sizeof(m_data)), std::reverse_iterator<const uint8_t*>(m_data));
}
template <unsigned int BITS>
void base_blob<BITS>::SetHex(const char* psz)
{
- memset(data, 0, sizeof(data));
+ memset(m_data, 0, sizeof(m_data));
// skip leading spaces
while (IsSpace(*psz))
@@ -39,7 +39,7 @@ void base_blob<BITS>::SetHex(const char* psz)
size_t digits = 0;
while (::HexDigit(psz[digits]) != -1)
digits++;
- unsigned char* p1 = (unsigned char*)data;
+ unsigned char* p1 = (unsigned char*)m_data;
unsigned char* pend = p1 + WIDTH;
while (digits > 0 && p1 < pend) {
*p1 = ::HexDigit(psz[--digits]);
diff --git a/src/uint256.h b/src/uint256.h
index b36598f572..8ab747ef49 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -18,11 +18,11 @@ class base_blob
{
protected:
static constexpr int WIDTH = BITS / 8;
- uint8_t data[WIDTH];
+ uint8_t m_data[WIDTH];
public:
base_blob()
{
- memset(data, 0, sizeof(data));
+ memset(m_data, 0, sizeof(m_data));
}
explicit base_blob(const std::vector<unsigned char>& vch);
@@ -30,17 +30,17 @@ public:
bool IsNull() const
{
for (int i = 0; i < WIDTH; i++)
- if (data[i] != 0)
+ if (m_data[i] != 0)
return false;
return true;
}
void SetNull()
{
- memset(data, 0, sizeof(data));
+ memset(m_data, 0, sizeof(m_data));
}
- inline int Compare(const base_blob& other) const { return memcmp(data, other.data, sizeof(data)); }
+ inline int Compare(const base_blob& other) const { return memcmp(m_data, other.m_data, sizeof(m_data)); }
friend inline bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; }
friend inline bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; }
@@ -51,34 +51,37 @@ public:
void SetHex(const std::string& str);
std::string ToString() const;
+ const unsigned char* data() const { return m_data; }
+ unsigned char* data() { return m_data; }
+
unsigned char* begin()
{
- return &data[0];
+ return &m_data[0];
}
unsigned char* end()
{
- return &data[WIDTH];
+ return &m_data[WIDTH];
}
const unsigned char* begin() const
{
- return &data[0];
+ return &m_data[0];
}
const unsigned char* end() const
{
- return &data[WIDTH];
+ return &m_data[WIDTH];
}
unsigned int size() const
{
- return sizeof(data);
+ return sizeof(m_data);
}
uint64_t GetUint64(int pos) const
{
- const uint8_t* ptr = data + pos * 8;
+ const uint8_t* ptr = m_data + pos * 8;
return ((uint64_t)ptr[0]) | \
((uint64_t)ptr[1]) << 8 | \
((uint64_t)ptr[2]) << 16 | \
@@ -92,13 +95,13 @@ public:
template<typename Stream>
void Serialize(Stream& s) const
{
- s.write((char*)data, sizeof(data));
+ s.write((char*)m_data, sizeof(m_data));
}
template<typename Stream>
void Unserialize(Stream& s)
{
- s.read((char*)data, sizeof(data));
+ s.read((char*)m_data, sizeof(m_data));
}
};
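
Renaming the member to m_data and exposing it through data() accessors gives callers a direct pointer to the underlying bytes without touching protected state, which pairs naturally with the existing size()/begin()/end() helpers. A trivial sketch:

    // Sketch only.
    uint256 hash;
    unsigned char* bytes = hash.data();           // same address as hash.begin()
    std::fill(bytes, bytes + hash.size(), 0xff);  // touch all 32 bytes
    const uint256& chash = hash;
    const unsigned char* cbytes = chash.data();   // const overload
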
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 8164e884b1..7b74789b32 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -6,6 +6,10 @@
#include <sync.h>
#include <util/system.h>
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
#include <chainparamsbase.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -1021,7 +1025,7 @@ bool FileCommit(FILE *file)
return false;
}
#else
- #if defined(__linux__) || defined(__NetBSD__)
+ #if defined(HAVE_FDATASYNC)
if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync
LogPrintf("%s: fdatasync failed: %d\n", __func__, errno);
return false;
@@ -1161,6 +1165,43 @@ void runCommand(const std::string& strCommand)
}
#endif
+#ifdef HAVE_BOOST_PROCESS
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in)
+{
+ namespace bp = boost::process;
+
+ UniValue result_json;
+ bp::opstream stdin_stream;
+ bp::ipstream stdout_stream;
+ bp::ipstream stderr_stream;
+
+ if (str_command.empty()) return UniValue::VNULL;
+
+ bp::child c(
+ str_command,
+ bp::std_out > stdout_stream,
+ bp::std_err > stderr_stream,
+ bp::std_in < stdin_stream
+ );
+ if (!str_std_in.empty()) {
+ stdin_stream << str_std_in << std::endl;
+ }
+ stdin_stream.pipe().close();
+
+ std::string result;
+ std::string error;
+ std::getline(stdout_stream, result);
+ std::getline(stderr_stream, error);
+
+ c.wait();
+ const int n_error = c.exit_code();
+ if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error));
+ if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result);
+
+ return result_json;
+}
+#endif // HAVE_BOOST_PROCESS
+
void SetupEnvironment()
{
#ifdef HAVE_MALLOPT_ARENA_MAX
diff --git a/src/util/system.h b/src/util/system.h
index 0bd14cc9ea..1df194ca84 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -37,6 +37,8 @@
#include <boost/thread/condition_variable.hpp> // for boost::thread_interrupted
+class UniValue;
+
// Application startup time (used for uptime calculation)
int64_t GetStartupTime();
@@ -96,6 +98,16 @@ std::string ShellEscape(const std::string& arg);
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand);
#endif
+#ifdef HAVE_BOOST_PROCESS
+/**
+ * Execute a command which returns JSON, and parse the result.
+ *
+ * @param str_command The command to execute, including any arguments
+ * @param str_std_in string to pass to stdin
+ * @return parsed JSON
+ */
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in="");
+#endif // HAVE_BOOST_PROCESS
/**
* Most paths passed as configuration arguments are treated as relative to
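
RunCommandParseJSON() runs the given command through boost::process, writes str_std_in to its stdin, reads one line each from stdout and stderr, and throws if the process exits non-zero or its stdout is not valid JSON (see the util/system.cpp hunk above). A hedged usage sketch; the exact coverage lives in the new src/test/system_tests.cpp:

    // Sketch only; only compiled when boost::process is available.
    #ifdef HAVE_BOOST_PROCESS
    const UniValue result = RunCommandParseJSON("echo \"{\\\"success\\\": true}\"");
    if (!result.isNull() && result["success"].get_bool()) {
        // the child process printed {"success": true}
    }
    #endif // HAVE_BOOST_PROCESS
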
diff --git a/src/validation.cpp b/src/validation.cpp
index f3e08d333b..84c106f064 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -139,7 +139,6 @@ bool fPruneMode = false;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
-size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
@@ -1273,9 +1272,10 @@ void CChainState::InitCoinsDB(
leveldb_name, cache_size_bytes, in_memory, should_wipe);
}
-void CChainState::InitCoinsCache()
+void CChainState::InitCoinsCache(size_t cache_size_bytes)
{
assert(m_coins_views != nullptr);
+ m_coinstip_cache_size_bytes = cache_size_bytes;
m_coins_views->InitCache();
}
@@ -2228,20 +2228,20 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
return true;
}
-CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool& tx_pool)
+CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
{
return this->GetCoinsCacheSizeState(
tx_pool,
- nCoinCacheUsage,
+ m_coinstip_cache_size_bytes,
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
}
CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
- const CTxMemPool& tx_pool,
+ const CTxMemPool* tx_pool,
size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes)
{
- int64_t nMempoolUsage = tx_pool.DynamicMemoryUsage();
+ const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0;
int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
int64_t nTotalSpace =
max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
@@ -2280,7 +2280,7 @@ bool CChainState::FlushStateToDisk(
{
bool fFlushForPrune = false;
bool fDoFullFlush = false;
- CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(::mempool);
+ CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&::mempool);
LOCK(cs_LastBlockFile);
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
if (nManualPruneHeight > 0) {
@@ -3435,7 +3435,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
if (commitpos == -1) {
uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
- CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin());
+ CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
CTxOut out;
out.nValue = 0;
out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
@@ -3580,7 +3580,7 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat
if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
}
- CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
+ CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
}
@@ -4307,7 +4307,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
}
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
- if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= nCoinCacheUsage) {
+ if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= ::ChainstateActive().m_coinstip_cache_size_bytes) {
assert(coins.GetBestBlock() == pindex->GetBlockHash());
DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins);
if (res == DISCONNECT_FAILED) {
@@ -4588,13 +4588,13 @@ void CChainState::UnloadBlockIndex() {
// May NOT be used after any connections are up as much
// of the peer-processing logic assumes a consistent
// block index state
-void UnloadBlockIndex()
+void UnloadBlockIndex(CTxMemPool* mempool)
{
LOCK(cs_main);
g_chainman.Unload();
pindexBestInvalid = nullptr;
pindexBestHeader = nullptr;
- mempool.clear();
+ if (mempool) mempool->clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
setDirtyBlockIndex.clear();
@@ -4970,6 +4970,39 @@ std::string CChainState::ToString()
tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
}
+bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
+{
+ if (coinstip_size == m_coinstip_cache_size_bytes &&
+ coinsdb_size == m_coinsdb_cache_size_bytes) {
+ // Cache sizes are unchanged, no need to continue.
+ return true;
+ }
+ size_t old_coinstip_size = m_coinstip_cache_size_bytes;
+ m_coinstip_cache_size_bytes = coinstip_size;
+ m_coinsdb_cache_size_bytes = coinsdb_size;
+ CoinsDB().ResizeCache(coinsdb_size);
+
+ LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
+ this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
+ LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
+ this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
+
+ BlockValidationState state;
+ const CChainParams& chainparams = Params();
+
+ bool ret;
+
+ if (coinstip_size > old_coinstip_size) {
+ // Likely no need to flush if cache sizes have grown.
+ ret = FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED);
+ } else {
+ // Otherwise, flush state to disk and deallocate the in-memory coins map.
+ ret = FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS);
+ CoinsTip().ReallocateCache();
+ }
+ return ret;
+}
+
std::string CBlockFileInfo::ToString() const
{
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
@@ -5278,3 +5311,33 @@ void ChainstateManager::Reset()
m_active_chainstate = nullptr;
m_snapshot_validated = false;
}
+
+void ChainstateManager::MaybeRebalanceCaches()
+{
+ if (m_ibd_chainstate && !m_snapshot_chainstate) {
+ LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
+ // Allocate everything to the IBD chainstate.
+ m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
+ }
+ else if (m_snapshot_chainstate && !m_ibd_chainstate) {
+ LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
+ // Allocate everything to the snapshot chainstate.
+ m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
+ }
+ else if (m_ibd_chainstate && m_snapshot_chainstate) {
+ // If both chainstates exist, determine which one needs more cache based on IBD status.
+ //
+ // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
+ if (m_snapshot_chainstate->IsInitialBlockDownload()) {
+ m_ibd_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
+ m_snapshot_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
+ } else {
+ m_snapshot_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
+ m_ibd_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
+ }
+ }
+}
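
Tying the pieces together: m_total_coinstip_cache and m_total_coinsdb_cache hold the node's overall coins-cache budget, and MaybeRebalanceCaches() splits it 95%/5% in favor of whichever chainstate still has the most work to do (the snapshot chainstate while it is in IBD, otherwise the background IBD chainstate). A rough caller-side sketch; the totals and any init.cpp wiring are assumptions, only the calls themselves come from this patch:

    // Sketch only; total_coinstip_bytes/total_coinsdb_bytes are assumed to have
    // been derived from -dbcache elsewhere.
    LOCK(::cs_main);
    chainman.m_total_coinstip_cache = total_coinstip_bytes;
    chainman.m_total_coinsdb_cache = total_coinsdb_bytes;
    chainman.MaybeRebalanceCaches();  // resizes each chainstate via ResizeCoinsCaches()
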
diff --git a/src/validation.h b/src/validation.h
index 2dd83a5c86..534162d64a 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -127,7 +127,6 @@ extern bool g_parallel_script_checks;
extern bool fRequireStandard;
extern bool fCheckBlockIndex;
extern bool fCheckpointsEnabled;
-extern size_t nCoinCacheUsage;
/** A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) */
extern CFeeRate minRelayTxFee;
/** If the tip is older than this (in seconds), the node is considered to be in initial block download. */
@@ -161,7 +160,7 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi
/** Ensures we have a genesis block in the block tree, possibly writing one to disk. */
bool LoadGenesisBlock(const CChainParams& chainparams);
/** Unload database information */
-void UnloadBlockIndex();
+void UnloadBlockIndex(CTxMemPool* mempool);
/** Run an instance of the script checking thread */
void ThreadScriptCheck(int worker_num);
/**
@@ -532,7 +531,7 @@ public:
//! Initialize the in-memory coins cache (to be done after the health of the on-disk database
//! is verified).
- void InitCoinsCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+ void InitCoinsCache(size_t cache_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! @returns whether or not the CoinsViews object has been fully initialized and we can
//! safely flush this object to disk.
@@ -581,6 +580,17 @@ public:
//! Destructs all objects related to accessing the UTXO set.
void ResetCoinsViews() { m_coins_views.reset(); }
+ //! The cache size of the on-disk coins view.
+ size_t m_coinsdb_cache_size_bytes{0};
+
+ //! The cache size of the in-memory coins view.
+ size_t m_coinstip_cache_size_bytes{0};
+
+ //! Resize the CoinsViews caches dynamically and flush state to disk.
+ //! @returns true unless an error occurred during the flush.
+ bool ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
/**
* Update the on-disk chain state.
* The caches and indexes are flushed depending on the mode we're called with
@@ -664,11 +674,11 @@ public:
//! Dictates whether we need to flush the cache to disk or not.
//!
//! @return the state of the size of the coins cache.
- CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool& tx_pool)
+ CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
CoinsCacheSizeState GetCoinsCacheSizeState(
- const CTxMemPool& tx_pool,
+ const CTxMemPool* tx_pool,
size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
@@ -797,6 +807,14 @@ public:
//! chainstate to avoid duplicating block metadata.
BlockManager m_blockman GUARDED_BY(::cs_main);
+ //! The total number of bytes available for us to use across all in-memory
+ //! coins caches. This will be split somehow across chainstates.
+ int64_t m_total_coinstip_cache{0};
+
+ //! The total number of bytes available for us to use across all leveldb
+ //! coins databases. This will be split somehow across chainstates.
+ int64_t m_total_coinsdb_cache{0};
+
//! Instantiate a new chainstate and assign it based upon whether it is
//! from a snapshot.
//!
@@ -885,6 +903,10 @@ public:
//! Clear (deconstruct) chainstate data.
void Reset();
+
+ //! Check to see if caches are out of balance and if so, call
+ //! ResizeCoinsCaches() as needed.
+ void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
};
/** DEPRECATED! Please use node.chainman instead. May only be used in validation.cpp internally */
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 1953be2d54..a04311fdf5 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -32,12 +32,12 @@ void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filena
int ret = db.get_mpf()->get_fileid(fileid.value);
if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (get_fileid failed with %d)", filename, ret));
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (get_fileid failed with %d)", filename, ret));
}
for (const auto& item : env.m_fileids) {
if (fileid == item.second && &fileid != &item.second) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (duplicates fileid %s from %s)", filename,
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (duplicates fileid %s from %s)", filename,
HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first));
}
}
@@ -97,9 +97,8 @@ void BerkeleyEnvironment::Close()
fDbEnvInit = false;
for (auto& db : m_databases) {
- auto count = mapFileUseCount.find(db.first);
- assert(count == mapFileUseCount.end() || count->second == 0);
BerkeleyDatabase& database = db.second.get();
+ assert(database.m_refcount <= 0);
if (database.m_db) {
database.m_db->close(0);
database.m_db.reset();
@@ -232,16 +231,6 @@ BerkeleyEnvironment::BerkeleyEnvironment()
fMockDb = true;
}
-bool BerkeleyEnvironment::Verify(const std::string& strFile)
-{
- LOCK(cs_db);
- assert(mapFileUseCount.count(strFile) == 0);
-
- Db db(dbenv.get(), 0);
- int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
- return result == 0;
-}
-
BerkeleyBatch::SafeDbt::SafeDbt()
{
m_dbt.set_flags(DB_DBT_MALLOC);
@@ -295,7 +284,11 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
if (fs::exists(file_path))
{
- if (!env->Verify(strFile)) {
+ assert(m_refcount == 0);
+
+ Db db(env->dbenv.get(), 0);
+ int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
+ if (result != 0) {
errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), file_path);
return false;
}
@@ -316,6 +309,8 @@ BerkeleyDatabase::~BerkeleyDatabase()
{
if (env) {
LOCK(cs_db);
+ env->CloseDb(strFile);
+ assert(!m_db);
size_t erased = env->m_databases.erase(strFile);
assert(erased == 1);
env->m_fileids.erase(strFile);
@@ -324,14 +319,24 @@ BerkeleyDatabase::~BerkeleyDatabase()
BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database)
{
+ database.AddRef();
+ database.Open(pszMode);
fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w'));
fFlushOnClose = fFlushOnCloseIn;
env = database.env.get();
- if (database.IsDummy()) {
- return;
+ pdb = database.m_db.get();
+ strFile = database.strFile;
+ bool fCreate = strchr(pszMode, 'c') != nullptr;
+ if (fCreate && !Exists(std::string("version"))) {
+ bool fTmp = fReadOnly;
+ fReadOnly = false;
+ Write(std::string("version"), CLIENT_VERSION);
+ fReadOnly = fTmp;
}
- const std::string &strFilename = database.strFile;
+}
+void BerkeleyDatabase::Open(const char* pszMode)
+{
bool fCreate = strchr(pszMode, 'c') != nullptr;
unsigned int nFlags = DB_THREAD;
if (fCreate)
@@ -341,10 +346,9 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo
LOCK(cs_db);
bilingual_str open_err;
if (!env->Open(open_err))
- throw std::runtime_error("BerkeleyBatch: Failed to open database environment.");
+ throw std::runtime_error("BerkeleyDatabase: Failed to open database environment.");
- pdb = database.m_db.get();
- if (pdb == nullptr) {
+ if (m_db == nullptr) {
int ret;
std::unique_ptr<Db> pdb_temp = MakeUnique<Db>(env->dbenv.get(), 0);
@@ -353,60 +357,33 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo
DbMpoolFile* mpf = pdb_temp->get_mpf();
ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Failed to configure for no temp file backing for database %s", strFilename));
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Failed to configure for no temp file backing for database %s", strFile));
}
}
ret = pdb_temp->open(nullptr, // Txn pointer
- fMockDb ? nullptr : strFilename.c_str(), // Filename
- fMockDb ? strFilename.c_str() : "main", // Logical db name
+ fMockDb ? nullptr : strFile.c_str(), // Filename
+ fMockDb ? strFile.c_str() : "main", // Logical db name
DB_BTREE, // Database type
nFlags, // Flags
0);
if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename));
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Error %d, can't open database %s", ret, strFile));
}
+ m_file_path = (env->Directory() / strFile).string();
// Call CheckUniqueFileid on the containing BDB environment to
// avoid BDB data consistency bugs that happen when different data
// files in the same environment have the same fileid.
- //
- // Also call CheckUniqueFileid on all the other g_dbenvs to prevent
- // bitcoin from opening the same data file through another
- // environment when the file is referenced through equivalent but
- // not obviously identical symlinked or hard linked or bind mounted
- // paths. In the future a more relaxed check for equal inode and
- // device ids could be done instead, which would allow opening
- // different backup copies of a wallet at the same time. Maybe even
- // more ideally, an exclusive lock for accessing the database could
- // be implemented, so no equality checks are needed at all. (Newer
- // versions of BDB have an set_lk_exclusive method for this
- // purpose, but the older version we use does not.)
- for (const auto& env : g_dbenvs) {
- CheckUniqueFileid(*env.second.lock().get(), strFilename, *pdb_temp, this->env->m_fileids[strFilename]);
- }
+ CheckUniqueFileid(*env, strFile, *pdb_temp, this->env->m_fileids[strFile]);
- pdb = pdb_temp.release();
- database.m_db.reset(pdb);
+ m_db.reset(pdb_temp.release());
- if (fCreate && !Exists(std::string("version"))) {
- bool fTmp = fReadOnly;
- fReadOnly = false;
- Write(std::string("version"), CLIENT_VERSION);
- fReadOnly = fTmp;
- }
}
- database.AddRef();
- strFile = strFilename;
}
}
-void BerkeleyDatabase::Open(const char* mode)
-{
- throw std::logic_error("BerkeleyDatabase does not implement Open. This function should not be called.");
-}
-
void BerkeleyBatch::Flush()
{
if (activeTxn)
@@ -427,6 +404,12 @@ void BerkeleyDatabase::IncrementUpdateCounter()
++nUpdateCounter;
}
+BerkeleyBatch::~BerkeleyBatch()
+{
+ Close();
+ m_database.RemoveRef();
+}
+
void BerkeleyBatch::Close()
{
if (!pdb)
@@ -439,8 +422,6 @@ void BerkeleyBatch::Close()
if (fFlushOnClose)
Flush();
-
- m_database.RemoveRef();
}
void BerkeleyEnvironment::CloseDb(const std::string& strFile)
@@ -464,8 +445,8 @@ void BerkeleyEnvironment::ReloadDbEnv()
AssertLockNotHeld(cs_db);
std::unique_lock<RecursiveMutex> lock(cs_db);
m_db_in_use.wait(lock, [this](){
- for (auto& count : mapFileUseCount) {
- if (count.second > 0) return false;
+ for (auto& db : m_databases) {
+ if (db.second.get().m_refcount > 0) return false;
}
return true;
});
@@ -487,17 +468,14 @@ void BerkeleyEnvironment::ReloadDbEnv()
bool BerkeleyDatabase::Rewrite(const char* pszSkip)
{
- if (IsDummy()) {
- return true;
- }
while (true) {
{
LOCK(cs_db);
- if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) {
+ if (m_refcount <= 0) {
// Flush log data to the dat file
env->CloseDb(strFile);
env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(strFile);
+ m_refcount = -1;
bool fSuccess = true;
LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
@@ -581,10 +559,11 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
return;
{
LOCK(cs_db);
- std::map<std::string, int>::iterator mi = mapFileUseCount.begin();
- while (mi != mapFileUseCount.end()) {
- std::string strFile = (*mi).first;
- int nRefCount = (*mi).second;
+ bool no_dbs_accessed = true;
+ for (auto& db_it : m_databases) {
+ std::string strFile = db_it.first;
+ int nRefCount = db_it.second.get().m_refcount;
+ if (nRefCount < 0) continue;
LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
if (nRefCount == 0) {
// Move log data to the dat file
@@ -595,14 +574,15 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
if (!fMockDb)
dbenv->lsn_reset(strFile.c_str(), 0);
LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile);
- mapFileUseCount.erase(mi++);
- } else
- mi++;
+ nRefCount = -1;
+ } else {
+ no_dbs_accessed = false;
+ }
}
LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
if (fShutdown) {
char** listp;
- if (mapFileUseCount.empty()) {
+ if (no_dbs_accessed) {
dbenv->log_archive(&listp, DB_ARCH_REMOVE);
Close();
if (!fMockDb) {
@@ -615,21 +595,17 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
bool BerkeleyDatabase::PeriodicFlush()
{
- // There's nothing to do for dummy databases. Return true.
- if (IsDummy()) return true;
-
// Don't flush if we can't acquire the lock.
TRY_LOCK(cs_db, lockDb);
if (!lockDb) return false;
// Don't flush if any databases are in use
- for (const auto& use_count : env->mapFileUseCount) {
- if (use_count.second > 0) return false;
+ for (auto& it : env->m_databases) {
+ if (it.second.get().m_refcount > 0) return false;
}
// Don't flush if there haven't been any batch writes for this database.
- auto it = env->mapFileUseCount.find(strFile);
- if (it == env->mapFileUseCount.end()) return false;
+ if (m_refcount < 0) return false;
LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile);
int64_t nStart = GetTimeMillis();
@@ -637,7 +613,7 @@ bool BerkeleyDatabase::PeriodicFlush()
// Flush wallet file so it's self contained
env->CloseDb(strFile);
env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(it);
+ m_refcount = -1;
LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
@@ -646,19 +622,15 @@ bool BerkeleyDatabase::PeriodicFlush()
bool BerkeleyDatabase::Backup(const std::string& strDest) const
{
- if (IsDummy()) {
- return false;
- }
while (true)
{
{
LOCK(cs_db);
- if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0)
+ if (m_refcount <= 0)
{
// Flush log data to the dat file
env->CloseDb(strFile);
env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(strFile);
// Copy wallet file
fs::path pathSrc = env->Directory() / strFile;
@@ -687,23 +659,17 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const
void BerkeleyDatabase::Flush()
{
- if (!IsDummy()) {
- env->Flush(false);
- }
+ env->Flush(false);
}
void BerkeleyDatabase::Close()
{
- if (!IsDummy()) {
- env->Flush(true);
- }
+ env->Flush(true);
}
void BerkeleyDatabase::ReloadDbEnv()
{
- if (!IsDummy()) {
- env->ReloadDbEnv();
- }
+ env->ReloadDbEnv();
}
bool BerkeleyBatch::StartCursor()
@@ -801,7 +767,7 @@ bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
bool BerkeleyBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite)
{
if (!pdb)
- return true;
+ return false;
if (fReadOnly)
assert(!"Write called on database in read-only mode");
@@ -840,16 +806,18 @@ bool BerkeleyBatch::HasKey(CDataStream&& key)
void BerkeleyDatabase::AddRef()
{
LOCK(cs_db);
- ++env->mapFileUseCount[strFile];
+ if (m_refcount < 0) {
+ m_refcount = 1;
+ } else {
+ m_refcount++;
+ }
}
void BerkeleyDatabase::RemoveRef()
{
- {
- LOCK(cs_db);
- --env->mapFileUseCount[strFile];
- }
- env->m_db_in_use.notify_all();
+ LOCK(cs_db);
+ m_refcount--;
+ if (env) env->m_db_in_use.notify_all();
}
std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(const char* mode, bool flush_on_close)
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index ef3b81d4d6..75546924e8 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -52,7 +52,6 @@ private:
public:
std::unique_ptr<DbEnv> dbenv;
- std::map<std::string, int> mapFileUseCount;
std::map<std::string, std::reference_wrapper<BerkeleyDatabase>> m_databases;
std::unordered_map<std::string, WalletDatabaseFileId> m_fileids;
std::condition_variable_any m_db_in_use;
@@ -67,8 +66,6 @@ public:
bool IsDatabaseLoaded(const std::string& db_filename) const { return m_databases.find(db_filename) != m_databases.end(); }
fs::path Directory() const { return strPath; }
- bool Verify(const std::string& strFile);
-
bool Open(bilingual_str& error);
void Close();
void Flush(bool fShutdown);
@@ -100,12 +97,8 @@ class BerkeleyBatch;
**/
class BerkeleyDatabase : public WalletDatabase
{
- friend class BerkeleyBatch;
public:
- /** Create dummy DB handle */
- BerkeleyDatabase() : WalletDatabase(), env(nullptr)
- {
- }
+ BerkeleyDatabase() = delete;
/** Create DB handle to real database */
BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, std::string filename) :
@@ -166,17 +159,10 @@ public:
/** Database pointer. This is initialized lazily and reset during flushes, so it can be null. */
std::unique_ptr<Db> m_db;
- /** Make a BerkeleyBatch connected to this database */
- std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override;
-
-private:
std::string strFile;
- /** Return whether this database handle is a dummy for testing.
- * Only to be used at a low level, application should ideally not care
- * about this.
- */
- bool IsDummy() const { return env == nullptr; }
+ /** Make a BerkeleyBatch connected to this database */
+ std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override;
};
/** RAII class that provides access to a Berkeley database */
@@ -220,7 +206,7 @@ protected:
public:
explicit BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode = "r+", bool fFlushOnCloseIn=true);
- ~BerkeleyBatch() override { Close(); }
+ ~BerkeleyBatch() override;
BerkeleyBatch(const BerkeleyBatch&) = delete;
BerkeleyBatch& operator=(const BerkeleyBatch&) = delete;
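
The bdb changes replace BerkeleyEnvironment::mapFileUseCount with a per-database m_refcount: BerkeleyBatch now calls AddRef()/Open() in its constructor and RemoveRef() in the new destructor, and the flush/rewrite/backup paths treat a negative refcount as "closed since the last flush". A hedged illustration of the lifecycle, not code from the patch:

    // Sketch only, assuming an already-constructed BerkeleyDatabase `database`.
    {
        BerkeleyBatch batch(database, "r+");  // AddRef() + Open(): m_refcount becomes >= 1
        // ... reads and writes through `batch` ...
    }                                         // ~BerkeleyBatch(): Close(), then RemoveRef()
    // With no batches open (m_refcount <= 0), PeriodicFlush()/Rewrite()/Backup()
    // may checkpoint and close the file, after which m_refcount is set to -1.
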
diff --git a/src/wallet/db.h b/src/wallet/db.h
index 12dc1cc96b..0afaba5fd1 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -9,6 +9,7 @@
#include <clientversion.h>
#include <fs.h>
#include <streams.h>
+#include <util/memory.h>
#include <atomic>
#include <memory>
@@ -154,4 +155,44 @@ public:
virtual std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) = 0;
};
+/** RAII class that provides access to a DummyDatabase. Never fails. */
+class DummyBatch : public DatabaseBatch
+{
+private:
+ bool ReadKey(CDataStream&& key, CDataStream& value) override { return true; }
+ bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) override { return true; }
+ bool EraseKey(CDataStream&& key) override { return true; }
+ bool HasKey(CDataStream&& key) override { return true; }
+
+public:
+ void Flush() override {}
+ void Close() override {}
+
+ bool StartCursor() override { return true; }
+ bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) override { return true; }
+ void CloseCursor() override {}
+ bool TxnBegin() override { return true; }
+ bool TxnCommit() override { return true; }
+ bool TxnAbort() override { return true; }
+};
+
+/** A dummy WalletDatabase that does nothing and never fails. Only used by unit tests.
+ **/
+class DummyDatabase : public WalletDatabase
+{
+public:
+ void Open(const char* mode) override {};
+ void AddRef() override {}
+ void RemoveRef() override {}
+ bool Rewrite(const char* pszSkip=nullptr) override { return true; }
+ bool Backup(const std::string& strDest) const override { return true; }
+ void Close() override {}
+ void Flush() override {}
+ bool PeriodicFlush() override { return true; }
+ void IncrementUpdateCounter() override { ++nUpdateCounter; }
+ void ReloadDbEnv() override {}
+ bool Verify(bilingual_str& errorStr) override { return true; }
+ std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override { return MakeUnique<DummyBatch>(); }
+};
+
#endif // BITCOIN_WALLET_DB_H
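
The dummy-database behaviour previously hidden behind BerkeleyDatabase::IsDummy() now lives in wallet/db.h as an explicit DummyDatabase/DummyBatch pair for unit tests: every operation succeeds and nothing touches disk. A minimal sketch of test-side usage:

    // Sketch only.
    DummyDatabase database;
    std::unique_ptr<DatabaseBatch> batch = database.MakeBatch();
    assert(batch->TxnBegin() && batch->TxnCommit());  // always succeeds
    assert(database.PeriodicFlush());                 // no-op, returns true
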
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 781920755c..52162ab521 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -24,7 +24,7 @@ public:
bool HasWalletSupport() const override {return true;}
//! Return the wallets help message.
- void AddWalletOptions() const override;
+ void AddWalletOptions(ArgsManager& argsman) const override;
//! Wallets parameter interaction
bool ParameterInteraction() const override;
@@ -35,42 +35,42 @@ public:
const WalletInitInterface& g_wallet_init_interface = WalletInit();
-void WalletInit::AddWalletOptions() const
+void WalletInit::AddWalletOptions(ArgsManager& argsman) const
{
- gArgs.AddArg("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-avoidpartialspends", strprintf("Group outputs by address, selecting all or none, instead of selecting on a per-output basis. Privacy is improved as an address is only used once (unless someone sends to it after spending from it), but may result in slightly higher fees as suboptimal coin selection may result due to the added limitation (default: %u (always enabled for wallets with \"avoid_reuse\" enabled))", DEFAULT_AVOIDPARTIALSPENDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-changetype", "What type of change to use (\"legacy\", \"p2sh-segwit\", or \"bech32\"). Default is same as -addresstype, except when -addresstype=p2sh-segwit a native segwit output is used when sending to a native segwit address)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-disablewallet", "Do not load the wallet and disable wallet RPC calls", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-discardfee=<amt>", strprintf("The fee rate (in %s/kB) that indicates your tolerance for discarding change by adding it to the fee (default: %s). "
+ argsman.AddArg("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-avoidpartialspends", strprintf("Group outputs by address, selecting all or none, instead of selecting on a per-output basis. Privacy is improved as an address is only used once (unless someone sends to it after spending from it), but may result in slightly higher fees as suboptimal coin selection may result due to the added limitation (default: %u (always enabled for wallets with \"avoid_reuse\" enabled))", DEFAULT_AVOIDPARTIALSPENDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-changetype", "What type of change to use (\"legacy\", \"p2sh-segwit\", or \"bech32\"). Default is same as -addresstype, except when -addresstype=p2sh-segwit a native segwit output is used when sending to a native segwit address)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-disablewallet", "Do not load the wallet and disable wallet RPC calls", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-discardfee=<amt>", strprintf("The fee rate (in %s/kB) that indicates your tolerance for discarding change by adding it to the fee (default: %s). "
"Note: An output is discarded if it is dust at this rate, but we will always discard up to the dust relay fee and a discard fee above that is limited by the fee estimate for the longest target",
CURRENCY_UNIT, FormatMoney(DEFAULT_DISCARD_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-fallbackfee=<amt>", strprintf("A fee rate (in %s/kB) that will be used when fee estimation has insufficient data. 0 to entirely disable the fallbackfee feature. (default: %s)",
+ argsman.AddArg("-fallbackfee=<amt>", strprintf("A fee rate (in %s/kB) that will be used when fee estimation has insufficient data. 0 to entirely disable the fallbackfee feature. (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_FALLBACK_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u). Warning: Smaller sizes may increase the risk of losing funds when restoring from an old backup, if none of the addresses in the original keypool have been used.", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-maxtxfee=<amt>", strprintf("Maximum total fees (in %s) to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)",
+ argsman.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u). Warning: Smaller sizes may increase the risk of losing funds when restoring from an old backup, if none of the addresses in the original keypool have been used.", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-maxtxfee=<amt>", strprintf("Maximum total fees (in %s) to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-mintxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)",
+ argsman.AddArg("-mintxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MINFEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-paytxfee=<amt>", strprintf("Fee (in %s/kB) to add to transactions you send (default: %s)",
+ argsman.AddArg("-paytxfee=<amt>", strprintf("Fee (in %s/kB) to add to transactions you send (default: %s)",
CURRENCY_UNIT, FormatMoney(CFeeRate{DEFAULT_PAY_TX_FEE}.GetFeePerK())), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-rescan", "Rescan the block chain for missing wallet transactions on startup", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-spendzeroconfchange", strprintf("Spend unconfirmed change when sending transactions (default: %u)", DEFAULT_SPEND_ZEROCONF_CHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-txconfirmtarget=<n>", strprintf("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)", DEFAULT_TX_CONFIRM_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-wallet=<path>", "Specify wallet database path. Can be specified multiple times to load multiple wallets. Path is interpreted relative to <walletdir> if it is not absolute, and will be created if it does not exist (as a directory containing a wallet.dat file and log files). For backwards compatibility this will also accept names of existing data files in <walletdir>.)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
- gArgs.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
+ argsman.AddArg("-rescan", "Rescan the block chain for missing wallet transactions on startup", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-spendzeroconfchange", strprintf("Spend unconfirmed change when sending transactions (default: %u)", DEFAULT_SPEND_ZEROCONF_CHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-txconfirmtarget=<n>", strprintf("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)", DEFAULT_TX_CONFIRM_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-wallet=<path>", "Specify wallet database path. Can be specified multiple times to load multiple wallets. Path is interpreted relative to <walletdir> if it is not absolute, and will be created if it does not exist (as a directory containing a wallet.dat file and log files). For backwards compatibility this will also accept names of existing data files in <walletdir>.)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
+ argsman.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
#if HAVE_SYSTEM
- gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
#endif
- gArgs.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup"
+ argsman.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup"
" (1 = keep tx meta data e.g. payment request information, 2 = drop tx meta data)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DEFAULT_WALLET_DBLOGSIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
- gArgs.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
- gArgs.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", DEFAULT_WALLET_PRIVDB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
- gArgs.AddArg("-walletrejectlongchains", strprintf("Wallet will not create transactions that violate mempool chain limits (default: %u)", DEFAULT_WALLET_REJECT_LONG_CHAINS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DEFAULT_WALLET_DBLOGSIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", DEFAULT_WALLET_PRIVDB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-walletrejectlongchains", strprintf("Wallet will not create transactions that violate mempool chain limits (default: %u)", DEFAULT_WALLET_REJECT_LONG_CHAINS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
}
bool WalletInit::ParameterInteraction() const
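The hunks above swap the global gArgs for the ArgsManager instance passed in as argsman. A minimal sketch of that pattern follows, using a hypothetical -examplewalletflag option; the registration call mirrors the AddArg lines above, and GetBoolArg is the usual read-back accessor from util/system.h.

#include <util/system.h>

// Register a (hypothetical) wallet option on the ArgsManager handed in by the
// caller, rather than on the global gArgs.
void RegisterExampleWalletArgs(ArgsManager& argsman)
{
    argsman.AddArg("-examplewalletflag", "Hypothetical wallet flag (default: 0)",
                   ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
}

// Read the parsed value back later, falling back to a default when unset.
bool ExampleWalletFlagEnabled(const ArgsManager& argsman)
{
    return argsman.GetBoolArg("-examplewalletflag", false);
}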
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 9d334063c4..39d1f49e9e 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2338,7 +2338,7 @@ static UniValue getwalletinfo(const JSONRPCRequest& request)
{RPCResult::Type::NUM_TIME, "keypoololdest", "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool. Legacy wallets only."},
{RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"},
{RPCResult::Type::NUM, "keypoolsize_hd_internal", "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"},
- {RPCResult::Type::NUM_TIME, "unlocked_until", "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked"},
+ {RPCResult::Type::NUM_TIME, "unlocked_until", /* optional */ true, "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked (only present for passphrase-encrypted wallets)"},
{RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"},
{RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "the Hash160 of the HD seed (only present when HD is enabled)"},
{RPCResult::Type::BOOL, "private_keys_enabled", "false if privatekeys are disabled for this wallet (enforced watch-only wallet)"},
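For the unlocked_until change above, the extra boolean in the RPCResult initializer marks the field as optional in the generated RPC help. A minimal sketch with hypothetical field names, assuming the RPCResult helper from rpc/util.h:

#include <rpc/util.h>
#include <vector>

static std::vector<RPCResult> ExampleResultFields()
{
    return {
        {RPCResult::Type::NUM, "always_present", "a field returned unconditionally"},
        {RPCResult::Type::NUM_TIME, "sometimes_present", /* optional */ true,
         "only returned when the relevant feature is enabled"},
    };
}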
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index d2770a46f7..7ef06663b5 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -630,13 +630,13 @@ static size_t CalculateNestedKeyhashInputSize(bool use_max_sig)
CPubKey pubkey = key.GetPubKey();
// Generate pubkey hash
- uint160 key_hash(Hash160(pubkey.begin(), pubkey.end()));
+ uint160 key_hash(Hash160(pubkey));
// Create inner-script to enter into keystore. Key hash can't be 0...
CScript inner_script = CScript() << OP_0 << std::vector<unsigned char>(key_hash.begin(), key_hash.end());
// Create outer P2SH script for the output
- uint160 script_id(Hash160(inner_script.begin(), inner_script.end()));
+ uint160 script_id(Hash160(inner_script));
CScript script_pubkey = CScript() << OP_HASH160 << std::vector<unsigned char>(script_id.begin(), script_id.end()) << OP_EQUAL;
// Add inner-script to key store and key to watchonly
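The test hunk above relies on the Span-taking Hash160() overload from src/hash.h, so a CPubKey or CScript can be hashed directly instead of via begin()/end() iterators. A minimal sketch of the same pattern, building a P2SH scriptPubKey for an arbitrary redeem script (the helper name is illustrative):

#include <hash.h>
#include <script/script.h>
#include <vector>

CScript ExampleP2SHScriptPubKey(const CScript& redeem_script)
{
    // Previously: Hash160(redeem_script.begin(), redeem_script.end())
    const uint160 script_id = Hash160(redeem_script);
    return CScript() << OP_HASH160
                     << std::vector<unsigned char>(script_id.begin(), script_id.end())
                     << OP_EQUAL;
}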
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 8c409b40cd..fa6814d0d3 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -103,7 +103,7 @@ bool WalletBatch::WriteKey(const CPubKey& vchPubKey, const CPrivKey& vchPrivKey,
vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end());
vchKey.insert(vchKey.end(), vchPrivKey.begin(), vchPrivKey.end());
- return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey.begin(), vchKey.end())), false);
+ return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey)), false);
}
bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey,
@@ -115,7 +115,7 @@ bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey,
}
// Compute a checksum of the encrypted key
- uint256 checksum = Hash(vchCryptedSecret.begin(), vchCryptedSecret.end());
+ uint256 checksum = Hash(vchCryptedSecret);
const auto key = std::make_pair(DBKeys::CRYPTED_KEY, vchPubKey);
if (!WriteIC(key, std::make_pair(vchCryptedSecret, checksum), false)) {
@@ -209,7 +209,7 @@ bool WalletBatch::WriteDescriptorKey(const uint256& desc_id, const CPubKey& pubk
key.insert(key.end(), pubkey.begin(), pubkey.end());
key.insert(key.end(), privkey.begin(), privkey.end());
- return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key.begin(), key.end())), false);
+ return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key)), false);
}
bool WalletBatch::WriteCryptedDescriptorKey(const uint256& desc_id, const CPubKey& pubkey, const std::vector<unsigned char>& secret)
@@ -365,7 +365,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end());
vchKey.insert(vchKey.end(), pkey.begin(), pkey.end());
- if (Hash(vchKey.begin(), vchKey.end()) != hash)
+ if (Hash(vchKey) != hash)
{
strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt";
return false;
@@ -414,7 +414,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
if (!ssValue.eof()) {
uint256 checksum;
ssValue >> checksum;
- if ((checksum_valid = Hash(vchPrivKey.begin(), vchPrivKey.end()) != checksum)) {
+ if ((checksum_valid = Hash(vchPrivKey) != checksum)) {
strErr = "Error reading wallet database: Crypted key corrupt";
return false;
}
@@ -621,7 +621,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
to_hash.insert(to_hash.end(), pubkey.begin(), pubkey.end());
to_hash.insert(to_hash.end(), pkey.begin(), pkey.end());
- if (Hash(to_hash.begin(), to_hash.end()) != hash)
+ if (Hash(to_hash) != hash)
{
strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt";
return false;
@@ -1021,7 +1021,7 @@ std::unique_ptr<WalletDatabase> CreateWalletDatabase(const fs::path& path)
/** Return object for accessing dummy database with no read/write capabilities. */
std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase()
{
- return MakeUnique<BerkeleyDatabase>();
+ return MakeUnique<DummyDatabase>();
}
/** Return object for accessing temporary in-memory database. */
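The walletdb hunks above use the Span-taking Hash() overload, computing the double-SHA256 checksum directly from the serialized container rather than from an iterator pair. A minimal sketch of the check (the function name is illustrative):

#include <hash.h>
#include <uint256.h>
#include <vector>

bool ExampleChecksumMatches(const std::vector<unsigned char>& serialized, const uint256& expected)
{
    // Previously: Hash(serialized.begin(), serialized.end())
    return Hash(serialized) == expected;
}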
diff --git a/src/walletinitinterface.h b/src/walletinitinterface.h
index f4730273f1..a55e02f2dc 100644
--- a/src/walletinitinterface.h
+++ b/src/walletinitinterface.h
@@ -5,6 +5,8 @@
#ifndef BITCOIN_WALLETINITINTERFACE_H
#define BITCOIN_WALLETINITINTERFACE_H
+class ArgsManager;
+
struct NodeContext;
class WalletInitInterface {
@@ -12,7 +14,7 @@ public:
/** Is the wallet component enabled */
virtual bool HasWalletSupport() const = 0;
/** Get wallet help string */
- virtual void AddWalletOptions() const = 0;
+ virtual void AddWalletOptions(ArgsManager& argsman) const = 0;
/** Check wallet parameter interaction */
virtual bool ParameterInteraction() const = 0;
/** Add wallets that should be opened to list of chain clients. */
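With the signature change above, implementations of WalletInitInterface receive the ArgsManager on which to register (or hide) their options, instead of using the global gArgs. A minimal sketch of a hypothetical no-op implementation, assuming the Construct(NodeContext&) member declared later in this header:

#include <util/system.h>
#include <walletinitinterface.h>

class NoopWalletInit final : public WalletInitInterface {
public:
    bool HasWalletSupport() const override { return false; }
    void AddWalletOptions(ArgsManager& argsman) const override
    {
        // A real implementation registers options via argsman.AddArg(...);
        // a disabled-wallet implementation can hide them instead.
        argsman.AddHiddenArgs({"-wallet=<path>"});
    }
    bool ParameterInteraction() const override { return true; }
    void Construct(NodeContext& node) const override {}
};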