-rw-r--r--  depends/README.md                     |  16
-rw-r--r--  doc/release-notes.md                  |   8
-rw-r--r--  src/Makefile.test.include             |   1
-rw-r--r--  src/bench/Examples.cpp                |   4
-rw-r--r--  src/bench/base58.cpp                  |   6
-rw-r--r--  src/bench/bench.cpp                   | 182
-rw-r--r--  src/bench/bench.h                     | 143
-rw-r--r--  src/bench/bench_bitcoin.cpp           |  48
-rw-r--r--  src/bench/ccoins_caching.cpp          |   2
-rw-r--r--  src/bench/checkblock.cpp              |   4
-rw-r--r--  src/bench/checkqueue.cpp              |  43
-rw-r--r--  src/bench/coin_selection.cpp          |   2
-rw-r--r--  src/bench/crypto_hash.cpp             |  35
-rw-r--r--  src/bench/lockedpool.cpp              |   3
-rw-r--r--  src/bench/mempool_eviction.cpp        |   2
-rw-r--r--  src/bench/prevector_destructor.cpp    |   4
-rw-r--r--  src/bench/rollingbloom.cpp            |  16
-rw-r--r--  src/bench/verify_script.cpp           |   2
-rw-r--r--  src/net_processing.cpp                | 368
-rw-r--r--  src/qt/forms/sendcoinsdialog.ui       |   4
-rw-r--r--  src/qt/sendcoinsdialog.cpp            |  14
-rw-r--r--  src/qt/walletmodel.cpp                |   5
-rw-r--r--  src/qt/walletmodel.h                  |   2
-rw-r--r--  src/rpc/blockchain.cpp                |  17
-rw-r--r--  src/scheduler.cpp                     |   5
-rw-r--r--  src/scheduler.h                       |   2
-rw-r--r--  src/test/blockchain_tests.cpp         | 126
-rw-r--r--  src/test/miner_tests.cpp              |  38
-rw-r--r--  src/test/test_bitcoin.cpp             |   6
-rw-r--r--  src/test/txvalidationcache_tests.cpp  |  21
-rw-r--r--  src/validation.cpp                    |  12
-rw-r--r--  src/validationinterface.cpp           |  17
-rw-r--r--  src/validationinterface.h             |  12
-rw-r--r--  src/wallet/init.cpp                   |   2
-rw-r--r--  src/wallet/test/wallet_tests.cpp      |  17
-rw-r--r--  src/wallet/wallet.cpp                 |   7
-rwxr-xr-x  test/functional/wallet.py             |   3
-rwxr-xr-x  test/util/bitcoin-util-test.py        |  12
38 files changed, 762 insertions, 449 deletions
diff --git a/depends/README.md b/depends/README.md
index 6053c531b4..99eef1952c 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -28,6 +28,22 @@ Common `host-platform-triplets` for cross compilation are:
No other options are needed, the paths are automatically configured.
+Install the required dependencies: Ubuntu & Debian
+--------------------------------------------------
+
+For macOS cross compilation:
+
+ sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libcap-dev libz-dev libbz2-dev python-setuptools
+
+For Win32/Win64 cross compilation:
+
+- see [build-windows.md](../doc/build-windows.md#cross-compilation-for-ubuntu-and-windows-subsystem-for-linux)
+
+For linux (including i386, ARM) cross compilation:
+
+ sudo apt-get install curl g++-aarch64-linux-gnu g++-4.8-aarch64-linux-gnu gcc-4.8-aarch64-linux-gnu binutils-aarch64-linux-gnu g++-arm-linux-gnueabihf g++-4.8-arm-linux-gnueabihf gcc-4.8-arm-linux-gnueabihf binutils-arm-linux-gnueabihf g++-4.8-multilib gcc-4.8-multilib binutils-gold bsdmainutils
+
+
Dependency Options:
The following can be set when running make: make FOO=bar
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 031df7402d..d92666da72 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -67,6 +67,14 @@ Due to a backward-incompatible change in the wallet database, wallets created
with version 0.16.0 will be rejected by previous versions. Also, version 0.16.0
will only create hierarchical deterministic (HD) wallets.
+Replace-By-Fee by default in GUI
+--------------------------------
+The send screen now uses BIP-125 RBF by default, regardless of `-walletrbf`.
+There is a checkbox to mark the transaction as final.
+
+The RPC default remains unchanged: to use RBF, launch with `-walletrbf=1` or
+use the `replaceable` argument for individual transactions.
+
Custom wallet directories
---------------------
The ability to specify a directory other than the default data directory in which to store
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 49a814c3f6..d4d972b2bb 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -33,6 +33,7 @@ BITCOIN_TESTS =\
test/base64_tests.cpp \
test/bech32_tests.cpp \
test/bip32_tests.cpp \
+ test/blockchain_tests.cpp \
test/blockencodings_tests.cpp \
test/bloom_tests.cpp \
test/bswap_tests.cpp \
diff --git a/src/bench/Examples.cpp b/src/bench/Examples.cpp
index 536e450940..47d80e5e01 100644
--- a/src/bench/Examples.cpp
+++ b/src/bench/Examples.cpp
@@ -15,7 +15,7 @@ static void Sleep100ms(benchmark::State& state)
}
}
-BENCHMARK(Sleep100ms);
+BENCHMARK(Sleep100ms, 10);
// Extremely fast-running benchmark:
#include <math.h>
@@ -31,4 +31,4 @@ static void Trig(benchmark::State& state)
}
}
-BENCHMARK(Trig);
+BENCHMARK(Trig, 12 * 1000 * 1000);
diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp
index 2d9a9f2908..294fcc3c33 100644
--- a/src/bench/base58.cpp
+++ b/src/bench/base58.cpp
@@ -54,6 +54,6 @@ static void Base58Decode(benchmark::State& state)
}
-BENCHMARK(Base58Encode);
-BENCHMARK(Base58CheckEncode);
-BENCHMARK(Base58Decode);
+BENCHMARK(Base58Encode, 470 * 1000);
+BENCHMARK(Base58CheckEncode, 320 * 1000);
+BENCHMARK(Base58Decode, 800 * 1000);
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index 1482452814..edbad09ebd 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -8,98 +8,136 @@
#include <assert.h>
#include <iostream>
#include <iomanip>
+#include <algorithm>
+#include <regex>
+#include <numeric>
-benchmark::BenchRunner::BenchmarkMap &benchmark::BenchRunner::benchmarks() {
- static std::map<std::string, benchmark::BenchFunction> benchmarks_map;
- return benchmarks_map;
+void benchmark::ConsolePrinter::header()
+{
+ std::cout << "# Benchmark, evals, iterations, total, min, max, median" << std::endl;
}
-benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func)
+void benchmark::ConsolePrinter::result(const State& state)
{
- benchmarks().insert(std::make_pair(name, func));
+ auto results = state.m_elapsed_results;
+ std::sort(results.begin(), results.end());
+
+ double total = state.m_num_iters * std::accumulate(results.begin(), results.end(), 0.0);
+
+ double front = 0;
+ double back = 0;
+ double median = 0;
+
+ if (!results.empty()) {
+ front = results.front();
+ back = results.back();
+
+ size_t mid = results.size() / 2;
+ median = results[mid];
+ if (0 == results.size() % 2) {
+ median = (results[mid - 1] + results[mid]) / 2;
+ }
+ }
+
+ std::cout << std::setprecision(6);
+ std::cout << state.m_name << ", " << state.m_num_evals << ", " << state.m_num_iters << ", " << total << ", " << front << ", " << back << ", " << median << std::endl;
}
-void
-benchmark::BenchRunner::RunAll(benchmark::duration elapsedTimeForOne)
+void benchmark::ConsolePrinter::footer() {}
+benchmark::PlotlyPrinter::PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height)
+ : m_plotly_url(plotly_url), m_width(width), m_height(height)
{
- perf_init();
- if (std::ratio_less_equal<benchmark::clock::period, std::micro>::value) {
- std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n";
- }
- std::cout << "#Benchmark" << "," << "count" << "," << "min(ns)" << "," << "max(ns)" << "," << "average(ns)" << ","
- << "min_cycles" << "," << "max_cycles" << "," << "average_cycles" << "\n";
+}
- for (const auto &p: benchmarks()) {
- State state(p.first, elapsedTimeForOne);
- p.second(state);
- }
- perf_fini();
+void benchmark::PlotlyPrinter::header()
+{
+ std::cout << "<html><head>"
+ << "<script src=\"" << m_plotly_url << "\"></script>"
+ << "</head><body><div id=\"myDiv\" style=\"width:" << m_width << "px; height:" << m_height << "px\"></div>"
+ << "<script> var data = ["
+ << std::endl;
}
-bool benchmark::State::KeepRunning()
+void benchmark::PlotlyPrinter::result(const State& state)
{
- if (count & countMask) {
- ++count;
- return true;
+ std::cout << "{ " << std::endl
+ << " name: '" << state.m_name << "', " << std::endl
+ << " y: [";
+
+ const char* prefix = "";
+ for (const auto& e : state.m_elapsed_results) {
+ std::cout << prefix << std::setprecision(6) << e;
+ prefix = ", ";
}
- time_point now;
+ std::cout << "]," << std::endl
+ << " boxpoints: 'all', jitter: 0.3, pointpos: 0, type: 'box',"
+ << std::endl
+ << "}," << std::endl;
+}
+
+void benchmark::PlotlyPrinter::footer()
+{
+ std::cout << "]; var layout = { showlegend: false, yaxis: { rangemode: 'tozero', autorange: true } };"
+ << "Plotly.newPlot('myDiv', data, layout);"
+ << "</script></body></html>";
+}
- uint64_t nowCycles;
- if (count == 0) {
- lastTime = beginTime = now = clock::now();
- lastCycles = beginCycles = nowCycles = perf_cpucycles();
+
+benchmark::BenchRunner::BenchmarkMap& benchmark::BenchRunner::benchmarks()
+{
+ static std::map<std::string, Bench> benchmarks_map;
+ return benchmarks_map;
+}
+
+benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func, uint64_t num_iters_for_one_second)
+{
+ benchmarks().insert(std::make_pair(name, Bench{func, num_iters_for_one_second}));
+}
+
+void benchmark::BenchRunner::RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only)
+{
+ perf_init();
+ if (!std::ratio_less_equal<benchmark::clock::period, std::micro>::value) {
+ std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n";
}
- else {
- now = clock::now();
- auto elapsed = now - lastTime;
- auto elapsedOne = elapsed / (countMask + 1);
- if (elapsedOne < minTime) minTime = elapsedOne;
- if (elapsedOne > maxTime) maxTime = elapsedOne;
-
- // We only use relative values, so don't have to handle 64-bit wrap-around specially
- nowCycles = perf_cpucycles();
- uint64_t elapsedOneCycles = (nowCycles - lastCycles) / (countMask + 1);
- if (elapsedOneCycles < minCycles) minCycles = elapsedOneCycles;
- if (elapsedOneCycles > maxCycles) maxCycles = elapsedOneCycles;
-
- if (elapsed*128 < maxElapsed) {
- // If the execution was much too fast (1/128th of maxElapsed), increase the count mask by 8x and restart timing.
- // The restart avoids including the overhead of this code in the measurement.
- countMask = ((countMask<<3)|7) & ((1LL<<60)-1);
- count = 0;
- minTime = duration::max();
- maxTime = duration::zero();
- minCycles = std::numeric_limits<uint64_t>::max();
- maxCycles = std::numeric_limits<uint64_t>::min();
- return true;
+
+ std::regex reFilter(filter);
+ std::smatch baseMatch;
+
+ printer.header();
+
+ for (const auto& p : benchmarks()) {
+ if (!std::regex_match(p.first, baseMatch, reFilter)) {
+ continue;
+ }
+
+ uint64_t num_iters = static_cast<uint64_t>(p.second.num_iters_for_one_second * scaling);
+ if (0 == num_iters) {
+ num_iters = 1;
}
- if (elapsed*16 < maxElapsed) {
- uint64_t newCountMask = ((countMask<<1)|1) & ((1LL<<60)-1);
- if ((count & newCountMask)==0) {
- countMask = newCountMask;
- }
+ State state(p.first, num_evals, num_iters, printer);
+ if (!is_list_only) {
+ p.second.func(state);
}
+ printer.result(state);
}
- lastTime = now;
- lastCycles = nowCycles;
- ++count;
- if (now - beginTime < maxElapsed) return true; // Keep going
+ printer.footer();
- --count;
+ perf_fini();
+}
- assert(count != 0 && "count == 0 => (now == 0 && beginTime == 0) => return above");
+bool benchmark::State::UpdateTimer(const benchmark::time_point current_time)
+{
+ if (m_start_time != time_point()) {
+ std::chrono::duration<double> diff = current_time - m_start_time;
+ m_elapsed_results.push_back(diff.count() / m_num_iters);
- // Output results
- // Duration casts are only necessary here because hardware with sub-nanosecond clocks
- // will lose precision.
- int64_t min_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(minTime).count();
- int64_t max_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(maxTime).count();
- int64_t avg_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>((now-beginTime)/count).count();
- int64_t averageCycles = (nowCycles-beginCycles)/count;
- std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << min_elapsed << "," << max_elapsed << "," << avg_elapsed << ","
- << minCycles << "," << maxCycles << "," << averageCycles << "\n";
- std::cout.copyfmt(std::ios(nullptr));
+ if (m_elapsed_results.size() == m_num_evals) {
+ return false;
+ }
+ }
- return false;
+ m_num_iters_left = m_num_iters - 1;
+ return true;
}
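
The rewritten bench.cpp above replaces the adaptive count-mask timing with a fixed model: each benchmark runs m_num_evals evaluations of m_num_iters iterations, and UpdateTimer stores one elapsed-time-per-iteration sample per evaluation. The following is a minimal standalone sketch of that measurement loop using only the standard library; the names and the stand-in workload are illustrative, not part of the patch.

    // Standalone sketch of the evals x iterations measurement model used by the
    // new State/UpdateTimer code above (illustrative names and workload).
    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        const uint64_t num_evals = 5;     // analogous to -evals
        const uint64_t num_iters = 1000;  // analogous to num_iters_for_one_second * scaling
        std::vector<double> elapsed_results;

        for (uint64_t eval = 0; eval < num_evals; ++eval) {
            const auto start = std::chrono::steady_clock::now();
            for (uint64_t i = 0; i < num_iters; ++i) {
                volatile double x = static_cast<double>(i) * 1.618; // stand-in workload
                (void)x;
            }
            const std::chrono::duration<double> diff = std::chrono::steady_clock::now() - start;
            // One sample per evaluation: average seconds per iteration.
            elapsed_results.push_back(diff.count() / num_iters);
        }

        for (double r : elapsed_results) std::cout << r << '\n';
    }
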
diff --git a/src/bench/bench.h b/src/bench/bench.h
index 071a5dc9c7..b7d81f0e21 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -9,6 +9,7 @@
#include <limits>
#include <map>
#include <string>
+#include <vector>
#include <chrono>
#include <boost/preprocessor/cat.hpp>
@@ -32,64 +33,110 @@ static void CODE_TO_TIME(benchmark::State& state)
... do any cleanup needed...
}
-BENCHMARK(CODE_TO_TIME);
+// default to running benchmark for 5000 iterations
+BENCHMARK(CODE_TO_TIME, 5000);
*/
-
+
namespace benchmark {
- // In case high_resolution_clock is steady, prefer that, otherwise use steady_clock.
- struct best_clock {
- using hi_res_clock = std::chrono::high_resolution_clock;
- using steady_clock = std::chrono::steady_clock;
- using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type;
- };
- using clock = best_clock::type;
- using time_point = clock::time_point;
- using duration = clock::duration;
-
- class State {
- std::string name;
- duration maxElapsed;
- time_point beginTime, lastTime;
- duration minTime, maxTime;
- uint64_t count;
- uint64_t countMask;
- uint64_t beginCycles;
- uint64_t lastCycles;
- uint64_t minCycles;
- uint64_t maxCycles;
- public:
- State(std::string _name, duration _maxElapsed) :
- name(_name),
- maxElapsed(_maxElapsed),
- minTime(duration::max()),
- maxTime(duration::zero()),
- count(0),
- countMask(1),
- beginCycles(0),
- lastCycles(0),
- minCycles(std::numeric_limits<uint64_t>::max()),
- maxCycles(std::numeric_limits<uint64_t>::min()) {
- }
- bool KeepRunning();
- };
+// In case high_resolution_clock is steady, prefer that, otherwise use steady_clock.
+struct best_clock {
+ using hi_res_clock = std::chrono::high_resolution_clock;
+ using steady_clock = std::chrono::steady_clock;
+ using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type;
+};
+using clock = best_clock::type;
+using time_point = clock::time_point;
+using duration = clock::duration;
+
+class Printer;
+
+class State
+{
+public:
+ std::string m_name;
+ uint64_t m_num_iters_left;
+ const uint64_t m_num_iters;
+ const uint64_t m_num_evals;
+ std::vector<double> m_elapsed_results;
+ time_point m_start_time;
+
+ bool UpdateTimer(time_point finish_time);
- typedef std::function<void(State&)> BenchFunction;
+ State(std::string name, uint64_t num_evals, double num_iters, Printer& printer) : m_name(name), m_num_iters_left(0), m_num_iters(num_iters), m_num_evals(num_evals)
+ {
+ }
- class BenchRunner
+ inline bool KeepRunning()
{
- typedef std::map<std::string, BenchFunction> BenchmarkMap;
- static BenchmarkMap &benchmarks();
+ if (m_num_iters_left--) {
+ return true;
+ }
+
+ bool result = UpdateTimer(clock::now());
+ // measure again so runtime of UpdateTimer is not included
+ m_start_time = clock::now();
+ return result;
+ }
+};
- public:
- BenchRunner(std::string name, BenchFunction func);
+typedef std::function<void(State&)> BenchFunction;
- static void RunAll(duration elapsedTimeForOne = std::chrono::seconds(1));
+class BenchRunner
+{
+ struct Bench {
+ BenchFunction func;
+ uint64_t num_iters_for_one_second;
};
+ typedef std::map<std::string, Bench> BenchmarkMap;
+ static BenchmarkMap& benchmarks();
+
+public:
+ BenchRunner(std::string name, BenchFunction func, uint64_t num_iters_for_one_second);
+
+ static void RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only);
+};
+
+// interface to output benchmark results.
+class Printer
+{
+public:
+ virtual ~Printer() {}
+ virtual void header() = 0;
+ virtual void result(const State& state) = 0;
+ virtual void footer() = 0;
+};
+
+// default printer to console, shows min, max, median.
+class ConsolePrinter : public Printer
+{
+public:
+ void header();
+ void result(const State& state);
+ void footer();
+};
+
+// creates box plot with plotly.js
+class PlotlyPrinter : public Printer
+{
+public:
+ PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height);
+ void header();
+ void result(const State& state);
+ void footer();
+
+private:
+ std::string m_plotly_url;
+ int64_t m_width;
+ int64_t m_height;
+};
}
-// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo);
-#define BENCHMARK(n) \
- benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n);
+
+// BENCHMARK(foo, num_iters_for_one_second) expands to: benchmark::BenchRunner bench_11foo("foo", foo, num_iters_for_one_second);
+// Choose a num_iters_for_one_second that takes roughly 1 second. The goal is that all benchmarks should take approximately
+// the same time, and a scaling factor can be used so that the total time is appropriate for your system.
+#define BENCHMARK(n, num_iters_for_one_second) \
+ benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n, (num_iters_for_one_second));
#endif // BITCOIN_BENCH_BENCH_H
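
With the new two-argument macro documented above, each benchmark declares how many iterations take roughly one second, and the body performs one unit of work per KeepRunning() call. A hypothetical benchmark written against the updated framework might look like this (VectorAssign is an illustrative name, not part of the patch):

    #include <bench/bench.h>

    #include <vector>

    // Hypothetical example using the new two-argument macro: the body does one
    // unit of work per KeepRunning() call; iteration counts are handled by the framework.
    static void VectorAssign(benchmark::State& state)
    {
        std::vector<int> v;
        while (state.KeepRunning()) {
            v.assign(100, 42);
        }
    }

    // Second argument: iteration count that takes roughly one second on a reference machine.
    BENCHMARK(VectorAssign, 1000 * 1000);
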
diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp
index 84e51d809a..5e0432f0ee 100644
--- a/src/bench/bench_bitcoin.cpp
+++ b/src/bench/bench_bitcoin.cpp
@@ -10,16 +10,62 @@
#include <util.h>
#include <random.h>
+#include <boost/lexical_cast.hpp>
+
+#include <memory>
+
+static const int64_t DEFAULT_BENCH_EVALUATIONS = 5;
+static const char* DEFAULT_BENCH_FILTER = ".*";
+static const char* DEFAULT_BENCH_SCALING = "1.0";
+static const char* DEFAULT_BENCH_PRINTER = "console";
+static const char* DEFAULT_PLOT_PLOTLYURL = "https://cdn.plot.ly/plotly-latest.min.js";
+static const int64_t DEFAULT_PLOT_WIDTH = 1024;
+static const int64_t DEFAULT_PLOT_HEIGHT = 768;
+
int
main(int argc, char** argv)
{
+ gArgs.ParseParameters(argc, argv);
+
+ if (gArgs.IsArgSet("-?") || gArgs.IsArgSet("-h") || gArgs.IsArgSet("-help")) {
+ std::cout << HelpMessageGroup(_("Options:"))
+ << HelpMessageOpt("-?", _("Print this help message and exit"))
+ << HelpMessageOpt("-list", _("List benchmarks without executing them. Can be combined with -scaling and -filter"))
+ << HelpMessageOpt("-evals=<n>", strprintf(_("Number of measurement evaluations to perform. (default: %u)"), DEFAULT_BENCH_EVALUATIONS))
+ << HelpMessageOpt("-filter=<regex>", strprintf(_("Regular expression filter to select benchmark by name (default: %s)"), DEFAULT_BENCH_FILTER))
+ << HelpMessageOpt("-scaling=<n>", strprintf(_("Scaling factor for benchmark's runtime (default: %u)"), DEFAULT_BENCH_SCALING))
+ << HelpMessageOpt("-printer=(console|plot)", strprintf(_("Choose printer format. console: print data to console. plot: Print results as HTML graph (default: %s)"), DEFAULT_BENCH_PRINTER))
+ << HelpMessageOpt("-plot-plotlyurl=<uri>", strprintf(_("URL to use for plotly.js (default: %s)"), DEFAULT_PLOT_PLOTLYURL))
+ << HelpMessageOpt("-plot-width=<x>", strprintf(_("Plot width in pixel (default: %u)"), DEFAULT_PLOT_WIDTH))
+ << HelpMessageOpt("-plot-height=<x>", strprintf(_("Plot height in pixel (default: %u)"), DEFAULT_PLOT_HEIGHT));
+
+ return 0;
+ }
+
SHA256AutoDetect();
RandomInit();
ECC_Start();
SetupEnvironment();
fPrintToDebugLog = false; // don't want to write to debug.log file
- benchmark::BenchRunner::RunAll();
+ int64_t evaluations = gArgs.GetArg("-evals", DEFAULT_BENCH_EVALUATIONS);
+ std::string regex_filter = gArgs.GetArg("-filter", DEFAULT_BENCH_FILTER);
+ std::string scaling_str = gArgs.GetArg("-scaling", DEFAULT_BENCH_SCALING);
+ bool is_list_only = gArgs.GetBoolArg("-list", false);
+
+ double scaling_factor = boost::lexical_cast<double>(scaling_str);
+
+
+ std::unique_ptr<benchmark::Printer> printer(new benchmark::ConsolePrinter());
+ std::string printer_arg = gArgs.GetArg("-printer", DEFAULT_BENCH_PRINTER);
+ if ("plot" == printer_arg) {
+ printer.reset(new benchmark::PlotlyPrinter(
+ gArgs.GetArg("-plot-plotlyurl", DEFAULT_PLOT_PLOTLYURL),
+ gArgs.GetArg("-plot-width", DEFAULT_PLOT_WIDTH),
+ gArgs.GetArg("-plot-height", DEFAULT_PLOT_HEIGHT)));
+ }
+
+ benchmark::BenchRunner::RunAll(*printer, evaluations, scaling_factor, regex_filter, is_list_only);
ECC_Stop();
}
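
The -filter argument shown above is handed to RunAll, which applies it with std::regex_match, so the expression has to match the entire benchmark name rather than a substring; the default ".*" therefore selects everything, and -scaling multiplies each benchmark's num_iters_for_one_second. A small standalone illustration of the full-match semantics, using names from the base58 benchmarks earlier in this diff:

    // Standalone illustration of the -filter semantics: RunAll (above) uses
    // std::regex_match, so the pattern must cover the whole benchmark name.
    #include <iostream>
    #include <regex>
    #include <string>

    int main()
    {
        const std::regex partial("Base58");  // not a full match of "Base58Encode"
        const std::regex full("Base58.*");   // matches Base58Encode, Base58Decode, ...
        const std::string name = "Base58Encode";

        std::cout << std::boolalpha
                  << std::regex_match(name, partial) << '\n'  // false
                  << std::regex_match(name, full) << '\n';    // true
    }
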
diff --git a/src/bench/ccoins_caching.cpp b/src/bench/ccoins_caching.cpp
index 89ba3d3d21..1bce0fffbd 100644
--- a/src/bench/ccoins_caching.cpp
+++ b/src/bench/ccoins_caching.cpp
@@ -84,4 +84,4 @@ static void CCoinsCaching(benchmark::State& state)
}
}
-BENCHMARK(CCoinsCaching);
+BENCHMARK(CCoinsCaching, 170 * 1000);
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index 9533b3c711..af77c8cd8b 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -52,5 +52,5 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
}
}
-BENCHMARK(DeserializeBlockTest);
-BENCHMARK(DeserializeAndCheckBlockTest);
+BENCHMARK(DeserializeBlockTest, 130);
+BENCHMARK(DeserializeAndCheckBlockTest, 160);
diff --git a/src/bench/checkqueue.cpp b/src/bench/checkqueue.cpp
index 35750aa1b6..4d41e28db6 100644
--- a/src/bench/checkqueue.cpp
+++ b/src/bench/checkqueue.cpp
@@ -12,51 +12,11 @@
#include <random.h>
-// This Benchmark tests the CheckQueue with the lightest
-// weight Checks, so it should make any lock contention
-// particularly visible
static const int MIN_CORES = 2;
static const size_t BATCHES = 101;
static const size_t BATCH_SIZE = 30;
static const int PREVECTOR_SIZE = 28;
static const unsigned int QUEUE_BATCH_SIZE = 128;
-static void CCheckQueueSpeed(benchmark::State& state)
-{
- struct FakeJobNoWork {
- bool operator()()
- {
- return true;
- }
- void swap(FakeJobNoWork& x){};
- };
- CCheckQueue<FakeJobNoWork> queue {QUEUE_BATCH_SIZE};
- boost::thread_group tg;
- for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) {
- tg.create_thread([&]{queue.Thread();});
- }
- while (state.KeepRunning()) {
- CCheckQueueControl<FakeJobNoWork> control(&queue);
-
- // We call Add a number of times to simulate the behavior of adding
- // a block of transactions at once.
-
- std::vector<std::vector<FakeJobNoWork>> vBatches(BATCHES);
- for (auto& vChecks : vBatches) {
- vChecks.resize(BATCH_SIZE);
- }
- for (auto& vChecks : vBatches) {
- // We can't make vChecks in the inner loop because we want to measure
- // the cost of getting the memory to each thread and we might get the same
- // memory
- control.Add(vChecks);
- }
- // control waits for completion by RAII, but
- // it is done explicitly here for clarity
- control.Wait();
- }
- tg.interrupt_all();
- tg.join_all();
-}
// This Benchmark tests the CheckQueue with a slightly realistic workload,
// where checks all contain a prevector that is indirect 50% of the time
@@ -99,5 +59,4 @@ static void CCheckQueueSpeedPrevectorJob(benchmark::State& state)
tg.interrupt_all();
tg.join_all();
}
-BENCHMARK(CCheckQueueSpeed);
-BENCHMARK(CCheckQueueSpeedPrevectorJob);
+BENCHMARK(CCheckQueueSpeedPrevectorJob, 1400);
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index ff57f88170..b9353293d4 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -56,4 +56,4 @@ static void CoinSelection(benchmark::State& state)
}
}
-BENCHMARK(CoinSelection);
+BENCHMARK(CoinSelection, 650);
diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp
index b37b5cad62..bb89718074 100644
--- a/src/bench/crypto_hash.cpp
+++ b/src/bench/crypto_hash.cpp
@@ -46,9 +46,9 @@ static void SHA256_32b(benchmark::State& state)
{
std::vector<uint8_t> in(32,0);
while (state.KeepRunning()) {
- for (int i = 0; i < 1000000; i++) {
- CSHA256().Write(in.data(), in.size()).Finalize(in.data());
- }
+ CSHA256()
+ .Write(in.data(), in.size())
+ .Finalize(in.data());
}
}
@@ -63,10 +63,9 @@ static void SHA512(benchmark::State& state)
static void SipHash_32b(benchmark::State& state)
{
uint256 x;
+ uint64_t k1 = 0;
while (state.KeepRunning()) {
- for (int i = 0; i < 1000000; i++) {
- *((uint64_t*)x.begin()) = SipHashUint256(0, i, x);
- }
+ *((uint64_t*)x.begin()) = SipHashUint256(0, ++k1, x);
}
}
@@ -75,9 +74,7 @@ static void FastRandom_32bit(benchmark::State& state)
FastRandomContext rng(true);
uint32_t x = 0;
while (state.KeepRunning()) {
- for (int i = 0; i < 1000000; i++) {
- x += rng.rand32();
- }
+ x += rng.rand32();
}
}
@@ -86,18 +83,16 @@ static void FastRandom_1bit(benchmark::State& state)
FastRandomContext rng(true);
uint32_t x = 0;
while (state.KeepRunning()) {
- for (int i = 0; i < 1000000; i++) {
- x += rng.randbool();
- }
+ x += rng.randbool();
}
}
-BENCHMARK(RIPEMD160);
-BENCHMARK(SHA1);
-BENCHMARK(SHA256);
-BENCHMARK(SHA512);
+BENCHMARK(RIPEMD160, 440);
+BENCHMARK(SHA1, 570);
+BENCHMARK(SHA256, 340);
+BENCHMARK(SHA512, 330);
-BENCHMARK(SHA256_32b);
-BENCHMARK(SipHash_32b);
-BENCHMARK(FastRandom_32bit);
-BENCHMARK(FastRandom_1bit);
+BENCHMARK(SHA256_32b, 4700 * 1000);
+BENCHMARK(SipHash_32b, 40 * 1000 * 1000);
+BENCHMARK(FastRandom_32bit, 110 * 1000 * 1000);
+BENCHMARK(FastRandom_1bit, 440 * 1000 * 1000);
diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp
index b0bfa95144..914e37a2ed 100644
--- a/src/bench/lockedpool.cpp
+++ b/src/bench/lockedpool.cpp
@@ -43,5 +43,4 @@ static void BenchLockedPool(benchmark::State& state)
addr.clear();
}
-BENCHMARK(BenchLockedPool);
-
+BENCHMARK(BenchLockedPool, 530);
diff --git a/src/bench/mempool_eviction.cpp b/src/bench/mempool_eviction.cpp
index eda6edbb23..de9f6432e3 100644
--- a/src/bench/mempool_eviction.cpp
+++ b/src/bench/mempool_eviction.cpp
@@ -111,4 +111,4 @@ static void MempoolEviction(benchmark::State& state)
}
}
-BENCHMARK(MempoolEviction);
+BENCHMARK(MempoolEviction, 41000);
diff --git a/src/bench/prevector_destructor.cpp b/src/bench/prevector_destructor.cpp
index de7ecab737..39d0ee5eb1 100644
--- a/src/bench/prevector_destructor.cpp
+++ b/src/bench/prevector_destructor.cpp
@@ -32,5 +32,5 @@ static void PrevectorClear(benchmark::State& state)
}
}
-BENCHMARK(PrevectorDestructor);
-BENCHMARK(PrevectorClear);
+BENCHMARK(PrevectorDestructor, 5700);
+BENCHMARK(PrevectorClear, 5600);
diff --git a/src/bench/rollingbloom.cpp b/src/bench/rollingbloom.cpp
index 452099b800..031355c06e 100644
--- a/src/bench/rollingbloom.cpp
+++ b/src/bench/rollingbloom.cpp
@@ -12,8 +12,6 @@ static void RollingBloom(benchmark::State& state)
CRollingBloomFilter filter(120000, 0.000001);
std::vector<unsigned char> data(32);
uint32_t count = 0;
- uint32_t nEntriesPerGeneration = (120000 + 1) / 2;
- uint32_t countnow = 0;
uint64_t match = 0;
while (state.KeepRunning()) {
count++;
@@ -21,16 +19,8 @@ static void RollingBloom(benchmark::State& state)
data[1] = count >> 8;
data[2] = count >> 16;
data[3] = count >> 24;
- if (countnow == nEntriesPerGeneration) {
- auto b = benchmark::clock::now();
- filter.insert(data);
- auto total = std::chrono::duration_cast<std::chrono::nanoseconds>(benchmark::clock::now() - b).count();
- std::cout << "RollingBloom-refresh,1," << total << "," << total << "," << total << "\n";
- countnow = 0;
- } else {
- filter.insert(data);
- }
- countnow++;
+ filter.insert(data);
+
data[0] = count >> 24;
data[1] = count >> 16;
data[2] = count >> 8;
@@ -39,4 +29,4 @@ static void RollingBloom(benchmark::State& state)
}
}
-BENCHMARK(RollingBloom);
+BENCHMARK(RollingBloom, 1500 * 1000);
diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp
index bfa5806c9d..af6d626ed0 100644
--- a/src/bench/verify_script.cpp
+++ b/src/bench/verify_script.cpp
@@ -105,4 +105,4 @@ static void VerifyScriptBench(benchmark::State& state)
}
}
-BENCHMARK(VerifyScriptBench);
+BENCHMARK(VerifyScriptBench, 6300);
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 20426eaceb..76e82a5ef6 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -51,12 +51,13 @@ struct COrphanTx {
NodeId fromPeer;
int64_t nTimeExpire;
};
-std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
-std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
-void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+static CCriticalSection g_cs_orphans;
+std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
+std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
+void EraseOrphansFor(NodeId peer);
-static size_t vExtraTxnForCompactIt = 0;
-static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main);
+static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
+static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
@@ -127,7 +128,7 @@ namespace {
int g_outbound_peers_with_protect_from_disconnect = 0;
/** When our tip was last updated. */
- int64_t g_last_tip_update = 0;
+ std::atomic<int64_t> g_last_tip_update(0);
/** Relay map, protected by cs_main. */
typedef std::map<uint256, CTransactionRef> MapRelay;
@@ -631,7 +632,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
// mapOrphanTransactions
//
-void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
if (max_extra_txn <= 0)
@@ -642,7 +643,7 @@ void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_RE
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}
-bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
const uint256& hash = tx->GetHash();
if (mapOrphanTransactions.count(hash))
@@ -675,7 +676,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
return true;
}
-int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
@@ -695,6 +696,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
void EraseOrphansFor(NodeId peer)
{
+ LOCK(g_cs_orphans);
int nErased = 0;
std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
@@ -709,8 +711,10 @@ void EraseOrphansFor(NodeId peer)
}
-unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
+ LOCK(g_cs_orphans);
+
unsigned int nEvicted = 0;
static int64_t nNextSweep;
int64_t nNow = GetTime();
@@ -804,7 +808,7 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &schedu
}
void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
- LOCK(cs_main);
+ LOCK(g_cs_orphans);
std::vector<uint256> vOrphanErase;
@@ -971,9 +975,13 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
recentRejects->reset();
}
+ {
+ LOCK(g_cs_orphans);
+ if (mapOrphanTransactions.count(inv.hash)) return true;
+ }
+
return recentRejects->contains(inv.hash) ||
mempool.exists(inv.hash) ||
- mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
}
@@ -1030,180 +1038,198 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connma
connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
-void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
+void static ProcessGetBlockData(CNode* pfrom, const Consensus::Params& consensusParams, const CInv& inv, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
- std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
- std::vector<CInv> vNotFound;
- const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
- LOCK(cs_main);
-
- while (it != pfrom->vRecvGetData.end()) {
- // Don't bother if send buffer is too full to respond anyway
- if (pfrom->fPauseSend)
- break;
+ bool send = false;
+ std::shared_ptr<const CBlock> a_recent_block;
+ std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
+ bool fWitnessesPresentInARecentCompactBlock;
+ {
+ LOCK(cs_most_recent_block);
+ a_recent_block = most_recent_block;
+ a_recent_compact_block = most_recent_compact_block;
+ fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
+ }
- const CInv &inv = *it;
+ bool need_activate_chain = false;
+ {
+ LOCK(cs_main);
+ BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
+ if (mi != mapBlockIndex.end())
{
- if (interruptMsgProc)
- return;
+ if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
+ mi->second->IsValid(BLOCK_VALID_TREE)) {
+ // If we have the block and all of its parents, but have not yet validated it,
+ // we might be in the middle of connecting it (ie in the unlock of cs_main
+ // before ActivateBestChain but after AcceptBlock).
+ // In this case, we need to run ActivateBestChain prior to checking the relay
+ // conditions below.
+ need_activate_chain = true;
+ }
+ }
+ } // release cs_main before calling ActivateBestChain
+ if (need_activate_chain) {
+ CValidationState dummy;
+ ActivateBestChain(dummy, Params(), a_recent_block);
+ }
- it++;
+ LOCK(cs_main);
+ BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
+ if (mi != mapBlockIndex.end()) {
+ send = BlockRequestAllowed(mi->second, consensusParams);
+ if (!send) {
+ LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
+ }
+ }
+ const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+ // disconnect node in case we have reached the outbound limit for serving historical blocks
+ // never disconnect whitelisted nodes
+ if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
+ {
+ LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
+
+ //disconnect node
+ pfrom->fDisconnect = true;
+ send = false;
+ }
+ // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
+ if (send && !pfrom->fWhitelisted && (
+ (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
+ )) {
+ LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
+ //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
+ pfrom->fDisconnect = true;
+ send = false;
+ }
+ // Pruned nodes may have deleted the block, so check whether
+ // it's available before trying to send.
+ if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
+ {
+ std::shared_ptr<const CBlock> pblock;
+ if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
+ pblock = a_recent_block;
+ } else {
+ // Send block from disk
+ std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
+ if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
+ assert(!"cannot load block from disk");
+ pblock = pblockRead;
+ }
+ if (inv.type == MSG_BLOCK)
+ connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
+ else if (inv.type == MSG_WITNESS_BLOCK)
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
+ else if (inv.type == MSG_FILTERED_BLOCK)
+ {
+ bool sendMerkleBlock = false;
+ CMerkleBlock merkleBlock;
{
- bool send = false;
- BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
- std::shared_ptr<const CBlock> a_recent_block;
- std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
- bool fWitnessesPresentInARecentCompactBlock;
- {
- LOCK(cs_most_recent_block);
- a_recent_block = most_recent_block;
- a_recent_compact_block = most_recent_compact_block;
- fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
+ LOCK(pfrom->cs_filter);
+ if (pfrom->pfilter) {
+ sendMerkleBlock = true;
+ merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
}
- if (mi != mapBlockIndex.end())
- {
- if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
- mi->second->IsValid(BLOCK_VALID_TREE)) {
- // If we have the block and all of its parents, but have not yet validated it,
- // we might be in the middle of connecting it (ie in the unlock of cs_main
- // before ActivateBestChain but after AcceptBlock).
- // In this case, we need to run ActivateBestChain prior to checking the relay
- // conditions below.
- CValidationState dummy;
- ActivateBestChain(dummy, Params(), a_recent_block);
- }
- send = BlockRequestAllowed(mi->second, consensusParams);
- if (!send) {
- LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
- }
+ }
+ if (sendMerkleBlock) {
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
+ // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
+ // This avoids hurting performance by pointlessly requiring a round-trip
+ // Note that there is currently no way for a node to request any single transactions we didn't send here -
+ // they must either disconnect and retry or request the full block.
+ // Thus, the protocol spec specified allows for us to provide duplicate txn here,
+ // however we MUST always provide at least what the remote peer needs
+ typedef std::pair<unsigned int, uint256> PairType;
+ for (PairType& pair : merkleBlock.vMatchedTxn)
+ connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
+ }
+ // else
+ // no response
+ }
+ else if (inv.type == MSG_CMPCT_BLOCK)
+ {
+ // If a peer is asking for old blocks, we're almost guaranteed
+ // they won't have a useful mempool to match against a compact block,
+ // and we don't feel like constructing the object for them, so
+ // instead we respond with the full, non-compact block.
+ bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
+ int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
+ if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
+ if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
+ } else {
+ CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
- // disconnect node in case we have reached the outbound limit for serving historical blocks
- // never disconnect whitelisted nodes
- if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
- {
- LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
+ } else {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
+ }
+ }
- //disconnect node
- pfrom->fDisconnect = true;
- send = false;
- }
- // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
- if (send && !pfrom->fWhitelisted && (
- (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
- )) {
- LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
+ // Trigger the peer node to send a getblocks request for the next batch of inventory
+ if (inv.hash == pfrom->hashContinue)
+ {
+ // Bypass PushInventory, this must send even if redundant,
+ // and we want it right after the last block so they don't
+ // wait for other stuff first.
+ std::vector<CInv> vInv;
+ vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
+ pfrom->hashContinue.SetNull();
+ }
+ }
+}
- //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
- pfrom->fDisconnect = true;
- send = false;
- }
- // Pruned nodes may have deleted the block, so check whether
- // it's available before trying to send.
- if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
- {
- std::shared_ptr<const CBlock> pblock;
- if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
- pblock = a_recent_block;
- } else {
- // Send block from disk
- std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
- if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
- assert(!"cannot load block from disk");
- pblock = pblockRead;
- }
- if (inv.type == MSG_BLOCK)
- connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
- else if (inv.type == MSG_WITNESS_BLOCK)
- connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
- else if (inv.type == MSG_FILTERED_BLOCK)
- {
- bool sendMerkleBlock = false;
- CMerkleBlock merkleBlock;
- {
- LOCK(pfrom->cs_filter);
- if (pfrom->pfilter) {
- sendMerkleBlock = true;
- merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
- }
- }
- if (sendMerkleBlock) {
- connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
- // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
- // This avoids hurting performance by pointlessly requiring a round-trip
- // Note that there is currently no way for a node to request any single transactions we didn't send here -
- // they must either disconnect and retry or request the full block.
- // Thus, the protocol spec specified allows for us to provide duplicate txn here,
- // however we MUST always provide at least what the remote peer needs
- typedef std::pair<unsigned int, uint256> PairType;
- for (PairType& pair : merkleBlock.vMatchedTxn)
- connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
- }
- // else
- // no response
- }
- else if (inv.type == MSG_CMPCT_BLOCK)
- {
- // If a peer is asking for old blocks, we're almost guaranteed
- // they won't have a useful mempool to match against a compact block,
- // and we don't feel like constructing the object for them, so
- // instead we respond with the full, non-compact block.
- bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
- int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
- if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
- if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
- } else {
- CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
- }
- } else {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
- }
- }
+void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
+{
+ AssertLockNotHeld(cs_main);
- // Trigger the peer node to send a getblocks request for the next batch of inventory
- if (inv.hash == pfrom->hashContinue)
- {
- // Bypass PushInventory, this must send even if redundant,
- // and we want it right after the last block so they don't
- // wait for other stuff first.
- std::vector<CInv> vInv;
- vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
- connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
- pfrom->hashContinue.SetNull();
- }
- }
- }
- else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
- {
- // Send stream from relay memory
- bool push = false;
- auto mi = mapRelay.find(inv.hash);
- int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
- if (mi != mapRelay.end()) {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+ std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
+ std::vector<CInv> vNotFound;
+ const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+ {
+ LOCK(cs_main);
+
+ while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
+ if (interruptMsgProc)
+ return;
+ // Don't bother if send buffer is too full to respond anyway
+ if (pfrom->fPauseSend)
+ break;
+
+ const CInv &inv = *it;
+ it++;
+
+ // Send stream from relay memory
+ bool push = false;
+ auto mi = mapRelay.find(inv.hash);
+ int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
+ if (mi != mapRelay.end()) {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+ push = true;
+ } else if (pfrom->timeLastMempoolReq) {
+ auto txinfo = mempool.info(inv.hash);
+ // To protect privacy, do not answer getdata using the mempool when
+ // that TX couldn't have been INVed in reply to a MEMPOOL request.
+ if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
push = true;
- } else if (pfrom->timeLastMempoolReq) {
- auto txinfo = mempool.info(inv.hash);
- // To protect privacy, do not answer getdata using the mempool when
- // that TX couldn't have been INVed in reply to a MEMPOOL request.
- if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
- push = true;
- }
- }
- if (!push) {
- vNotFound.push_back(inv);
}
}
+ if (!push) {
+ vNotFound.push_back(inv);
+ }
// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
+ }
+ } // release cs_main
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
- break;
+ if (it != pfrom->vRecvGetData.end()) {
+ const CInv &inv = *it;
+ it++;
+ if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) {
+ ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc);
}
}
@@ -2008,7 +2034,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
inv.hash = req.blockhash;
pfrom->vRecvGetData.push_back(inv);
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
+ // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
return true;
}
@@ -2101,7 +2127,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
- LOCK(cs_main);
+ LOCK2(cs_main, g_cs_orphans);
bool fMissingInputs = false;
CValidationState state;
@@ -2324,7 +2350,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
bool fBlockReconstructed = false;
{
- LOCK(cs_main);
+ LOCK2(cs_main, g_cs_orphans);
// If AcceptBlockHeader returned true, it set pindex
assert(pindex);
UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
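
The net_processing.cpp changes above introduce g_cs_orphans as a dedicated lock for the orphan-transaction maps: paths that touch only orphan state take the narrower lock, while paths that also need validation state take both via LOCK2(cs_main, g_cs_orphans). The sketch below illustrates that general pattern with standard-library primitives rather than Bitcoin Core's CCriticalSection/LOCK macros; all names are illustrative.

    // Standard-library sketch of splitting state out from a broad lock into a
    // dedicated mutex, taking both locks together where both states are touched.
    #include <map>
    #include <mutex>
    #include <string>

    std::mutex cs_main_like;                // stands in for cs_main
    std::mutex cs_orphans_like;             // stands in for g_cs_orphans
    std::map<int, std::string> orphan_map;  // guarded by cs_orphans_like

    void EraseOrphanLike(int key)
    {
        // Only orphan state is touched: take only the new, narrower lock.
        std::lock_guard<std::mutex> lock(cs_orphans_like);
        orphan_map.erase(key);
    }

    void ProcessTransactionLike(int key)
    {
        // Both validation state and orphan state are touched: acquire both locks
        // together, analogous to LOCK2(cs_main, g_cs_orphans) in the patch.
        std::scoped_lock lock(cs_main_like, cs_orphans_like);
        orphan_map[key] = "tx";
        // ... work that also needs the main lock ...
    }
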
diff --git a/src/qt/forms/sendcoinsdialog.ui b/src/qt/forms/sendcoinsdialog.ui
index c6fd708cdf..195a5560f7 100644
--- a/src/qt/forms/sendcoinsdialog.ui
+++ b/src/qt/forms/sendcoinsdialog.ui
@@ -1108,10 +1108,10 @@
<item>
<widget class="QCheckBox" name="optInRBF">
<property name="text">
- <string>Allow increasing fee</string>
+ <string>Enable Replace-By-Fee</string>
</property>
<property name="toolTip">
- <string>This allows you to increase the fee later if the transaction takes a long time to confirm. This will also cause the recommended fee to be lower. ("Replace-By-Fee", BIP 125)</string>
+ <string>With Replace-By-Fee (BIP-125) you can increase a transaction's fee after it is sent. Without this, a higher fee may be recommended to compensate for increased transaction delay risk.</string>
</property>
</widget>
</item>
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index faddfcd4d0..9473198dfc 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -181,7 +181,7 @@ void SendCoinsDialog::setModel(WalletModel *_model)
updateSmartFeeLabel();
// set default rbf checkbox state
- ui->optInRBF->setCheckState(model->getDefaultWalletRbf() ? Qt::Checked : Qt::Unchecked);
+ ui->optInRBF->setCheckState(Qt::Checked);
// set the smartfee-sliders default value (wallets default conf.target or last stored value)
QSettings settings;
@@ -339,12 +339,14 @@ void SendCoinsDialog::on_sendButton_clicked()
questionString.append(QString("<span style='font-size:10pt;font-weight:normal;'><br />(=%2)</span>")
.arg(alternativeUnits.join(" " + tr("or") + "<br />")));
- if (ui->optInRBF->isChecked())
- {
- questionString.append("<hr /><span>");
- questionString.append(tr("You can increase the fee later (signals Replace-By-Fee)."));
- questionString.append("</span>");
+ questionString.append("<hr /><span>");
+ if (ui->optInRBF->isChecked()) {
+ questionString.append(tr("You can increase the fee later (signals Replace-By-Fee, BIP-125)."));
+ } else {
+ questionString.append(tr("Not signalling Replace-By-Fee, BIP-125."));
}
+ questionString.append("</span>");
+
SendConfirmationDialog confirmationDialog(tr("Confirm send coins"),
questionString.arg(formatted.join("<br />")), SEND_CONFIRM_DELAY, this);
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index a38e233608..e0193ed475 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -737,8 +737,3 @@ int WalletModel::getDefaultConfirmTarget() const
{
return nTxConfirmTarget;
}
-
-bool WalletModel::getDefaultWalletRbf() const
-{
- return fWalletRbf;
-}
diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h
index 028146c187..8009e54b19 100644
--- a/src/qt/walletmodel.h
+++ b/src/qt/walletmodel.h
@@ -216,8 +216,6 @@ public:
int getDefaultConfirmTarget() const;
- bool getDefaultWalletRbf() const;
-
private:
CWallet *wallet;
bool fHaveWatchOnly;
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index b7895b86f7..426a6b3923 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -47,18 +47,20 @@ static CUpdatedBlock latestblock;
extern void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry);
-double GetDifficulty(const CBlockIndex* blockindex)
+/* Calculate the difficulty for a given block index,
+ * or the tip of the given chain if no block index is provided.
+ */
+double GetDifficulty(const CChain& chain, const CBlockIndex* blockindex)
{
if (blockindex == nullptr)
{
- if (chainActive.Tip() == nullptr)
+ if (chain.Tip() == nullptr)
return 1.0;
else
- blockindex = chainActive.Tip();
+ blockindex = chain.Tip();
}
int nShift = (blockindex->nBits >> 24) & 0xff;
-
double dDiff =
(double)0x0000ffff / (double)(blockindex->nBits & 0x00ffffff);
@@ -76,6 +78,11 @@ double GetDifficulty(const CBlockIndex* blockindex)
return dDiff;
}
+double GetDifficulty(const CBlockIndex* blockindex)
+{
+ return GetDifficulty(chainActive, blockindex);
+}
+
UniValue blockheaderToJSON(const CBlockIndex* blockindex)
{
AssertLockHeld(cs_main);
@@ -1350,7 +1357,7 @@ UniValue mempoolInfoToJSON()
ret.push_back(Pair("usage", (int64_t) mempool.DynamicMemoryUsage()));
size_t maxmempool = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
ret.push_back(Pair("maxmempool", (int64_t) maxmempool));
- ret.push_back(Pair("mempoolminfee", ValueFromAmount(mempool.GetMinFee(maxmempool).GetFeePerK())));
+ ret.push_back(Pair("mempoolminfee", ValueFromAmount(std::max(mempool.GetMinFee(maxmempool), ::minRelayTxFee).GetFeePerK())));
return ret;
}
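
GetDifficulty above derives difficulty from the compact nBits encoding: the ratio 0xffff / (nBits & 0x00ffffff) is scaled by 256 for each step the 8-bit exponent differs from 29 (the normalisation loop sits in context the hunk does not show). Below is a standalone sketch of that arithmetic, assuming the usual shift loop, checked against two nBits values exercised by the new blockchain_tests.cpp later in this diff:

    // Standalone copy of the shift-normalisation arithmetic used by GetDifficulty
    // above, checked against nBits values from the new blockchain_tests.cpp.
    #include <cstdint>
    #include <iostream>

    double DifficultyFromBits(uint32_t nbits)
    {
        int shift = (nbits >> 24) & 0xff;
        double diff = 65535.0 / static_cast<double>(nbits & 0x00ffffff);
        while (shift < 29) { diff *= 256.0; ++shift; }
        while (shift > 29) { diff /= 256.0; --shift; }
        return diff;
    }

    int main()
    {
        std::cout << DifficultyFromBits(0x1cf88f6f) << '\n'; // ~1.029916
        std::cout << DifficultyFromBits(0x1df88f6f) << '\n'; // ~0.004023
    }
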
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 260f6fa60e..7d5ec14610 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -206,3 +206,8 @@ void SingleThreadedSchedulerClient::EmptyQueue() {
should_continue = !m_callbacks_pending.empty();
}
}
+
+size_t SingleThreadedSchedulerClient::CallbacksPending() {
+ LOCK(m_cs_callbacks_pending);
+ return m_callbacks_pending.size();
+}
diff --git a/src/scheduler.h b/src/scheduler.h
index b99f165343..39ecb849b9 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -108,6 +108,8 @@ public:
// Processes all remaining queue members on the calling thread, blocking until queue is empty
// Must be called after the CScheduler has no remaining processing threads!
void EmptyQueue();
+
+ size_t CallbacksPending();
};
#endif
diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp
new file mode 100644
index 0000000000..55fdd2c071
--- /dev/null
+++ b/src/test/blockchain_tests.cpp
@@ -0,0 +1,126 @@
+#include <boost/test/unit_test.hpp>
+
+#include "stdlib.h"
+
+#include "rpc/blockchain.cpp"
+#include "test/test_bitcoin.h"
+
+/* Equality between doubles is imprecise. Comparison should be done
+ * with a small threshold of tolerance, rather than exact equality.
+ */
+bool DoubleEquals(double a, double b, double epsilon)
+{
+ return std::abs(a - b) < epsilon;
+}
+
+CBlockIndex* CreateBlockIndexWithNbits(uint32_t nbits)
+{
+ CBlockIndex* block_index = new CBlockIndex();
+ block_index->nHeight = 46367;
+ block_index->nTime = 1269211443;
+ block_index->nBits = nbits;
+ return block_index;
+}
+
+CChain CreateChainWithNbits(uint32_t nbits)
+{
+ CBlockIndex* block_index = CreateBlockIndexWithNbits(nbits);
+ CChain chain;
+ chain.SetTip(block_index);
+ return chain;
+}
+
+void RejectDifficultyMismatch(double difficulty, double expected_difficulty) {
+ BOOST_CHECK_MESSAGE(
+ DoubleEquals(difficulty, expected_difficulty, 0.00001),
+ "Difficulty was " + std::to_string(difficulty)
+ + " but was expected to be " + std::to_string(expected_difficulty));
+}
+
+/* Given a BlockIndex with the provided nbits,
+ * verify that the expected difficulty results.
+ */
+void TestDifficulty(uint32_t nbits, double expected_difficulty)
+{
+ CBlockIndex* block_index = CreateBlockIndexWithNbits(nbits);
+ /* Since we are passing in block index explicitly,
+ * there is no need to set up anything within the chain itself.
+ */
+ CChain chain;
+
+ double difficulty = GetDifficulty(chain, block_index);
+ delete block_index;
+
+ RejectDifficultyMismatch(difficulty, expected_difficulty);
+}
+
+BOOST_FIXTURE_TEST_SUITE(blockchain_difficulty_tests, BasicTestingSetup)
+
+BOOST_AUTO_TEST_CASE(get_difficulty_for_very_low_target)
+{
+ TestDifficulty(0x1f111111, 0.000001);
+}
+
+BOOST_AUTO_TEST_CASE(get_difficulty_for_low_target)
+{
+ TestDifficulty(0x1ef88f6f, 0.000016);
+}
+
+BOOST_AUTO_TEST_CASE(get_difficulty_for_mid_target)
+{
+ TestDifficulty(0x1df88f6f, 0.004023);
+}
+
+BOOST_AUTO_TEST_CASE(get_difficulty_for_high_target)
+{
+ TestDifficulty(0x1cf88f6f, 1.029916);
+}
+
+BOOST_AUTO_TEST_CASE(get_difficulty_for_very_high_target)
+{
+ TestDifficulty(0x12345678, 5913134931067755359633408.0);
+}
+
+// Verify that difficulty is 1.0 for an empty chain.
+BOOST_AUTO_TEST_CASE(get_difficulty_for_null_tip)
+{
+ CChain chain;
+ double difficulty = GetDifficulty(chain, nullptr);
+ RejectDifficultyMismatch(difficulty, 1.0);
+}
+
+/* Verify that difficulty is based upon the tip of the chain
+ * if no block index is explicitly specified.
+ */
+BOOST_AUTO_TEST_CASE(get_difficulty_for_null_block_index)
+{
+ CChain chain = CreateChainWithNbits(0x1df88f6f);
+
+ double difficulty = GetDifficulty(chain, nullptr);
+ delete chain.Tip();
+
+ double expected_difficulty = 0.004023;
+
+ RejectDifficultyMismatch(difficulty, expected_difficulty);
+}
+
+/* Verify that difficulty is based upon the explicitly specified
+ * block index rather than being taken from the provided chain,
+ * when both are present.
+ */
+BOOST_AUTO_TEST_CASE(get_difficulty_for_block_index_overrides_tip)
+{
+ CChain chain = CreateChainWithNbits(0x1df88f6f);
+ /* This block index's nbits should be used
+ * instead of the chain's when calculating difficulty.
+ */
+ CBlockIndex* override_block_index = CreateBlockIndexWithNbits(0x12345678);
+
+ double difficulty = GetDifficulty(chain, override_block_index);
+ delete chain.Tip();
+ delete override_block_index;
+
+ RejectDifficultyMismatch(difficulty, 5913134931067755359633408.0);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
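
For context on the expected values in these tests: difficulty is the ratio of the difficulty-1 target (mantissa 0x0000ffff at exponent 0x1d) to the target that nbits encodes in compact form. The sketch below mirrors the shape of that computation; it is an illustration written for this note, not a verbatim copy of GetDifficulty:

    #include <cstdint>
    #include <cstdio>

    // Rough re-derivation of the expected difficulties: take the ratio of the
    // difficulty-1 mantissa to the encoded mantissa, then normalize the
    // exponent byte toward 0x1d (29), one factor of 256 per step.
    double DifficultyFromNbits(uint32_t nbits)
    {
        int shift = (nbits >> 24) & 0xff;                       // exponent byte
        double diff = 0x0000ffff / double(nbits & 0x00ffffff);  // mantissa ratio
        while (shift < 29) { diff *= 256.0; ++shift; }
        while (shift > 29) { diff /= 256.0; --shift; }
        return diff;
    }

    int main()
    {
        std::printf("%f\n", DifficultyFromNbits(0x1d00ffff)); // ~1.0 (difficulty-1 target)
        std::printf("%f\n", DifficultyFromNbits(0x1cf88f6f)); // ~1.029916, the "high target" case
        std::printf("%f\n", DifficultyFromNbits(0x1df88f6f)); // ~0.004023, the "mid target" case
    }
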
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 97c548dbb0..6be717b43c 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -216,7 +216,6 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
entry.nFee = 11;
entry.nHeight = 11;
- LOCK(cs_main);
fCheckpointsEnabled = false;
// Simple block creation, nothing special yet:
@@ -229,27 +228,32 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i)
{
CBlock *pblock = &pblocktemplate->block; // pointer for convenience
- pblock->nVersion = 1;
- pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
- CMutableTransaction txCoinbase(*pblock->vtx[0]);
- txCoinbase.nVersion = 1;
- txCoinbase.vin[0].scriptSig = CScript();
- txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
- txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
- txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
- txCoinbase.vout[0].scriptPubKey = CScript();
- pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
- if (txFirst.size() == 0)
- baseheight = chainActive.Height();
- if (txFirst.size() < 4)
- txFirst.push_back(pblock->vtx[0]);
- pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
- pblock->nNonce = blockinfo[i].nonce;
+ {
+ LOCK(cs_main);
+ pblock->nVersion = 1;
+ pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
+ CMutableTransaction txCoinbase(*pblock->vtx[0]);
+ txCoinbase.nVersion = 1;
+ txCoinbase.vin[0].scriptSig = CScript();
+ txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
+ txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
+ txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
+ txCoinbase.vout[0].scriptPubKey = CScript();
+ pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
+ if (txFirst.size() == 0)
+ baseheight = chainActive.Height();
+ if (txFirst.size() < 4)
+ txFirst.push_back(pblock->vtx[0]);
+ pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
+ pblock->nNonce = blockinfo[i].nonce;
+ }
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, nullptr));
pblock->hashPrevBlock = pblock->GetHash();
}
+ LOCK(cs_main);
+
// Just to make sure we can still make simple blocks
BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey));
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index f52c8ccc21..6bd228a63a 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -69,9 +69,9 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
fs::create_directories(pathTemp);
gArgs.ForceSetArg("-datadir", pathTemp.string());
- // Note that because we don't bother running a scheduler thread here,
- // callbacks via CValidationInterface are unreliable, but that's OK,
- // our unit tests aren't testing multiple parts of the code at once.
+ // We have to run a scheduler thread to prevent ActivateBestChain
+ // from blocking due to queue overrun.
+ threadGroup.create_thread(boost::bind(&CScheduler::serviceQueue, &scheduler));
GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
mempool.setSanityCheck(1.0);
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index fe8cb6126e..10acabe0be 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -66,7 +66,6 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
// Test 1: block with both of those transactions should be rejected.
block = CreateAndProcessBlock(spends, scriptPubKey);
- LOCK(cs_main);
BOOST_CHECK(chainActive.Tip()->GetBlockHash() != block.GetHash());
// Test 2: ... and should be rejected if spend1 is in the memory pool
@@ -190,12 +189,12 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
spend_tx.vin[0].scriptSig << vchSig;
}
- LOCK(cs_main);
-
// Test that invalidity under a set of flags doesn't preclude validity
// under other (eg consensus) flags.
// spend_tx is invalid according to DERSIG
{
+ LOCK(cs_main);
+
CValidationState state;
PrecomputedTransactionData ptd_spend_tx(spend_tx);
@@ -213,15 +212,17 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
// test later that block validation works fine in the absence of cached
// successes.
ValidateCheckInputsForAllFlags(spend_tx, SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false);
+ }
- // And if we produce a block with this tx, it should be valid (DERSIG not
- // enabled yet), even though there's no cache entry.
- CBlock block;
+ // And if we produce a block with this tx, it should be valid (DERSIG not
+ // enabled yet), even though there's no cache entry.
+ CBlock block;
- block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey);
- BOOST_CHECK(chainActive.Tip()->GetBlockHash() == block.GetHash());
- BOOST_CHECK(pcoinsTip->GetBestBlock() == block.GetHash());
- }
+ block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey);
+ BOOST_CHECK(chainActive.Tip()->GetBlockHash() == block.GetHash());
+ BOOST_CHECK(pcoinsTip->GetBestBlock() == block.GetHash());
+
+ LOCK(cs_main);
// Test P2SH: construct a transaction that is valid without P2SH, and
// then test validity with P2SH.
diff --git a/src/validation.cpp b/src/validation.cpp
index 75c40b22fc..1c024ce2ae 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -40,6 +40,7 @@
#include <validationinterface.h>
#include <warnings.h>
+#include <future>
#include <sstream>
#include <boost/algorithm/string/replace.hpp>
@@ -2559,12 +2560,21 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
// far from a guarantee. Things in the P2P/RPC will often end up calling
// us in the middle of ProcessNewBlock - do not assume pblock is set
// sanely for performance or correctness!
+ AssertLockNotHeld(cs_main);
CBlockIndex *pindexMostWork = nullptr;
CBlockIndex *pindexNewTip = nullptr;
int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
do {
boost::this_thread::interruption_point();
+
+ if (GetMainSignals().CallbacksPending() > 10) {
+ // Block until the validation queue drains. This should largely
+ // never happen in normal operation, but may happen during
+ // reindex, causing memory blowup if we run too far ahead.
+ SyncWithValidationInterfaceQueue();
+ }
+
if (ShutdownRequested())
break;
@@ -3383,6 +3393,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
+ AssertLockNotHeld(cs_main);
+
{
CBlockIndex *pindex = nullptr;
if (fNewBlock) *fNewBlock = false;
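
The CallbacksPending() > 10 check above acts as a back-pressure valve: when the background validation queue has fallen behind, the thread connecting blocks pauses until the queue drains rather than letting it grow without bound (the reindex case mentioned in the comment). A self-contained sketch of that producer/consumer throttle, with a hypothetical queue standing in for the scheduler client:

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Hypothetical work queue: the producer checks the backlog and, past a
    // threshold, blocks until the consumer has drained it.
    class WorkQueue {
    public:
        void Push(int item) {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_queue.push(item);
            m_cv.notify_all();
        }
        size_t Pending() {
            std::lock_guard<std::mutex> lock(m_mutex);
            return m_queue.size();
        }
        void PopOne() { // consumer: wait for an item, then remove it
            std::unique_lock<std::mutex> lock(m_mutex);
            m_cv.wait(lock, [this] { return !m_queue.empty(); });
            m_queue.pop();
            m_cv.notify_all();
        }
        void WaitUntilEmpty() { // producer: block until the backlog is gone
            std::unique_lock<std::mutex> lock(m_mutex);
            m_cv.wait(lock, [this] { return m_queue.empty(); });
        }
    private:
        std::mutex m_mutex;
        std::condition_variable m_cv;
        std::queue<int> m_queue;
    };

    int main()
    {
        WorkQueue queue;
        std::thread consumer([&queue] {
            for (int i = 0; i < 1000; ++i) queue.PopOne();
        });
        for (int i = 0; i < 1000; ++i) {
            if (queue.Pending() > 10) queue.WaitUntilEmpty(); // same "> 10" idea
            queue.Push(i);
        }
        consumer.join();
    }
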
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index abbd8cc4d2..1e13465bf7 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -11,9 +11,11 @@
#include <sync.h>
#include <txmempool.h>
#include <util.h>
+#include <validation.h>
#include <list>
#include <atomic>
+#include <future>
#include <boost/signals2/signal.hpp>
@@ -54,6 +56,11 @@ void CMainSignals::FlushBackgroundCallbacks() {
}
}
+size_t CMainSignals::CallbacksPending() {
+ if (!m_internals) return 0;
+ return m_internals->m_schedulerClient.CallbacksPending();
+}
+
void CMainSignals::RegisterWithMempoolSignals(CTxMemPool& pool) {
pool.NotifyEntryRemoved.connect(boost::bind(&CMainSignals::MempoolEntryRemoved, this, _1, _2));
}
@@ -113,6 +120,16 @@ void CallFunctionInValidationInterfaceQueue(std::function<void ()> func) {
g_signals.m_internals->m_schedulerClient.AddToProcessQueue(std::move(func));
}
+void SyncWithValidationInterfaceQueue() {
+ AssertLockNotHeld(cs_main);
+ // Block until the validation queue drains
+ std::promise<void> promise;
+ CallFunctionInValidationInterfaceQueue([&promise] {
+ promise.set_value();
+ });
+ promise.get_future().wait();
+}
+
void CMainSignals::MempoolEntryRemoved(CTransactionRef ptx, MemPoolRemovalReason reason) {
if (reason != MemPoolRemovalReason::BLOCK && reason != MemPoolRemovalReason::CONFLICT) {
m_internals->m_schedulerClient.AddToProcessQueue([ptx, this] {
diff --git a/src/validationinterface.h b/src/validationinterface.h
index 7b5d138414..f416f6678f 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -42,6 +42,16 @@ void UnregisterAllValidationInterfaces();
* will result in a deadlock (that DEBUG_LOCKORDER will miss).
*/
void CallFunctionInValidationInterfaceQueue(std::function<void ()> func);
+/**
+ * This is a synonym for the following, which asserts certain locks are not
+ * held:
+ * std::promise<void> promise;
+ * CallFunctionInValidationInterfaceQueue([&promise] {
+ * promise.set_value();
+ * });
+ * promise.get_future().wait();
+ */
+void SyncWithValidationInterfaceQueue();
class CValidationInterface {
protected:
@@ -131,6 +141,8 @@ public:
/** Call any remaining callbacks on the calling thread */
void FlushBackgroundCallbacks();
+ size_t CallbacksPending();
+
/** Register with mempool to call TransactionRemovedFromMempool callbacks */
void RegisterWithMempoolSignals(CTxMemPool& pool);
/** Unregister with mempool */
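
The comment above spells out the drain pattern behind SyncWithValidationInterfaceQueue: enqueue a sentinel callback that fulfils a promise, then wait on the matching future; because the queue runs in FIFO order, everything queued earlier has executed by the time the wait returns. A self-contained sketch of that pattern against a hypothetical callback queue (not the CMainSignals machinery itself):

    #include <condition_variable>
    #include <functional>
    #include <future>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Hypothetical single-consumer callback queue.
    class CallbackQueue {
    public:
        void Add(std::function<void()> fn) {
            {
                std::lock_guard<std::mutex> lock(m_mutex);
                m_queue.push(std::move(fn));
            }
            m_cv.notify_one();
        }
        // Worker loop: run callbacks in FIFO order; exit once Stop() has been
        // called and the queue is empty.
        void Run() {
            std::unique_lock<std::mutex> lock(m_mutex);
            for (;;) {
                m_cv.wait(lock, [this] { return m_stop || !m_queue.empty(); });
                if (m_queue.empty()) return;
                auto fn = std::move(m_queue.front());
                m_queue.pop();
                lock.unlock();
                fn();          // run outside the lock
                lock.lock();
            }
        }
        void Stop() {
            {
                std::lock_guard<std::mutex> lock(m_mutex);
                m_stop = true;
            }
            m_cv.notify_one();
        }
        // The drain pattern: a sentinel callback fulfils a promise and the
        // caller waits on the corresponding future.
        void SyncWithQueue() {
            std::promise<void> promise;
            Add([&promise] { promise.set_value(); });
            promise.get_future().wait();
        }
    private:
        std::mutex m_mutex;
        std::condition_variable m_cv;
        std::queue<std::function<void()>> m_queue;
        bool m_stop = false;
    };

    int main()
    {
        CallbackQueue queue;
        std::thread worker([&queue] { queue.Run(); });
        for (int i = 0; i < 100; ++i) queue.Add([] { /* background notification */ });
        queue.SyncWithQueue();  // returns only after the 100 callbacks have run
        queue.Stop();
        worker.join();
    }
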
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 788bac2a46..71e7111b1a 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -31,7 +31,7 @@ std::string GetWalletHelpString(bool showDebug)
strUsage += HelpMessageOpt("-salvagewallet", _("Attempt to recover private keys from a corrupt wallet on startup"));
strUsage += HelpMessageOpt("-spendzeroconfchange", strprintf(_("Spend unconfirmed change when sending transactions (default: %u)"), DEFAULT_SPEND_ZEROCONF_CHANGE));
strUsage += HelpMessageOpt("-txconfirmtarget=<n>", strprintf(_("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)"), DEFAULT_TX_CONFIRM_TARGET));
- strUsage += HelpMessageOpt("-walletrbf", strprintf(_("Send transactions with full-RBF opt-in enabled (default: %u)"), DEFAULT_WALLET_RBF));
+ strUsage += HelpMessageOpt("-walletrbf", strprintf(_("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)"), DEFAULT_WALLET_RBF));
strUsage += HelpMessageOpt("-upgradewallet", _("Upgrade wallet to latest format on startup"));
strUsage += HelpMessageOpt("-wallet=<file>", _("Specify wallet file (within data directory)") + " " + strprintf(_("(default: %s)"), DEFAULT_WALLET_DAT));
strUsage += HelpMessageOpt("-walletbroadcast", _("Make the wallet broadcast transactions") + " " + strprintf(_("(default: %u)"), DEFAULT_WALLETBROADCAST));
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 80e31a1ce0..8e9362c649 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -370,8 +370,6 @@ static void AddKey(CWallet& wallet, const CKey& key)
BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
{
- LOCK(cs_main);
-
// Cap last block file size, and mine new block in a new block file.
CBlockIndex* const nullBlock = nullptr;
CBlockIndex* oldTip = chainActive.Tip();
@@ -379,6 +377,8 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
CBlockIndex* newTip = chainActive.Tip();
+ LOCK(cs_main);
+
// Verify ScanForWalletTransactions picks up transactions in both the old
// and new block files.
{
@@ -447,8 +447,6 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
// than or equal to key birthday.
BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
{
- LOCK(cs_main);
-
// Create two blocks with same timestamp to verify that importwallet rescan
// will pick up both blocks, not just the first.
const int64_t BLOCK_TIME = chainActive.Tip()->GetBlockTimeMax() + 5;
@@ -462,6 +460,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
SetMockTime(KEY_TIME);
coinbaseTxns.emplace_back(*CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
+ LOCK(cs_main);
+
// Import key into wallet and call dumpwallet to create backup file.
{
CWallet wallet;
@@ -627,10 +627,15 @@ public:
BOOST_CHECK(wallet->CreateTransaction({recipient}, wtx, reservekey, fee, changePos, error, dummy));
CValidationState state;
BOOST_CHECK(wallet->CommitTransaction(wtx, reservekey, nullptr, state));
+ CMutableTransaction blocktx;
+ {
+ LOCK(wallet->cs_wallet);
+ blocktx = CMutableTransaction(*wallet->mapWallet.at(wtx.GetHash()).tx);
+ }
+ CreateAndProcessBlock({CMutableTransaction(blocktx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
LOCK(wallet->cs_wallet);
auto it = wallet->mapWallet.find(wtx.GetHash());
BOOST_CHECK(it != wallet->mapWallet.end());
- CreateAndProcessBlock({CMutableTransaction(*it->second.tx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
it->second.SetMerkleBranch(chainActive.Tip(), 1);
return it->second;
}
@@ -641,7 +646,6 @@ public:
BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
{
std::string coinbaseAddress = coinbaseKey.GetPubKey().GetID().ToString();
- LOCK2(cs_main, wallet->cs_wallet);
// Confirm ListCoins initially returns 1 coin grouped under coinbaseKey
// address.
@@ -669,6 +673,7 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
BOOST_CHECK_EQUAL(available.size(), 2);
for (const auto& group : list) {
for (const auto& coin : group.second) {
+ LOCK(wallet->cs_wallet);
wallet->LockCoin(COutPoint(coin.tx->GetHash(), coin.i));
}
}
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index dafd708d09..cf9bb2909f 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1292,12 +1292,7 @@ void CWallet::BlockUntilSyncedToCurrentChain() {
// ...otherwise put a callback in the validation interface queue and wait
// for the queue to drain enough to execute it (indicating we are caught up
// at least with the time we entered this function).
-
- std::promise<void> promise;
- CallFunctionInValidationInterfaceQueue([&promise] {
- promise.set_value();
- });
- promise.get_future().wait();
+ SyncWithValidationInterfaceQueue();
}
diff --git a/test/functional/wallet.py b/test/functional/wallet.py
index db60df18ed..e0da5ce136 100755
--- a/test/functional/wallet.py
+++ b/test/functional/wallet.py
@@ -33,6 +33,9 @@ class WalletTest(BitcoinTestFramework):
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
+ self.log.info("Check for mempoolminfee in getmempoolinfo")
+ assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
+
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
diff --git a/test/util/bitcoin-util-test.py b/test/util/bitcoin-util-test.py
index ef34955d90..64e826ad0b 100755
--- a/test/util/bitcoin-util-test.py
+++ b/test/util/bitcoin-util-test.py
@@ -48,7 +48,7 @@ def main():
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
- input_filename = testDir + "/" + input_basename
+ input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename).read()
input_data = json.loads(raw_data)
@@ -77,7 +77,7 @@ def bctest(testDir, testObj, buildenv):
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
- execprog = buildenv["BUILDDIR"] + "/src/" + testObj['exec'] + buildenv["EXEEXT"]
+ execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
@@ -85,24 +85,28 @@ def bctest(testDir, testObj, buildenv):
stdinCfg = None
inputData = None
if "input" in testObj:
- filename = testDir + "/" + testObj['input']
+ filename = os.path.join(testDir, testObj["input"])
inputData = open(filename).read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
+ outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
- outputData = open(testDir + "/" + outputFn).read()
+ outputData = open(os.path.join(testDir, outputFn)).read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
+ if not outputType:
+ logging.error("Output file %s does not have a file extension" % outputFn)
+ raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)