Diffstat (limited to 'src')
91 files changed, 1709 insertions, 836 deletions
diff --git a/src/Makefile.am b/src/Makefile.am index 90deff48b0..3e43076878 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -500,10 +500,6 @@ clean-local: ## FIXME: How to get the appropriate modulename_CPPFLAGS in here? $(AM_V_GEN) $(WINDRES) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(CPPFLAGS) -DWINDRES_PREPROC -i $< -o $@ -.mm.o: - $(AM_V_CXX) $(OBJCXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CXXFLAGS) $(QT_INCLUDES) $(AM_CXXFLAGS) $(PIE_FLAGS) $(CXXFLAGS) -c -o $@ $< - check-symbols: $(bin_PROGRAMS) if GLIBC_BACK_COMPAT @echo "Checking glibc back compat..." diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index e4b64c1ca7..0767ee1302 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -368,6 +368,7 @@ BITCOIN_QT_INCLUDES = -I$(builddir)/qt -I$(srcdir)/qt -I$(srcdir)/qt/forms \ qt_libbitcoinqt_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(BITCOIN_QT_INCLUDES) \ $(QT_INCLUDES) $(QT_DBUS_INCLUDES) $(PROTOBUF_CFLAGS) $(QR_CFLAGS) qt_libbitcoinqt_a_CXXFLAGS = $(AM_CXXFLAGS) $(QT_PIE_FLAGS) +qt_libbitcoinqt_a_OBJCXXFLAGS = $(AM_OBJCXXFLAGS) $(QT_PIE_FLAGS) qt_libbitcoinqt_a_SOURCES = $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(QT_FORMS_UI) \ $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(PROTOBUF_PROTO) $(RES_ICONS) $(RES_IMAGES) $(RES_MOVIES) diff --git a/src/addrman.h b/src/addrman.h index 18f3062287..f347cba6ca 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -455,6 +455,7 @@ public: void Clear() { + LOCK(cs); std::vector<int>().swap(vRandom); nKey = GetRandHash(); for (size_t bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++) { diff --git a/src/arith_uint256.h b/src/arith_uint256.h index 5fd4fe96cf..1009fe1cc8 100644 --- a/src/arith_uint256.h +++ b/src/arith_uint256.h @@ -25,7 +25,7 @@ template<unsigned int BITS> class base_uint { protected: - enum { WIDTH=BITS/32 }; + static constexpr int WIDTH = BITS / 32; uint32_t pn[WIDTH]; public: diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp index 7b307d6f42..4c5a036773 100644 --- a/src/bench/bench.cpp +++ b/src/bench/bench.cpp @@ -8,29 +8,25 @@ #include <assert.h> #include <iostream> #include <iomanip> -#include <sys/time.h> benchmark::BenchRunner::BenchmarkMap &benchmark::BenchRunner::benchmarks() { static std::map<std::string, benchmark::BenchFunction> benchmarks_map; return benchmarks_map; } -static double gettimedouble(void) { - struct timeval tv; - gettimeofday(&tv, nullptr); - return tv.tv_usec * 0.000001 + tv.tv_sec; -} - benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func) { benchmarks().insert(std::make_pair(name, func)); } void -benchmark::BenchRunner::RunAll(double elapsedTimeForOne) +benchmark::BenchRunner::RunAll(benchmark::duration elapsedTimeForOne) { perf_init(); - std::cout << "#Benchmark" << "," << "count" << "," << "min" << "," << "max" << "," << "average" << "," + if (std::ratio_less_equal<benchmark::clock::period, std::micro>::value) { + std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n"; + } + std::cout << "#Benchmark" << "," << "count" << "," << "min(ns)" << "," << "max(ns)" << "," << "average(ns)" << "," << "min_cycles" << "," << "max_cycles" << "," << "average_cycles" << "\n"; for (const auto &p: benchmarks()) { @@ -46,16 +42,17 @@ bool benchmark::State::KeepRunning() ++count; return true; } - double now; + time_point now; + uint64_t nowCycles; if (count == 0) { - lastTime = beginTime = now = gettimedouble(); + lastTime = beginTime = now = clock::now(); lastCycles = 
beginCycles = nowCycles = perf_cpucycles(); } else { - now = gettimedouble(); - double elapsed = now - lastTime; - double elapsedOne = elapsed / (countMask + 1); + now = clock::now(); + auto elapsed = now - lastTime; + auto elapsedOne = elapsed / (countMask + 1); if (elapsedOne < minTime) minTime = elapsedOne; if (elapsedOne > maxTime) maxTime = elapsedOne; @@ -70,8 +67,8 @@ bool benchmark::State::KeepRunning() // The restart avoids including the overhead of this code in the measurement. countMask = ((countMask<<3)|7) & ((1LL<<60)-1); count = 0; - minTime = std::numeric_limits<double>::max(); - maxTime = std::numeric_limits<double>::min(); + minTime = duration::max(); + maxTime = duration::zero(); minCycles = std::numeric_limits<uint64_t>::max(); maxCycles = std::numeric_limits<uint64_t>::min(); return true; @@ -94,9 +91,13 @@ bool benchmark::State::KeepRunning() assert(count != 0 && "count == 0 => (now == 0 && beginTime == 0) => return above"); // Output results - double average = (now-beginTime)/count; + // Duration casts are only necessary here because hardware with sub-nanosecond clocks + // will lose precision. + int64_t min_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(minTime).count(); + int64_t max_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(maxTime).count(); + int64_t avg_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>((now-beginTime)/count).count(); int64_t averageCycles = (nowCycles-beginCycles)/count; - std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << minTime << "," << maxTime << "," << average << "," + std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << min_elapsed << "," << max_elapsed << "," << avg_elapsed << "," << minCycles << "," << maxCycles << "," << averageCycles << "\n"; std::cout.copyfmt(std::ios(nullptr)); diff --git a/src/bench/bench.h b/src/bench/bench.h index 79109eaa56..ab5c3d5604 100644 --- a/src/bench/bench.h +++ b/src/bench/bench.h @@ -9,6 +9,7 @@ #include <limits> #include <map> #include <string> +#include <chrono> #include <boost/preprocessor/cat.hpp> #include <boost/preprocessor/stringize.hpp> @@ -36,12 +37,21 @@ BENCHMARK(CODE_TO_TIME); */ namespace benchmark { + // In case high_resolution_clock is steady, prefer that, otherwise use steady_clock. 
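The benchmark timer moves from gettimeofday() to std::chrono: it picks whichever standard clock is steady (preferring high_resolution_clock) and reports elapsed time in whole nanoseconds. A minimal standalone sketch of the same idiom; the timed loop and output here are illustrative, not part of the patch:

    #include <chrono>
    #include <cstdint>
    #include <cstdio>
    #include <type_traits>

    // Prefer high_resolution_clock when it is steady, otherwise fall back to steady_clock.
    using bench_clock = std::conditional<std::chrono::high_resolution_clock::is_steady,
                                         std::chrono::high_resolution_clock,
                                         std::chrono::steady_clock>::type;

    int main()
    {
        const auto begin = bench_clock::now();
        volatile uint64_t sink = 0;
        for (int i = 0; i < 1000000; ++i) sink += i;  // the work being measured
        const auto elapsed = bench_clock::now() - begin;
        // duration_cast pins the reported unit to nanoseconds, as the new bench output does.
        printf("elapsed: %lld ns\n",
               (long long)std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count());
        return 0;
    }
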
+ struct best_clock { + using hi_res_clock = std::chrono::high_resolution_clock; + using steady_clock = std::chrono::steady_clock; + using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type; + }; + using clock = best_clock::type; + using time_point = clock::time_point; + using duration = clock::duration; class State { std::string name; - double maxElapsed; - double beginTime; - double lastTime, minTime, maxTime; + duration maxElapsed; + time_point beginTime, lastTime; + duration minTime, maxTime; uint64_t count; uint64_t countMask; uint64_t beginCycles; @@ -49,9 +59,9 @@ namespace benchmark { uint64_t minCycles; uint64_t maxCycles; public: - State(std::string _name, double _maxElapsed) : name(_name), maxElapsed(_maxElapsed), count(0) { - minTime = std::numeric_limits<double>::max(); - maxTime = std::numeric_limits<double>::min(); + State(std::string _name, duration _maxElapsed) : name(_name), maxElapsed(_maxElapsed), count(0) { + minTime = duration::max(); + maxTime = duration::zero(); minCycles = std::numeric_limits<uint64_t>::max(); maxCycles = std::numeric_limits<uint64_t>::min(); countMask = 1; @@ -69,7 +79,7 @@ namespace benchmark { public: BenchRunner(std::string name, BenchFunction func); - static void RunAll(double elapsedTimeForOne=1.0); + static void RunAll(duration elapsedTimeForOne = std::chrono::seconds(1)); }; } diff --git a/src/bench/rollingbloom.cpp b/src/bench/rollingbloom.cpp index 73c02cf718..a93d0fb0a5 100644 --- a/src/bench/rollingbloom.cpp +++ b/src/bench/rollingbloom.cpp @@ -6,7 +6,6 @@ #include "bench.h" #include "bloom.h" -#include "utiltime.h" static void RollingBloom(benchmark::State& state) { @@ -23,10 +22,10 @@ static void RollingBloom(benchmark::State& state) data[2] = count >> 16; data[3] = count >> 24; if (countnow == nEntriesPerGeneration) { - int64_t b = GetTimeMicros(); + auto b = benchmark::clock::now(); filter.insert(data); - int64_t e = GetTimeMicros(); - std::cout << "RollingBloom-refresh,1," << (e-b)*0.000001 << "," << (e-b)*0.000001 << "," << (e-b)*0.000001 << "\n"; + auto total = std::chrono::duration_cast<std::chrono::nanoseconds>(benchmark::clock::now() - b).count(); + std::cout << "RollingBloom-refresh,1," << total << "," << total << "," << total << "\n"; countnow = 0; } else { filter.insert(data); diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index a20222d05c..b499b15507 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -387,6 +387,10 @@ static void MutateTxAddOutMultiSig(CMutableTransaction& tx, const std::string& s scriptPubKey = GetScriptForWitness(scriptPubKey); } if (bScriptHash) { + if (scriptPubKey.size() > MAX_SCRIPT_ELEMENT_SIZE) { + throw std::runtime_error(strprintf( + "redeemScript exceeds size limit: %d > %d", scriptPubKey.size(), MAX_SCRIPT_ELEMENT_SIZE)); + } // Get the ID for the script, and then construct a P2SH destination for it. 
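The new size checks in bitcoin-tx reject scripts that could never be used later: a redeemScript larger than MAX_SCRIPT_ELEMENT_SIZE (520 bytes) cannot be pushed by the spending input, so wrapping it into P2SH would produce an unspendable output. A self-contained sketch of the guard, where the vector type and the locally defined constant stand in for CScript and the real limits in script/script.h:

    #include <cstdio>
    #include <stdexcept>
    #include <vector>

    // Stand-ins for CScript and the constant from script/script.h.
    using Script = std::vector<unsigned char>;
    static const size_t MAX_SCRIPT_ELEMENT_SIZE = 520;

    // Refuse to wrap an oversized redeem script; the spend could never push it later.
    Script WrapIntoP2SH(const Script& redeem_script)
    {
        if (redeem_script.size() > MAX_SCRIPT_ELEMENT_SIZE) {
            throw std::runtime_error("redeemScript exceeds size limit");
        }
        Script script_pub_key;  // OP_HASH160 <20-byte script hash> OP_EQUAL would be assembled here
        return script_pub_key;
    }

    int main()
    {
        try {
            WrapIntoP2SH(Script(600));  // 600 bytes > 520, so the guard fires
        } catch (const std::runtime_error& e) {
            printf("rejected: %s\n", e.what());
        }
        return 0;
    }
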
scriptPubKey = GetScriptForDestination(CScriptID(scriptPubKey)); } @@ -447,10 +451,19 @@ static void MutateTxAddOutScript(CMutableTransaction& tx, const std::string& str bScriptHash = (flags.find("S") != std::string::npos); } + if (scriptPubKey.size() > MAX_SCRIPT_SIZE) { + throw std::runtime_error(strprintf( + "script exceeds size limit: %d > %d", scriptPubKey.size(), MAX_SCRIPT_SIZE)); + } + if (bSegWit) { scriptPubKey = GetScriptForWitness(scriptPubKey); } if (bScriptHash) { + if (scriptPubKey.size() > MAX_SCRIPT_ELEMENT_SIZE) { + throw std::runtime_error(strprintf( + "redeemScript exceeds size limit: %d > %d", scriptPubKey.size(), MAX_SCRIPT_ELEMENT_SIZE)); + } scriptPubKey = GetScriptForDestination(CScriptID(scriptPubKey)); } diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index 543eba0e69..5f88c35dbd 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -120,7 +120,7 @@ bool AppInit(int argc, char* argv[]) for (int i = 1; i < argc; i++) { if (!IsSwitchChar(argv[i][0])) { fprintf(stderr, "Error: Command line contains unexpected token '%s', see bitcoind -h for a list of options.\n", argv[i]); - exit(EXIT_FAILURE); + return false; } } @@ -132,17 +132,17 @@ bool AppInit(int argc, char* argv[]) if (!AppInitBasicSetup()) { // InitError will have been called with detailed error, which ends up on console - exit(EXIT_FAILURE); + return false; } if (!AppInitParameterInteraction()) { // InitError will have been called with detailed error, which ends up on console - exit(EXIT_FAILURE); + return false; } if (!AppInitSanityChecks()) { // InitError will have been called with detailed error, which ends up on console - exit(EXIT_FAILURE); + return false; } if (gArgs.GetBoolArg("-daemon", false)) { @@ -163,7 +163,7 @@ bool AppInit(int argc, char* argv[]) if (!AppInitLockDataDirectory()) { // If locking the data directory failed, exit immediately - exit(EXIT_FAILURE); + return false; } fRet = AppInitMain(threadGroup, scheduler); } diff --git a/src/chain.h b/src/chain.h index f1036e5d92..0667e0121f 100644 --- a/src/chain.h +++ b/src/chain.h @@ -304,7 +304,7 @@ public: return (int64_t)nTimeMax; } - enum { nMedianTimeSpan=11 }; + static constexpr int nMedianTimeSpan = 11; int64_t GetMedianTimePast() const { diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 85c9cd6934..950bdd945c 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -75,6 +75,7 @@ public: CMainParams() { strNetworkID = "main"; consensus.nSubsidyHalvingInterval = 210000; + consensus.BIP16Height = 173805; // 00000000000000ce80a7e057163a4db1d5ad7b20fb6f598c9597b9665c8fb0d4 - April 1, 2012 consensus.BIP34Height = 227931; consensus.BIP34Hash = uint256S("0x000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8"); consensus.BIP65Height = 388381; // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0 @@ -181,6 +182,7 @@ public: CTestNetParams() { strNetworkID = "test"; consensus.nSubsidyHalvingInterval = 210000; + consensus.BIP16Height = 514; // 00000000040b4e986385315e14bee30ad876d8b47f748025b26683116d21aa65 consensus.BIP34Height = 21111; consensus.BIP34Hash = uint256S("0x0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8"); consensus.BIP65Height = 581885; // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6 @@ -230,7 +232,6 @@ public: vSeeds.emplace_back("testnet-seed.bitcoin.jonasschnelli.ch", true); vSeeds.emplace_back("seed.tbtc.petertodd.org", true); vSeeds.emplace_back("testnet-seed.bluematt.me", false); - vSeeds.emplace_back("testnet-seed.bitcoin.schildbach.de", 
false); base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111); base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196); @@ -271,6 +272,7 @@ public: CRegTestParams() { strNetworkID = "regtest"; consensus.nSubsidyHalvingInterval = 150; + consensus.BIP16Height = 0; // always enforce P2SH BIP16 on regtest consensus.BIP34Height = 100000000; // BIP34 has not activated on regtest (far in the future so block v1 are not rejected in tests) consensus.BIP34Hash = uint256(); consensus.BIP65Height = 1351; // BIP65 activated on regtest (Used in rpc activation tests) @@ -284,13 +286,13 @@ public: consensus.nMinerConfirmationWindow = 144; // Faster than normal for regtest (144 instead of 2016) consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 0; - consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 999999999999ULL; + consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_CSV].bit = 0; consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 0; - consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 999999999999ULL; + consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].bit = 1; - consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 0; - consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 999999999999ULL; + consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = Consensus::BIP9Deployment::ALWAYS_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; // The best chain should have at least this much work. consensus.nMinimumChainWork = uint256S("0x00"); diff --git a/src/coins.cpp b/src/coins.cpp index b45dfc3342..8fdde72ede 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -146,56 +146,58 @@ void CCoinsViewCache::SetBestBlock(const uint256 &hashBlockIn) { } bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn) { - for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { - if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Ignore non-dirty entries (optimization). - CCoinsMap::iterator itUs = cacheCoins.find(it->first); - if (itUs == cacheCoins.end()) { - // The parent cache does not have an entry, while the child does - // We can ignore it if it's both FRESH and pruned in the child - if (!(it->second.flags & CCoinsCacheEntry::FRESH && it->second.coin.IsSpent())) { - // Otherwise we will need to create it in the parent - // and move the data up and mark it as dirty - CCoinsCacheEntry& entry = cacheCoins[it->first]; - entry.coin = std::move(it->second.coin); - cachedCoinsUsage += entry.coin.DynamicMemoryUsage(); - entry.flags = CCoinsCacheEntry::DIRTY; - // We can mark it FRESH in the parent if it was FRESH in the child - // Otherwise it might have just been flushed from the parent's cache - // and already exist in the grandparent - if (it->second.flags & CCoinsCacheEntry::FRESH) - entry.flags |= CCoinsCacheEntry::FRESH; + for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); it = mapCoins.erase(it)) { + // Ignore non-dirty entries (optimization). 
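The rewritten BatchWrite loop leans on the C++11 guarantee that std::map::erase returns the iterator following the erased element, so each child-cache entry is processed and removed in a single pass instead of the old save-iterator-then-erase step at the bottom of the loop. A minimal sketch of the idiom with a plain std::map and illustrative data:

    #include <cstdio>
    #include <map>
    #include <string>

    int main()
    {
        std::map<int, std::string> child_cache{{1, "a"}, {2, ""}, {3, "c"}};

        // The loop header both advances and erases: erase(it) returns the iterator
        // to the next element, so every visited entry is removed exactly once,
        // even when the body skips it with `continue`.
        for (auto it = child_cache.begin(); it != child_cache.end(); it = child_cache.erase(it)) {
            if (it->second.empty()) {
                continue;  // analogous to skipping non-DIRTY entries
            }
            printf("flushing %d -> %s\n", it->first, it->second.c_str());
        }
        printf("entries left: %zu\n", child_cache.size());  // 0
        return 0;
    }
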
+ if (!(it->second.flags & CCoinsCacheEntry::DIRTY)) { + continue; + } + CCoinsMap::iterator itUs = cacheCoins.find(it->first); + if (itUs == cacheCoins.end()) { + // The parent cache does not have an entry, while the child does + // We can ignore it if it's both FRESH and pruned in the child + if (!(it->second.flags & CCoinsCacheEntry::FRESH && it->second.coin.IsSpent())) { + // Otherwise we will need to create it in the parent + // and move the data up and mark it as dirty + CCoinsCacheEntry& entry = cacheCoins[it->first]; + entry.coin = std::move(it->second.coin); + cachedCoinsUsage += entry.coin.DynamicMemoryUsage(); + entry.flags = CCoinsCacheEntry::DIRTY; + // We can mark it FRESH in the parent if it was FRESH in the child + // Otherwise it might have just been flushed from the parent's cache + // and already exist in the grandparent + if (it->second.flags & CCoinsCacheEntry::FRESH) { + entry.flags |= CCoinsCacheEntry::FRESH; } - } else { - // Assert that the child cache entry was not marked FRESH if the - // parent cache entry has unspent outputs. If this ever happens, - // it means the FRESH flag was misapplied and there is a logic - // error in the calling code. - if ((it->second.flags & CCoinsCacheEntry::FRESH) && !itUs->second.coin.IsSpent()) - throw std::logic_error("FRESH flag misapplied to cache entry for base transaction with spendable outputs"); + } + } else { + // Assert that the child cache entry was not marked FRESH if the + // parent cache entry has unspent outputs. If this ever happens, + // it means the FRESH flag was misapplied and there is a logic + // error in the calling code. + if ((it->second.flags & CCoinsCacheEntry::FRESH) && !itUs->second.coin.IsSpent()) { + throw std::logic_error("FRESH flag misapplied to cache entry for base transaction with spendable outputs"); + } - // Found the entry in the parent cache - if ((itUs->second.flags & CCoinsCacheEntry::FRESH) && it->second.coin.IsSpent()) { - // The grandparent does not have an entry, and the child is - // modified and being pruned. This means we can just delete - // it from the parent. - cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); - cacheCoins.erase(itUs); - } else { - // A normal modification. - cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); - itUs->second.coin = std::move(it->second.coin); - cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage(); - itUs->second.flags |= CCoinsCacheEntry::DIRTY; - // NOTE: It is possible the child has a FRESH flag here in - // the event the entry we found in the parent is pruned. But - // we must not copy that FRESH flag to the parent as that - // pruned state likely still needs to be communicated to the - // grandparent. - } + // Found the entry in the parent cache + if ((itUs->second.flags & CCoinsCacheEntry::FRESH) && it->second.coin.IsSpent()) { + // The grandparent does not have an entry, and the child is + // modified and being pruned. This means we can just delete + // it from the parent. + cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); + cacheCoins.erase(itUs); + } else { + // A normal modification. + cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); + itUs->second.coin = std::move(it->second.coin); + cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage(); + itUs->second.flags |= CCoinsCacheEntry::DIRTY; + // NOTE: It is possible the child has a FRESH flag here in + // the event the entry we found in the parent is pruned. 
But + // we must not copy that FRESH flag to the parent as that + // pruned state likely still needs to be communicated to the + // grandparent. } } - CCoinsMap::iterator itOld = it++; - mapCoins.erase(itOld); } hashBlock = hashBlockIn; return true; diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h index ddd4ee9fab..6e3bac2d0e 100644 --- a/src/consensus/consensus.h +++ b/src/consensus/consensus.h @@ -24,12 +24,9 @@ static const size_t MIN_TRANSACTION_WEIGHT = WITNESS_SCALE_FACTOR * 60; // 60 is static const size_t MIN_SERIALIZABLE_TRANSACTION_WEIGHT = WITNESS_SCALE_FACTOR * 10; // 10 is the lower bound for the size of a serialized CTransaction /** Flags for nSequence and nLockTime locks */ -enum { - /* Interpret sequence numbers as relative lock-time constraints. */ - LOCKTIME_VERIFY_SEQUENCE = (1 << 0), - - /* Use GetMedianTimePast() instead of nTime for end point timestamp. */ - LOCKTIME_MEDIAN_TIME_PAST = (1 << 1), -}; +/** Interpret sequence numbers as relative lock-time constraints. */ +static constexpr unsigned int LOCKTIME_VERIFY_SEQUENCE = (1 << 0); +/** Use GetMedianTimePast() instead of nTime for end point timestamp. */ +static constexpr unsigned int LOCKTIME_MEDIAN_TIME_PAST = (1 << 1); #endif // BITCOIN_CONSENSUS_CONSENSUS_H diff --git a/src/consensus/params.h b/src/consensus/params.h index 6240e82857..fd0946a612 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -7,6 +7,7 @@ #define BITCOIN_CONSENSUS_PARAMS_H #include "uint256.h" +#include <limits> #include <map> #include <string> @@ -31,6 +32,15 @@ struct BIP9Deployment { int64_t nStartTime; /** Timeout/expiry MedianTime for the deployment attempt. */ int64_t nTimeout; + + /** Constant for nTimeout very far in the future. */ + static constexpr int64_t NO_TIMEOUT = std::numeric_limits<int64_t>::max(); + + /** Special value for nStartTime indicating that the deployment is always active. + * This is useful for testing, as it means tests don't need to deal with the activation + * process (which takes at least 3 BIP9 intervals). Only tests that specifically test the + * behaviour during activation cannot use this. 
*/ + static constexpr int64_t ALWAYS_ACTIVE = -1; }; /** @@ -39,6 +49,8 @@ struct BIP9Deployment { struct Params { uint256 hashGenesisBlock; int nSubsidyHalvingInterval; + /** Block height at which BIP16 becomes active */ + int BIP16Height; /** Block height and hash at which BIP34 becomes active */ int BIP34Height; uint256 BIP34Hash; diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 93f0a18668..dbd09595c6 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -62,7 +62,7 @@ private: /* Pre-base64-encoded authentication token */ static std::string strRPCUserColonPass; /* Stored RPC timer interface (for unregistration) */ -static HTTPRPCTimerInterface* httpRPCTimerInterface = nullptr; +static std::unique_ptr<HTTPRPCTimerInterface> httpRPCTimerInterface; static void JSONErrorReply(HTTPRequest* req, const UniValue& objError, const UniValue& id) { @@ -238,8 +238,8 @@ bool StartHTTPRPC() RegisterHTTPHandler("/wallet/", false, HTTPReq_JSONRPC); #endif assert(EventBase()); - httpRPCTimerInterface = new HTTPRPCTimerInterface(EventBase()); - RPCSetTimerInterface(httpRPCTimerInterface); + httpRPCTimerInterface = MakeUnique<HTTPRPCTimerInterface>(EventBase()); + RPCSetTimerInterface(httpRPCTimerInterface.get()); return true; } @@ -253,8 +253,7 @@ void StopHTTPRPC() LogPrint(BCLog::RPC, "Stopping HTTP RPC server\n"); UnregisterHTTPHandler("/", true); if (httpRPCTimerInterface) { - RPCUnsetTimerInterface(httpRPCTimerInterface); - delete httpRPCTimerInterface; - httpRPCTimerInterface = nullptr; + RPCUnsetTimerInterface(httpRPCTimerInterface.get()); + httpRPCTimerInterface.reset(); } } diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 5923871691..f6cbaa20b7 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -24,6 +24,7 @@ #include <event2/thread.h> #include <event2/buffer.h> +#include <event2/bufferevent.h> #include <event2/util.h> #include <event2/keyvalq_struct.h> @@ -239,6 +240,16 @@ static std::string RequestMethodString(HTTPRequest::RequestMethod m) /** HTTP request callback */ static void http_request_cb(struct evhttp_request* req, void* arg) { + // Disable reading to work around a libevent bug, fixed in 2.2.0. + if (event_get_version_number() >= 0x02010600 && event_get_version_number() < 0x02020001) { + evhttp_connection* conn = evhttp_request_get_connection(req); + if (conn) { + bufferevent* bev = evhttp_connection_get_bufferevent(conn); + if (bev) { + bufferevent_disable(bev, EV_READ); + } + } + } std::unique_ptr<HTTPRequest> hreq(new HTTPRequest(req)); LogPrint(BCLog::HTTP, "Received a %s request for %s from %s\n", @@ -481,6 +492,8 @@ void StopHTTPServer() } if (eventBase) { LogPrint(BCLog::HTTP, "Waiting for HTTP event thread to exit\n"); + // Exit the event loop as soon as there are no active events. + event_base_loopexit(eventBase, nullptr); // Give event loop a few seconds to exit (to send back last RPC responses), then break it // Before this was solved with event_base_loopexit, but that didn't work as expected in // at least libevent 2.0.21 and always introduced a delay. 
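The added event_base_loopexit(eventBase, nullptr) asks libevent to leave the dispatch loop once the currently active callbacks have run, so shutdown no longer depends only on the timed break described in the surrounding comment. A minimal hedged sketch of the call; the base setup here is illustrative and registers no events:

    #include <event2/event.h>
    #include <cstdio>

    int main()
    {
        struct event_base* base = event_base_new();
        if (!base) return 1;

        // With a null timeout, loopexit makes dispatch return as soon as the
        // currently running callbacks (none in this toy example) are finished.
        event_base_loopexit(base, nullptr);

        // In the real server the dispatch call runs in a dedicated HTTP thread
        // and unblocks promptly after the loopexit request.
        event_base_dispatch(base);
        printf("event loop exited\n");

        event_base_free(base);
        return 0;
    }
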
In libevent @@ -599,8 +612,21 @@ void HTTPRequest::WriteReply(int nStatus, const std::string& strReply) struct evbuffer* evb = evhttp_request_get_output_buffer(req); assert(evb); evbuffer_add(evb, strReply.data(), strReply.size()); - HTTPEvent* ev = new HTTPEvent(eventBase, true, - std::bind(evhttp_send_reply, req, nStatus, (const char*)nullptr, (struct evbuffer *)nullptr)); + auto req_copy = req; + HTTPEvent* ev = new HTTPEvent(eventBase, true, [req_copy, nStatus]{ + evhttp_send_reply(req_copy, nStatus, nullptr, nullptr); + // Re-enable reading from the socket. This is the second part of the libevent + // workaround above. + if (event_get_version_number() >= 0x02010600 && event_get_version_number() < 0x02020001) { + evhttp_connection* conn = evhttp_request_get_connection(req_copy); + if (conn) { + bufferevent* bev = evhttp_connection_get_bufferevent(conn); + if (bev) { + bufferevent_enable(bev, EV_READ | EV_WRITE); + } + } + } + }); ev->trigger(nullptr); replySent = true; req = nullptr; // transferred back to main thread diff --git a/src/init.cpp b/src/init.cpp index f63ad7f5d3..7ac2cf2d32 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -152,7 +152,7 @@ public: // Writes do not need similar protection, as failure to write is handled by the caller. }; -static CCoinsViewErrorCatcher *pcoinscatcher = nullptr; +static std::unique_ptr<CCoinsViewErrorCatcher> pcoinscatcher; static std::unique_ptr<ECCVerifyHandle> globalVerifyHandle; void Interrupt(boost::thread_group& threadGroup) @@ -235,14 +235,10 @@ void Shutdown() if (pcoinsTip != nullptr) { FlushStateToDisk(); } - delete pcoinsTip; - pcoinsTip = nullptr; - delete pcoinscatcher; - pcoinscatcher = nullptr; - delete pcoinsdbview; - pcoinsdbview = nullptr; - delete pblocktree; - pblocktree = nullptr; + pcoinsTip.reset(); + pcoinscatcher.reset(); + pcoinsdbview.reset(); + pblocktree.reset(); } #ifdef ENABLE_WALLET StopWallets(); @@ -544,14 +540,14 @@ static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex } static bool fHaveGenesis = false; -static boost::mutex cs_GenesisWait; +static CWaitableCriticalSection cs_GenesisWait; static CConditionVariable condvar_GenesisWait; static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) { if (pBlockIndex != nullptr) { { - boost::unique_lock<boost::mutex> lock_GenesisWait(cs_GenesisWait); + WaitableLock lock_GenesisWait(cs_GenesisWait); fHaveGenesis = true; } condvar_GenesisWait.notify_all(); @@ -588,7 +584,7 @@ void CleanupBlockRevFiles() LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n"); fs::path blocksdir = GetDataDir() / "blocks"; for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) { - if (is_regular_file(*it) && + if (fs::is_regular_file(*it) && it->path().filename().string().length() == 12 && it->path().filename().string().substr(8,4) == ".dat") { @@ -1270,7 +1266,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler) g_connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()))); CConnman& connman = *g_connman; - peerLogic.reset(new PeerLogicValidation(&connman)); + peerLogic.reset(new PeerLogicValidation(&connman, scheduler)); RegisterValidationInterface(peerLogic.get()); // sanitize comments per BIP-0014, format user agent and check total size @@ -1406,12 +1402,10 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler) do { try { UnloadBlockIndex(); - delete 
pcoinsTip; - delete pcoinsdbview; - delete pcoinscatcher; - delete pblocktree; - - pblocktree = new CBlockTreeDB(nBlockTreeDBCache, false, fReset); + pcoinsTip.reset(); + pcoinsdbview.reset(); + pcoinscatcher.reset(); + pblocktree.reset(new CBlockTreeDB(nBlockTreeDBCache, false, fReset)); if (fReset) { pblocktree->WriteReindexing(true); @@ -1462,8 +1456,8 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler) // At this point we're either in reindex or we've loaded a useful // block tree into mapBlockIndex! - pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false, fReset || fReindexChainState); - pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview); + pcoinsdbview.reset(new CCoinsViewDB(nCoinDBCache, false, fReset || fReindexChainState)); + pcoinscatcher.reset(new CCoinsViewErrorCatcher(pcoinsdbview.get())); // If necessary, upgrade from older database format. // This is a no-op if we cleared the coinsviewdb with -reindex or -reindex-chainstate @@ -1473,13 +1467,13 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler) } // ReplayBlocks is a no-op if we cleared the coinsviewdb with -reindex or -reindex-chainstate - if (!ReplayBlocks(chainparams, pcoinsdbview)) { + if (!ReplayBlocks(chainparams, pcoinsdbview.get())) { strLoadError = _("Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate."); break; } // The on-disk coinsdb is now in a good state, create the cache - pcoinsTip = new CCoinsViewCache(pcoinscatcher); + pcoinsTip.reset(new CCoinsViewCache(pcoinscatcher.get())); bool is_coinsview_empty = fReset || fReindexChainState || pcoinsTip->GetBestBlock().IsNull(); if (!is_coinsview_empty) { @@ -1521,7 +1515,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler) } } - if (!CVerifyDB().VerifyDB(chainparams, pcoinsdbview, gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL), + if (!CVerifyDB().VerifyDB(chainparams, pcoinsdbview.get(), gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL), gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) { strLoadError = _("Corrupted block database detected"); break; @@ -1630,7 +1624,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler) // Wait for genesis block to be processed { - boost::unique_lock<boost::mutex> lock(cs_GenesisWait); + WaitableLock lock(cs_GenesisWait); while (!fHaveGenesis) { condvar_GenesisWait.wait(lock); } diff --git a/src/keystore.cpp b/src/keystore.cpp index 8454175ca8..e2ce474298 100644 --- a/src/keystore.cpp +++ b/src/keystore.cpp @@ -36,6 +36,33 @@ bool CBasicKeyStore::AddKeyPubKey(const CKey& key, const CPubKey &pubkey) return true; } +bool CBasicKeyStore::HaveKey(const CKeyID &address) const +{ + LOCK(cs_KeyStore); + return mapKeys.count(address) > 0; +} + +std::set<CKeyID> CBasicKeyStore::GetKeys() const +{ + LOCK(cs_KeyStore); + std::set<CKeyID> set_address; + for (const auto& mi : mapKeys) { + set_address.insert(mi.first); + } + return set_address; +} + +bool CBasicKeyStore::GetKey(const CKeyID &address, CKey &keyOut) const +{ + LOCK(cs_KeyStore); + KeyMap::const_iterator mi = mapKeys.find(address); + if (mi != mapKeys.end()) { + keyOut = mi->second; + return true; + } + return false; +} + bool CBasicKeyStore::AddCScript(const CScript& redeemScript) { if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) diff --git a/src/keystore.h b/src/keystore.h index 9b85ddb0ec..26ddff436f 100644 --- a/src/keystore.h +++ b/src/keystore.h @@ -62,37 +62,9 @@ protected: public: bool AddKeyPubKey(const CKey& key, const CPubKey 
&pubkey) override; bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override; - bool HaveKey(const CKeyID &address) const override - { - bool result; - { - LOCK(cs_KeyStore); - result = (mapKeys.count(address) > 0); - } - return result; - } - std::set<CKeyID> GetKeys() const override - { - LOCK(cs_KeyStore); - std::set<CKeyID> set_address; - for (const auto& mi : mapKeys) { - set_address.insert(mi.first); - } - return set_address; - } - bool GetKey(const CKeyID &address, CKey &keyOut) const override - { - { - LOCK(cs_KeyStore); - KeyMap::const_iterator mi = mapKeys.find(address); - if (mi != mapKeys.end()) - { - keyOut = mi->second; - return true; - } - } - return false; - } + bool HaveKey(const CKeyID &address) const override; + std::set<CKeyID> GetKeys() const override; + bool GetKey(const CKeyID &address, CKey &keyOut) const override; bool AddCScript(const CScript& redeemScript) override; bool HaveCScript(const CScriptID &hash) const override; bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const override; diff --git a/src/net.cpp b/src/net.cpp index 258599747a..0f9de81d7f 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -961,6 +961,16 @@ static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEviction return a.nTimeConnected > b.nTimeConnected; } + +//! Sort an array by the specified comparator, then erase the last K elements. +template<typename T, typename Comparator> +static void EraseLastKElements(std::vector<T> &elements, Comparator comparator, size_t k) +{ + std::sort(elements.begin(), elements.end(), comparator); + size_t eraseSize = std::min(k, elements.size()); + elements.erase(elements.end() - eraseSize, elements.end()); +} + /** Try to find a connection to evict when the node is full. * Extreme care must be taken to avoid opening the node to attacker * triggered network partitioning. @@ -990,42 +1000,23 @@ bool CConnman::AttemptToEvictConnection() } } - if (vEvictionCandidates.empty()) return false; - // Protect connections with certain characteristics // Deterministically select 4 peers to protect by netgroup. // An attacker cannot predict which netgroups will be protected - std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNetGroupKeyed); - vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(4, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end()); - - if (vEvictionCandidates.empty()) return false; - + EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4); // Protect the 8 nodes with the lowest minimum ping time. // An attacker cannot manipulate this metric without physically moving nodes closer to the target. - std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeMinPingTime); - vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(8, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end()); - - if (vEvictionCandidates.empty()) return false; - + EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8); // Protect 4 nodes that most recently sent us transactions. // An attacker cannot manipulate this metric without performing useful work. 
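Each of these repeated sort-then-truncate blocks collapses into the EraseLastKElements helper introduced just above: sort the candidates with the given comparator, then drop at most the last k entries, which is how protected peers are removed from the eviction list. A small standalone sketch of the same pattern with illustrative data:

    #include <algorithm>
    #include <cstdio>
    #include <functional>
    #include <vector>

    // Sort by `comparator`, then erase the last k elements (or fewer if the vector is short).
    template <typename T, typename Comparator>
    static void EraseLastKElements(std::vector<T>& elements, Comparator comparator, size_t k)
    {
        std::sort(elements.begin(), elements.end(), comparator);
        const size_t erase_size = std::min(k, elements.size());
        elements.erase(elements.end() - erase_size, elements.end());
    }

    int main()
    {
        std::vector<int> candidates{90, 15, 42, 7, 63};
        // Sort ascending and drop the last two elements: 90 and 63 are removed from the
        // candidate list, which in the real code is how peers are protected from eviction.
        EraseLastKElements(candidates, std::less<int>(), 2);
        for (int v : candidates) printf("%d ", v);  // prints: 7 15 42
        printf("\n");
        return 0;
    }
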
- std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeTXTime); - vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(4, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end()); - - if (vEvictionCandidates.empty()) return false; - + EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4); // Protect 4 nodes that most recently sent us blocks. // An attacker cannot manipulate this metric without performing useful work. - std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeBlockTime); - vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(4, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end()); - - if (vEvictionCandidates.empty()) return false; - + EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4); // Protect the half of the remaining nodes which have been connected the longest. // This replicates the non-eviction implicit behavior, and precludes attacks that start later. - std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeTimeConnected); - vEvictionCandidates.erase(vEvictionCandidates.end() - static_cast<int>(vEvictionCandidates.size() / 2), vEvictionCandidates.end()); + EraseLastKElements(vEvictionCandidates, ReverseCompareNodeTimeConnected, vEvictionCandidates.size() / 2); if (vEvictionCandidates.empty()) return false; @@ -1036,12 +1027,12 @@ bool CConnman::AttemptToEvictConnection() int64_t nMostConnectionsTime = 0; std::map<uint64_t, std::vector<NodeEvictionCandidate> > mapNetGroupNodes; for (const NodeEvictionCandidate &node : vEvictionCandidates) { - mapNetGroupNodes[node.nKeyedNetGroup].push_back(node); - int64_t grouptime = mapNetGroupNodes[node.nKeyedNetGroup][0].nTimeConnected; - size_t groupsize = mapNetGroupNodes[node.nKeyedNetGroup].size(); + std::vector<NodeEvictionCandidate> &group = mapNetGroupNodes[node.nKeyedNetGroup]; + group.push_back(node); + int64_t grouptime = group[0].nTimeConnected; - if (groupsize > nMostConnections || (groupsize == nMostConnections && grouptime > nMostConnectionsTime)) { - nMostConnections = groupsize; + if (group.size() > nMostConnections || (group.size() == nMostConnections && grouptime > nMostConnectionsTime)) { + nMostConnections = group.size(); nMostConnectionsTime = grouptime; naMostConnections = node.nKeyedNetGroup; } @@ -1543,22 +1534,20 @@ void ThreadMapPort() void MapPort(bool fUseUPnP) { - static boost::thread* upnp_thread = nullptr; + static std::unique_ptr<boost::thread> upnp_thread; if (fUseUPnP) { if (upnp_thread) { upnp_thread->interrupt(); upnp_thread->join(); - delete upnp_thread; } - upnp_thread = new boost::thread(boost::bind(&TraceThread<void (*)()>, "upnp", &ThreadMapPort)); + upnp_thread.reset(new boost::thread(boost::bind(&TraceThread<void (*)()>, "upnp", &ThreadMapPort))); } else if (upnp_thread) { upnp_thread->interrupt(); upnp_thread->join(); - delete upnp_thread; - upnp_thread = nullptr; + upnp_thread.reset(); } } @@ -1693,6 +1682,37 @@ void CConnman::ProcessOneShot() } } +bool CConnman::GetTryNewOutboundPeer() +{ + return m_try_another_outbound_peer; +} + +void CConnman::SetTryNewOutboundPeer(bool flag) +{ + m_try_another_outbound_peer = flag; + LogPrint(BCLog::NET, "net: setting try another outbound peer=%s\n", flag ? 
"true" : "false"); +} + +// Return the number of peers we have over our outbound connection limit +// Exclude peers that are marked for disconnect, or are going to be +// disconnected soon (eg one-shots and feelers) +// Also exclude peers that haven't finished initial connection handshake yet +// (so that we don't decide we're over our desired connection limit, and then +// evict some peer that has finished the handshake) +int CConnman::GetExtraOutboundCount() +{ + int nOutbound = 0; + { + LOCK(cs_vNodes); + for (CNode* pnode : vNodes) { + if (!pnode->fInbound && !pnode->m_manual_connection && !pnode->fFeeler && !pnode->fDisconnect && !pnode->fOneShot && pnode->fSuccessfullyConnected) { + ++nOutbound; + } + } + } + return std::max(nOutbound - nMaxOutbound, 0); +} + void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) { // Connect to specific addresses @@ -1781,7 +1801,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // * Only make a feeler connection once every few minutes. // bool fFeeler = false; - if (nOutbound >= nMaxOutbound) { + + if (nOutbound >= nMaxOutbound && !GetTryNewOutboundPeer()) { int64_t nTime = GetTimeMicros(); // The current time right now (in microseconds). if (nTime > nNextFeeler) { nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL); @@ -2201,9 +2222,8 @@ CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In) : nSeed0(nSeed0In), nSe nLastNodeId = 0; nSendBufferMaxSize = 0; nReceiveFloodSize = 0; - semOutbound = nullptr; - semAddnode = nullptr; flagInterruptMsgProc = false; + SetTryNewOutboundPeer(false); Options connOptions; Init(connOptions); @@ -2307,11 +2327,11 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) if (semOutbound == nullptr) { // initialize semaphore - semOutbound = new CSemaphore(std::min((nMaxOutbound + nMaxFeeler), nMaxConnections)); + semOutbound = MakeUnique<CSemaphore>(std::min((nMaxOutbound + nMaxFeeler), nMaxConnections)); } if (semAddnode == nullptr) { // initialize semaphore - semAddnode = new CSemaphore(nMaxAddnode); + semAddnode = MakeUnique<CSemaphore>(nMaxAddnode); } // @@ -2434,10 +2454,8 @@ void CConnman::Stop() vNodes.clear(); vNodesDisconnected.clear(); vhListenSocket.clear(); - delete semOutbound; - semOutbound = nullptr; - delete semAddnode; - semAddnode = nullptr; + semOutbound.reset(); + semAddnode.reset(); } void CConnman::DeleteNode(CNode* pnode) @@ -2723,7 +2741,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn nNextInvSend = 0; fRelayTxes = false; fSentAddr = false; - pfilter = new CBloomFilter(); + pfilter = MakeUnique<CBloomFilter>(); timeLastMempoolReq = 0; nLastBlockTime = 0; nLastTXTime = 0; @@ -2753,9 +2771,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn CNode::~CNode() { CloseSocket(hSocket); - - if (pfilter) - delete pfilter; } void CNode::AskFor(const CInv& inv) @@ -251,6 +251,19 @@ public: void GetBanned(banmap_t &banmap); void SetBanned(const banmap_t &banmap); + // This allows temporarily exceeding nMaxOutbound, with the goal of finding + // a peer that is better than all our current peers. + void SetTryNewOutboundPeer(bool flag); + bool GetTryNewOutboundPeer(); + + // Return the number of outbound peers we have in excess of our target (eg, + // if we previously called SetTryNewOutboundPeer(true), and have since set + // to false, we may have extra peers that we wish to disconnect). 
This may + // return a value less than (num_outbound_connections - num_outbound_slots) + // in cases where some outbound connections are not yet fully connected, or + // not yet fully disconnected. + int GetExtraOutboundCount(); + bool AddNode(const std::string& node); bool RemoveAddedNode(const std::string& node); std::vector<AddedNodeInfo> GetAddedNodeInfo(); @@ -386,8 +399,8 @@ private: /** Services this instance offers */ ServiceFlags nLocalServices; - CSemaphore *semOutbound; - CSemaphore *semAddnode; + std::unique_ptr<CSemaphore> semOutbound; + std::unique_ptr<CSemaphore> semAddnode; int nMaxConnections; int nMaxOutbound; int nMaxAddnode; @@ -413,6 +426,13 @@ private: std::thread threadOpenAddedConnections; std::thread threadOpenConnections; std::thread threadMessageHandler; + + /** flag for deciding to connect to an extra outbound peer, + * in excess of nMaxOutbound + * This takes the place of a feeler connection */ + std::atomic_bool m_try_another_outbound_peer; + + friend struct CConnmanTest; }; extern std::unique_ptr<CConnman> g_connman; void Discover(boost::thread_group& threadGroup); @@ -628,7 +648,7 @@ public: bool fSentAddr; CSemaphoreGrant grantOutbound; CCriticalSection cs_filter; - CBloomFilter* pfilter; + std::unique_ptr<CBloomFilter> pfilter; std::atomic<int> nRefCount; const uint64_t nKeyedNetGroup; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 6c26cd4cee..8e503f89db 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -23,6 +23,7 @@ #include "primitives/transaction.h" #include "random.h" #include "reverse_iterator.h" +#include "scheduler.h" #include "tinyformat.h" #include "txmempool.h" #include "ui_interface.h" @@ -124,6 +125,12 @@ namespace { /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads = 0; + /** Number of outbound peers with m_chain_sync.m_protect. */ + int g_outbound_peers_with_protect_from_disconnect = 0; + + /** When our tip was last updated. */ + int64_t g_last_tip_update = 0; + /** Relay map, protected by cs_main. */ typedef std::map<uint256, CTransactionRef> MapRelay; MapRelay mapRelay; @@ -201,6 +208,36 @@ struct CNodeState { */ bool fSupportsDesiredCmpctVersion; + /** State used to enforce CHAIN_SYNC_TIMEOUT + * Only in effect for outbound, non-manual connections, with + * m_protect == false + * Algorithm: if a peer's best known block has less work than our tip, + * set a timeout CHAIN_SYNC_TIMEOUT seconds in the future: + * - If at timeout their best known block now has more work than our tip + * when the timeout was set, then either reset the timeout or clear it + * (after comparing against our current tip's work) + * - If at timeout their best known block still has less work than our + * tip did when the timeout was set, then send a getheaders message, + * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future. + * If their best known block is still behind when that new timeout is + * reached, disconnect. + */ + struct ChainSyncTimeoutState { + //! A timeout used for checking whether our peer has sufficiently synced + int64_t m_timeout; + //! A header with the work we require on our peer's chain + const CBlockIndex * m_work_header; + //! After timeout is reached, set to true after sending getheaders + bool m_sent_getheaders; + //! Whether this peer is protected from disconnection due to a bad/slow chain + bool m_protect; + }; + + ChainSyncTimeoutState m_chain_sync; + + //! 
Time of last new block announcement + int64_t m_last_block_announcement; + CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) { fCurrentlyConnected = false; nMisbehavior = 0; @@ -223,6 +260,8 @@ struct CNodeState { fHaveWitness = false; fWantsCmpctWitness = false; fSupportsDesiredCmpctVersion = false; + m_chain_sync = { 0, nullptr, false, false }; + m_last_block_announcement = 0; } }; @@ -396,6 +435,15 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) { } } +bool TipMayBeStale(const Consensus::Params &consensusParams) +{ + AssertLockHeld(cs_main); + if (g_last_tip_update == 0) { + g_last_tip_update = GetTime(); + } + return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty(); +} + // Requires cs_main bool CanDirectFetch(const Consensus::Params &consensusParams) { @@ -502,6 +550,22 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<con } // namespace +// This function is used for testing the stale tip eviction logic, see +// DoS_tests.cpp +void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) +{ + LOCK(cs_main); + CNodeState *state = State(node); + if (state) state->m_last_block_announcement = time_in_seconds; +} + +// Returns true for outbound peers, excluding manual connections, feelers, and +// one-shots +bool IsOutboundDisconnectionCandidate(const CNode *node) +{ + return !(node->fInbound || node->m_manual_connection || node->fFeeler || node->fOneShot); +} + void PeerLogicValidation::InitializeNode(CNode *pnode) { CAddress addr = pnode->addr; std::string addrName = pnode->GetAddrName(); @@ -534,6 +598,8 @@ void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTim nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); + g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; + assert(g_outbound_peers_with_protect_from_disconnect >= 0); mapNodeState.erase(nodeid); @@ -542,6 +608,7 @@ void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTim assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); + assert(g_outbound_peers_with_protect_from_disconnect == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } @@ -566,7 +633,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { // mapOrphanTransactions // -void AddToCompactExtraTransactions(const CTransactionRef& tx) +void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); if (max_extra_txn <= 0) @@ -714,18 +781,28 @@ void Misbehaving(NodeId pnode, int howmuch) // To prevent fingerprinting attacks, only send blocks/headers outside of the // active chain if they are no more than a month older (both in time, and in -// best equivalent proof of work) than the best header chain we know about. -static bool StaleBlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) +// best equivalent proof of work) than the best header chain we know about and +// we fully-validated them at some point. 
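TipMayBeStale, added a few hunks above, treats the current tip as possibly stale once no block has been connected for three target spacings (30 minutes on mainnet) and no blocks are in flight. A hedged standalone sketch of that predicate; the constant and the plain time() call stand in for consensusParams.nPowTargetSpacing and GetTime():

    #include <cstdint>
    #include <cstdio>
    #include <ctime>

    static const int64_t POW_TARGET_SPACING = 600;  // stand-in for the consensus parameter (seconds)

    // The tip "may be stale" if nothing new arrived for ~3 spacings and we are not
    // already downloading blocks that could explain the silence.
    bool TipMayBeStale(int64_t last_tip_update, size_t blocks_in_flight)
    {
        const int64_t now = time(nullptr);
        return last_tip_update < now - POW_TARGET_SPACING * 3 && blocks_in_flight == 0;
    }

    int main()
    {
        const int64_t now = time(nullptr);
        printf("fresh tip stale? %d\n", (int)TipMayBeStale(now, 0));            // 0
        printf("hour-old tip stale? %d\n", (int)TipMayBeStale(now - 3600, 0));  // 1
        return 0;
    }
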
+static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) { AssertLockHeld(cs_main); - return (pindexBestHeader != nullptr) && + if (chainActive.Contains(pindex)) return true; + return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT); } -PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) { +PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &scheduler) : connman(connmanIn), m_stale_tip_check_time(0) { // Initialize global variables that cannot be constructed at startup. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); + + const Consensus::Params& consensusParams = Params().GetConsensus(); + // Stale tip checking and peer eviction are on two different timers, but we + // don't want them to get out of sync due to drift in the scheduler, so we + // combine them in one function and schedule at the quicker (peer-eviction) + // timer. + static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer"); + scheduler.scheduleEvery(std::bind(&PeerLogicValidation::CheckForStaleTipAndEvictPeers, this, consensusParams), EXTRA_PEER_CHECK_INTERVAL * 1000); } void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) { @@ -756,6 +833,8 @@ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pb } LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } + + g_last_tip_update = GetTime(); } // All of the following cache a recent block, and are protected by cs_most_recent_block @@ -997,14 +1076,9 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam CValidationState dummy; ActivateBestChain(dummy, Params(), a_recent_block); } - if (chainActive.Contains(mi->second)) { - send = true; - } else { - send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && - StaleBlockRequestAllowed(mi->second, consensusParams); - if (!send) { - LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId()); - } + send = BlockRequestAllowed(mi->second, consensusParams); + if (!send) { + LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId()); } } // disconnect node in case we have reached the outbound limit for serving historical blocks @@ -1164,6 +1238,225 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp)); } +bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool punish_duplicate_invalid) +{ + const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); + size_t nCount = headers.size(); + + if (nCount == 0) { + // Nothing interesting. Stop asking this peers for more headers. 
+ return true; + } + + bool received_new_header = false; + const CBlockIndex *pindexLast = nullptr; + { + LOCK(cs_main); + CNodeState *nodestate = State(pfrom->GetId()); + + // If this looks like it could be a block announcement (nCount < + // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that + // don't connect: + // - Send a getheaders message in response to try to connect the chain. + // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that + // don't connect before giving DoS points + // - Once a headers message is received that is valid and does connect, + // nUnconnectingHeaders gets reset back to 0. + if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) { + nodestate->nUnconnectingHeaders++; + connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); + LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", + headers[0].GetHash().ToString(), + headers[0].hashPrevBlock.ToString(), + pindexBestHeader->nHeight, + pfrom->GetId(), nodestate->nUnconnectingHeaders); + // Set hashLastUnknownBlock for this peer, so that if we + // eventually get the headers - even from a different peer - + // we can use this peer to download. + UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash()); + + if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { + Misbehaving(pfrom->GetId(), 20); + } + return true; + } + + uint256 hashLastBlock; + for (const CBlockHeader& header : headers) { + if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { + Misbehaving(pfrom->GetId(), 20); + return error("non-continuous headers sequence"); + } + hashLastBlock = header.GetHash(); + } + + // If we don't have the last header, then they'll have given us + // something new (if these headers are valid). + if (mapBlockIndex.find(hashLastBlock) == mapBlockIndex.end()) { + received_new_header = true; + } + } + + CValidationState state; + CBlockHeader first_invalid_header; + if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast, &first_invalid_header)) { + int nDoS; + if (state.IsInvalid(nDoS)) { + LOCK(cs_main); + if (nDoS > 0) { + Misbehaving(pfrom->GetId(), nDoS); + } + if (punish_duplicate_invalid && mapBlockIndex.find(first_invalid_header.GetHash()) != mapBlockIndex.end()) { + // Goal: don't allow outbound peers to use up our outbound + // connection slots if they are on incompatible chains. + // + // We ask the caller to set punish_invalid appropriately based + // on the peer and the method of header delivery (compact + // blocks are allowed to be invalid in some circumstances, + // under BIP 152). + // Here, we try to detect the narrow situation that we have a + // valid block header (ie it was valid at the time the header + // was received, and hence stored in mapBlockIndex) but know the + // block is invalid, and that a peer has announced that same + // block as being on its active chain. + // Disconnect the peer in such a situation. + // + // Note: if the header that is invalid was not accepted to our + // mapBlockIndex at all, that may also be grounds for + // disconnecting the peer, as the chain they are on is likely + // to be incompatible. However, there is a circumstance where + // that does not hold: if the header's timestamp is more than + // 2 hours ahead of our current time. 
In that case, the header + // may become valid in the future, and we don't want to + // disconnect a peer merely for serving us one too-far-ahead + // block header, to prevent an attacker from splitting the + // network by mining a block right at the 2 hour boundary. + // + // TODO: update the DoS logic (or, rather, rewrite the + // DoS-interface between validation and net_processing) so that + // the interface is cleaner, and so that we disconnect on all the + // reasons that a peer's headers chain is incompatible + // with ours (eg block->nVersion softforks, MTP violations, + // etc), and not just the duplicate-invalid case. + pfrom->fDisconnect = true; + } + return error("invalid header received"); + } + } + + { + LOCK(cs_main); + CNodeState *nodestate = State(pfrom->GetId()); + if (nodestate->nUnconnectingHeaders > 0) { + LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders); + } + nodestate->nUnconnectingHeaders = 0; + + assert(pindexLast); + UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); + + // From here, pindexBestKnownBlock should be guaranteed to be non-null, + // because it is set in UpdateBlockAvailability. Some nullptr checks + // are still present, however, as belt-and-suspenders. + + if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) { + nodestate->m_last_block_announcement = GetTime(); + } + + if (nCount == MAX_HEADERS_RESULTS) { + // Headers message had its maximum size; the peer may have more headers. + // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue + // from there instead. + LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight); + connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256())); + } + + bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); + // If this set of headers is valid and ends in a block with at least as + // much work as our tip, download as much as possible. + if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) { + std::vector<const CBlockIndex*> vToFetch; + const CBlockIndex *pindexWalk = pindexLast; + // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. + while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && + !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) && + (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) { + // We don't have this block, and it's not yet in flight. + vToFetch.push_back(pindexWalk); + } + pindexWalk = pindexWalk->pprev; + } + // If pindexWalk still isn't on our main chain, we're looking at a + // very large reorg at a time we think we're close to caught up to + // the main chain -- this shouldn't really happen. Bail out on the + // direct fetch and rely on parallel download instead. + if (!chainActive.Contains(pindexWalk)) { + LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", + pindexLast->GetBlockHash().ToString(), + pindexLast->nHeight); + } else { + std::vector<CInv> vGetData; + // Download as much as possible, from earliest to latest. 
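The loop that follows walks vToFetch with the project's reverse_iterate helper because the vector was built walking backwards from the announced tip; iterating it in reverse requests blocks in ascending height order, matching the comment above. The plain standard-library equivalent, sketched with illustrative heights:

    #include <cstdio>
    #include <vector>

    int main()
    {
        // vToFetch is filled from the announced tip downwards, so the lowest height ends up last.
        std::vector<int> to_fetch{105, 104, 103, 102, 101};

        // Reverse iteration restores ascending height order for the download requests.
        for (auto it = to_fetch.rbegin(); it != to_fetch.rend(); ++it) {
            printf("request block at height %d\n", *it);  // 101, 102, ..., 105
        }
        return 0;
    }
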
+ for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) { + if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + // Can't download any more from this peer + break; + } + uint32_t nFetchFlags = GetFetchFlags(pfrom); + vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); + MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex); + LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", + pindex->GetBlockHash().ToString(), pfrom->GetId()); + } + if (vGetData.size() > 1) { + LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", + pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); + } + if (vGetData.size() > 0) { + if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) { + // In any case, we want to download using a compact block, not a regular one + vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); + } + connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + } + } + } + // If we're in IBD, we want outbound peers that will serve us a useful + // chain. Disconnect peers that are on chains with insufficient work. + if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) { + // When nCount < MAX_HEADERS_RESULTS, we know we have no more + // headers to fetch from this peer. + if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { + // This peer has too little work on their headers chain to help + // us sync -- disconnect if using an outbound slot (unless + // whitelisted or addnode). + // Note: We compare their tip to nMinimumChainWork (rather than + // chainActive.Tip()) because we won't start block download + // until we have a headers chain that has at least + // nMinimumChainWork, even if a peer has a chain past our tip, + // as an anti-DoS measure. + if (IsOutboundDisconnectionCandidate(pfrom)) { + LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId()); + pfrom->fDisconnect = true; + } + } + } + + if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) { + // If this is an outbound peer, check to see if we should protect + // it from the bad/lagging chain logic. 
+ if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { + LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId()); + nodestate->m_chain_sync.m_protect = true; + ++g_outbound_peers_with_protect_from_disconnect; + } + } + } + + return true; +} + bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc) { LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId()); @@ -1738,8 +2031,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr return true; pindex = (*mi).second; - if (!chainActive.Contains(pindex) && - !StaleBlockRequestAllowed(pindex, chainparams.GetConsensus())) { + if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) { LogPrintf("%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom->GetId()); return true; } @@ -1810,7 +2102,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, ptx, &fMissingInputs, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) { - mempool.check(pcoinsTip); + mempool.check(pcoinsTip.get()); RelayTransaction(tx, connman); for (unsigned int i = 0; i < tx.vout.size(); i++) { vWorkQueue.emplace_back(inv.hash, i); @@ -1877,7 +2169,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr recentRejects->insert(orphanHash); } } - mempool.check(pcoinsTip); + mempool.check(pcoinsTip.get()); } } @@ -1971,6 +2263,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr CBlockHeaderAndShortTxIDs cmpctblock; vRecv >> cmpctblock; + bool received_new_header = false; + { LOCK(cs_main); @@ -1980,6 +2274,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); return true; } + + if (mapBlockIndex.find(cmpctblock.header.GetHash()) == mapBlockIndex.end()) { + received_new_header = true; + } } const CBlockIndex *pindex = nullptr; @@ -2006,7 +2304,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr // If we end up treating this as a plain headers message, call that as well // without cs_main. 
bool fRevertToHeaderProcessing = false; - CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION); // Keep a CBlock for "optimistic" compactblock reconstructions (see // below) @@ -2019,6 +2316,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr assert(pindex); UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash()); + CNodeState *nodestate = State(pfrom->GetId()); + + // If this was a new header with more work than our tip, update the + // peer's last block announcement time + if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) { + nodestate->m_last_block_announcement = GetTime(); + } + std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); @@ -2041,8 +2346,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus())) return true; - CNodeState *nodestate = State(pfrom->GetId()); - if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) { // Don't bother trying to process compact blocks from v1 peers // after segwit activates. @@ -2123,10 +2426,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr return true; } else { // If this was an announce-cmpctblock, we want the same treatment as a header message - // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions) - std::vector<CBlock> headers; - headers.push_back(cmpctblock.header); - vHeadersMsg << headers; fRevertToHeaderProcessing = true; } } @@ -2135,8 +2434,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr if (fProcessBLOCKTXN) return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc); - if (fRevertToHeaderProcessing) - return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman, interruptMsgProc); + if (fRevertToHeaderProcessing) { + // Headers received from HB compact block peers are permitted to be + // relayed before full validation (see BIP 152), so we don't want to disconnect + // the peer if the header turns out to be for an invalid block. + // Note that if a peer tries to build on an invalid chain, that + // will be detected and the peer will be banned. + return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false); + } if (fBlockReconstructed) { // If we got here, we were able to optimistically reconstruct a @@ -2146,7 +2451,16 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false)); } bool fNewBlock = false; - ProcessNewBlock(chainparams, pblock, true, &fNewBlock); + // Setting fForceProcessing to true means that we bypass some of + // our anti-DoS protections in AcceptBlock, which filters + // unrequested blocks that might be trying to waste our resources + // (eg disk space). 
Because we only try to reconstruct blocks when + // we're close to caught up (via the CanDirectFetch() requirement + // above, combined with the behavior of not requesting blocks until + // we have a chain with at least nMinimumChainWork), and we ignore + // compact blocks with less work than our tip, it is safe to treat + // reconstructed compact blocks as having been requested. + ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } else { @@ -2226,7 +2540,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr bool fNewBlock = false; // Since we requested this block (it was in mapBlocksInFlight), force it to be processed, // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc) - ProcessNewBlock(chainparams, pblock, true, &fNewBlock); + // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent + // disk-space attacks), but this should be safe due to the + // protections in the compact block handler -- see related comment + // in compact block optimistic reconstruction handling. + ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } else { @@ -2254,136 +2572,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr ReadCompactSize(vRecv); // ignore tx count; assume it is 0. } - if (nCount == 0) { - // Nothing interesting. Stop asking this peers for more headers. - return true; - } - - const CBlockIndex *pindexLast = nullptr; - { - LOCK(cs_main); - CNodeState *nodestate = State(pfrom->GetId()); - - // If this looks like it could be a block announcement (nCount < - // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that - // don't connect: - // - Send a getheaders message in response to try to connect the chain. - // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that - // don't connect before giving DoS points - // - Once a headers message is received that is valid and does connect, - // nUnconnectingHeaders gets reset back to 0. - if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) { - nodestate->nUnconnectingHeaders++; - connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); - LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", - headers[0].GetHash().ToString(), - headers[0].hashPrevBlock.ToString(), - pindexBestHeader->nHeight, - pfrom->GetId(), nodestate->nUnconnectingHeaders); - // Set hashLastUnknownBlock for this peer, so that if we - // eventually get the headers - even from a different peer - - // we can use this peer to download. 
- UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash()); - - if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { - Misbehaving(pfrom->GetId(), 20); - } - return true; - } - - uint256 hashLastBlock; - for (const CBlockHeader& header : headers) { - if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { - Misbehaving(pfrom->GetId(), 20); - return error("non-continuous headers sequence"); - } - hashLastBlock = header.GetHash(); - } - } - - CValidationState state; - if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) { - int nDoS; - if (state.IsInvalid(nDoS)) { - if (nDoS > 0) { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), nDoS); - } - return error("invalid header received"); - } - } - - { - LOCK(cs_main); - CNodeState *nodestate = State(pfrom->GetId()); - if (nodestate->nUnconnectingHeaders > 0) { - LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders); - } - nodestate->nUnconnectingHeaders = 0; - - assert(pindexLast); - UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); - - if (nCount == MAX_HEADERS_RESULTS) { - // Headers message had its maximum size; the peer may have more headers. - // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue - // from there instead. - LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight); - connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256())); - } - - bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); - // If this set of headers is valid and ends in a block with at least as - // much work as our tip, download as much as possible. - if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) { - std::vector<const CBlockIndex*> vToFetch; - const CBlockIndex *pindexWalk = pindexLast; - // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. - while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && - !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) && - (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) { - // We don't have this block, and it's not yet in flight. - vToFetch.push_back(pindexWalk); - } - pindexWalk = pindexWalk->pprev; - } - // If pindexWalk still isn't on our main chain, we're looking at a - // very large reorg at a time we think we're close to caught up to - // the main chain -- this shouldn't really happen. Bail out on the - // direct fetch and rely on parallel download instead. - if (!chainActive.Contains(pindexWalk)) { - LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", - pindexLast->GetBlockHash().ToString(), - pindexLast->nHeight); - } else { - std::vector<CInv> vGetData; - // Download as much as possible, from earliest to latest. 
- for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) { - if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - // Can't download any more from this peer - break; - } - uint32_t nFetchFlags = GetFetchFlags(pfrom); - vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex); - LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", - pindex->GetBlockHash().ToString(), pfrom->GetId()); - } - if (vGetData.size() > 1) { - LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", - pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); - } - if (vGetData.size() > 0) { - if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) { - // In any case, we want to download using a compact block, not a regular one - vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); - } - connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); - } - } - } - } + // Headers received via a HEADERS message should be valid, and reflect + // the chain the peer is on. If we receive a known-invalid header, + // disconnect the peer if it is using one of our outbound connection + // slots. + bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection; + return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish); } else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing @@ -2393,11 +2587,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId()); - // Process all blocks from whitelisted peers, even if not requested, - // unless we're still syncing with the network. - // Such an unrequested block may still be processed, subject to the - // conditions in AcceptBlock(). 
- bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload(); + bool forceProcessing = false; const uint256 hash(pblock->GetHash()); { LOCK(cs_main); @@ -2561,8 +2751,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr else { LOCK(pfrom->cs_filter); - delete pfrom->pfilter; - pfrom->pfilter = new CBloomFilter(filter); + pfrom->pfilter.reset(new CBloomFilter(filter)); pfrom->pfilter->UpdateEmptyFull(); pfrom->fRelayTxes = true; } @@ -2598,8 +2787,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr { LOCK(pfrom->cs_filter); if (pfrom->GetLocalServices() & NODE_BLOOM) { - delete pfrom->pfilter; - pfrom->pfilter = new CBloomFilter(); + pfrom->pfilter.reset(new CBloomFilter()); } pfrom->fRelayTxes = true; } @@ -2781,6 +2969,135 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter return fMoreWork; } +void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds) +{ + AssertLockHeld(cs_main); + + CNodeState &state = *State(pto->GetId()); + const CNetMsgMaker msgMaker(pto->GetSendVersion()); + + if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) { + // This is an outbound peer subject to disconnection if they don't + // announce a block with as much work as the current tip within + // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if + // their chain has more work than ours, we should sync to it, + // unless it's invalid, in which case we should find that out and + // disconnect from them elsewhere). + if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork) { + if (state.m_chain_sync.m_timeout != 0) { + state.m_chain_sync.m_timeout = 0; + state.m_chain_sync.m_work_header = nullptr; + state.m_chain_sync.m_sent_getheaders = false; + } + } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) { + // Our best block known by this peer is behind our tip, and we're either noticing + // that for the first time, OR this peer was able to catch up to some earlier point + // where we checked against our tip. + // Either way, set a new timeout based on current tip. + state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT; + state.m_chain_sync.m_work_header = chainActive.Tip(); + state.m_chain_sync.m_sent_getheaders = false; + } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) { + // No evidence yet that our peer has synced to a chain with work equal to that + // of our tip, when we first detected it was behind. Send a single getheaders + // message to give the peer a chance to update us. + if (state.m_chain_sync.m_sent_getheaders) { + // They've run out of time to catch up! + LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>"); + pto->fDisconnect = true; + } else { + LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? 
state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString()); + connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(state.m_chain_sync.m_work_header->pprev), uint256())); + state.m_chain_sync.m_sent_getheaders = true; + constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes + // Bump the timeout to allow a response, which could clear the timeout + // (if the response shows the peer has synced), reset the timeout (if + // the peer syncs to the required work but not to our tip), or result + // in disconnect (if we advance to the timeout and pindexBestKnownBlock + // has not sufficiently progressed) + state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME; + } + } + } +} + +void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds) +{ + // Check whether we have too many outbound peers + int extra_peers = connman->GetExtraOutboundCount(); + if (extra_peers > 0) { + // If we have more outbound peers than we target, disconnect one. + // Pick the outbound peer that least recently announced + // us a new block, with ties broken by choosing the more recent + // connection (higher node id) + NodeId worst_peer = -1; + int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max(); + + LOCK(cs_main); + + connman->ForEachNode([&](CNode* pnode) { + // Ignore non-outbound peers, or nodes marked for disconnect already + if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) return; + CNodeState *state = State(pnode->GetId()); + if (state == nullptr) return; // shouldn't be possible, but just in case + // Don't evict our protected peers + if (state->m_chain_sync.m_protect) return; + if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) { + worst_peer = pnode->GetId(); + oldest_block_announcement = state->m_last_block_announcement; + } + }); + if (worst_peer != -1) { + bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) { + // Only disconnect a peer that has been connected to us for + // some reasonable fraction of our check-frequency, to give + // it time for new information to have arrived. + // Also don't disconnect any peer we're trying to download a + // block from. + CNodeState &state = *State(pnode->GetId()); + if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) { + LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); + pnode->fDisconnect = true; + return true; + } else { + LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight); + return false; + } + }); + if (disconnected) { + // If we disconnected an extra peer, that means we successfully + // connected to at least one peer after the last time we + // detected a stale tip. Don't try any more extra peers until + // we next detect a stale tip, to limit the load we put on the + // network from these extra connections. 
+ connman->SetTryNewOutboundPeer(false); + } + } + } +} + +void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams) +{ + if (connman == nullptr) return; + + int64_t time_in_seconds = GetTime(); + + EvictExtraOutboundPeers(time_in_seconds); + + if (time_in_seconds > m_stale_tip_check_time) { + LOCK(cs_main); + // Check whether our tip is stale, and if so, allow using an extra + // outbound peer + if (TipMayBeStale(consensusParams)) { + LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update); + connman->SetTryNewOutboundPeer(true); + } else if (connman->GetTryNewOutboundPeer()) { + connman->SetTryNewOutboundPeer(false); + } + m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL; + } +} + class CompareInvMempoolOrder { CTxMemPool *mp; @@ -3247,6 +3564,9 @@ bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptM } } + // Check that outbound peers have reasonable chains + // GetTime() is used by this anti-DoS logic so we can test this using mocktime + ConsiderEviction(pto, GetTime()); // // Message: getdata (blocks) diff --git a/src/net_processing.h b/src/net_processing.h index 79745cdd42..0a49972eed 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -8,6 +8,7 @@ #include "net.h" #include "validationinterface.h" +#include "consensus/params.h" /** Default for -maxorphantx, maximum number of orphan transactions kept in memory */ static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100; @@ -21,13 +22,25 @@ static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100; * Timeout = base + per_header * (expected number of headers) */ static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header +/** Protect at least this many outbound peers from disconnection due to slow/ + * behind headers chain. 
+ */ +static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4; +/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */ +static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes +/** How frequently to check for stale tips, in seconds */ +static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes +/** How frequently to check for extra outbound peers and disconnect, in seconds */ +static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45; +/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict, in seconds */ +static constexpr int64_t MINIMUM_CONNECT_TIME = 30; class PeerLogicValidation : public CValidationInterface, public NetEventsInterface { private: - CConnman* connman; + CConnman* const connman; public: - explicit PeerLogicValidation(CConnman* connman); + explicit PeerLogicValidation(CConnman* connman, CScheduler &scheduler); void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected, const std::vector<CTransactionRef>& vtxConflicted) override; void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override; @@ -47,6 +60,13 @@ public: * @return True if there is more work to be done */ bool SendMessages(CNode* pto, std::atomic<bool>& interrupt) override; + + void ConsiderEviction(CNode *pto, int64_t time_in_seconds); + void CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams); + void EvictExtraOutboundPeers(int64_t time_in_seconds); + +private: + int64_t m_stale_tip_check_time; //! Next time to check for stale tip }; struct CNodeStateStats { diff --git a/src/netbase.cpp b/src/netbase.cpp index 5a560bc95a..82040605c5 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -291,7 +291,7 @@ struct ProxyCredentials std::string password; }; -/** Convert SOCKS5 reply to a an error message */ +/** Convert SOCKS5 reply to an error message */ std::string Socks5ErrorString(uint8_t err) { switch(err) { diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index c7e57671c0..a459186359 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -548,16 +548,13 @@ CBlockPolicyEstimator::CBlockPolicyEstimator() bucketMap[INF_FEERATE] = bucketIndex; assert(bucketMap.size() == buckets.size()); - feeStats = new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE); - shortStats = new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE); - longStats = new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE); + feeStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE)); + shortStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE)); + longStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE)); } CBlockPolicyEstimator::~CBlockPolicyEstimator() { - delete feeStats; - delete shortStats; - delete longStats; } void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool validFeeEstimate) @@ -690,16 +687,16 @@ CFeeRate CBlockPolicyEstimator::estimateRawFee(int confTarget, double successThr double sufficientTxs = SUFFICIENT_FEETXS; switch (horizon) { case FeeEstimateHorizon::SHORT_HALFLIFE: { - stats = shortStats; + stats = shortStats.get(); sufficientTxs = SUFFICIENT_TXS_SHORT; break; } case 
FeeEstimateHorizon::MED_HALFLIFE: { - stats = feeStats; + stats = feeStats.get(); break; } case FeeEstimateHorizon::LONG_HALFLIFE: { - stats = longStats; + stats = longStats.get(); break; } default: { @@ -1002,12 +999,9 @@ bool CBlockPolicyEstimator::Read(CAutoFile& filein) } // Destroy old TxConfirmStats and point to new ones that already reference buckets and bucketMap - delete feeStats; - delete shortStats; - delete longStats; - feeStats = fileFeeStats.release(); - shortStats = fileShortStats.release(); - longStats = fileLongStats.release(); + feeStats = std::move(fileFeeStats); + shortStats = std::move(fileShortStats); + longStats = std::move(fileLongStats); nBestSeenHeight = nFileBestSeenHeight; historicalFirst = nFileHistoricalFirst; diff --git a/src/policy/fees.h b/src/policy/fees.h index 6edaf28714..9c0937cbd6 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -245,9 +245,9 @@ private: std::map<uint256, TxStatsInfo> mapMemPoolTxs; /** Classes to track historical data on transaction confirmations */ - TxConfirmStats* feeStats; - TxConfirmStats* shortStats; - TxConfirmStats* longStats; + std::unique_ptr<TxConfirmStats> feeStats; + std::unique_ptr<TxConfirmStats> shortStats; + std::unique_ptr<TxConfirmStats> longStats; unsigned int trackedTxs; unsigned int untrackedTxs; diff --git a/src/protocol.h b/src/protocol.h index 56b59aed3f..558616eaf8 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -27,16 +27,13 @@ class CMessageHeader { public: - enum { - MESSAGE_START_SIZE = 4, - COMMAND_SIZE = 12, - MESSAGE_SIZE_SIZE = 4, - CHECKSUM_SIZE = 4, - - MESSAGE_SIZE_OFFSET = MESSAGE_START_SIZE + COMMAND_SIZE, - CHECKSUM_OFFSET = MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE, - HEADER_SIZE = MESSAGE_START_SIZE + COMMAND_SIZE + MESSAGE_SIZE_SIZE + CHECKSUM_SIZE - }; + static constexpr size_t MESSAGE_START_SIZE = 4; + static constexpr size_t COMMAND_SIZE = 12; + static constexpr size_t MESSAGE_SIZE_SIZE = 4; + static constexpr size_t CHECKSUM_SIZE = 4; + static constexpr size_t MESSAGE_SIZE_OFFSET = MESSAGE_START_SIZE + COMMAND_SIZE; + static constexpr size_t CHECKSUM_OFFSET = MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE; + static constexpr size_t HEADER_SIZE = MESSAGE_START_SIZE + COMMAND_SIZE + MESSAGE_SIZE_SIZE + CHECKSUM_SIZE; typedef unsigned char MessageStartChars[MESSAGE_START_SIZE]; explicit CMessageHeader(const MessageStartChars& pchMessageStartIn); diff --git a/src/qt/askpassphrasedialog.cpp b/src/qt/askpassphrasedialog.cpp index e9f5c77a5b..d6cce09e8d 100644 --- a/src/qt/askpassphrasedialog.cpp +++ b/src/qt/askpassphrasedialog.cpp @@ -70,6 +70,7 @@ AskPassphraseDialog::AskPassphraseDialog(Mode _mode, QWidget *parent) : break; } textChanged(); + connect(ui->toggleShowPasswordButton, SIGNAL(toggled(bool)), this, SLOT(toggleShowPassword(bool))); connect(ui->passEdit1, SIGNAL(textChanged(QString)), this, SLOT(textChanged())); connect(ui->passEdit2, SIGNAL(textChanged(QString)), this, SLOT(textChanged())); connect(ui->passEdit3, SIGNAL(textChanged(QString)), this, SLOT(textChanged())); @@ -234,6 +235,15 @@ bool AskPassphraseDialog::event(QEvent *event) return QWidget::event(event); } +void AskPassphraseDialog::toggleShowPassword(bool show) +{ + ui->toggleShowPasswordButton->setDown(show); + const auto mode = show ? QLineEdit::Normal : QLineEdit::Password; + ui->passEdit1->setEchoMode(mode); + ui->passEdit2->setEchoMode(mode); + ui->passEdit3->setEchoMode(mode); +} + bool AskPassphraseDialog::eventFilter(QObject *object, QEvent *event) { /* Detect Caps Lock. 
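The toggleShowPassword slot added above switches the three passphrase fields between masked and plain-text echo modes. A minimal standalone sketch of the same QLineEdit pattern follows; the function and widget names are illustrative and not taken from the dialog itself:

#include <QCheckBox>
#include <QLineEdit>
#include <initializer_list>

// Apply the requested echo mode to each passphrase field.
void ApplyShowPassword(bool show, std::initializer_list<QLineEdit*> edits)
{
    const QLineEdit::EchoMode mode = show ? QLineEdit::Normal : QLineEdit::Password;
    for (QLineEdit* edit : edits) {
        edit->setEchoMode(mode);
    }
}

// Hypothetical wiring, mirroring the connect() call made in the dialog's constructor:
// connect(showCheckBox, &QCheckBox::toggled, [=](bool show) {
//     ApplyShowPassword(show, {passEdit1, passEdit2, passEdit3});
// });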
diff --git a/src/qt/askpassphrasedialog.h b/src/qt/askpassphrasedialog.h index 34bf7ccb31..7c6acc4650 100644 --- a/src/qt/askpassphrasedialog.h +++ b/src/qt/askpassphrasedialog.h @@ -43,6 +43,7 @@ private: private Q_SLOTS: void textChanged(); void secureClearPassFields(); + void toggleShowPassword(bool); protected: bool event(QEvent *event); diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp index 6952eb5064..207e441b6b 100644 --- a/src/qt/coincontroldialog.cpp +++ b/src/qt/coincontroldialog.cpp @@ -582,8 +582,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog) QString toolTipDust = tr("This label turns red if any recipient receives an amount smaller than the current dust threshold."); // how many satoshis the estimated fee can vary per byte we guess wrong - assert(nBytes != 0); - double dFeeVary = (double)nPayFee / nBytes; + double dFeeVary = (nBytes != 0) ? (double)nPayFee / nBytes : 0; QString toolTip4 = tr("Can vary +/- %1 satoshi(s) per input.").arg(dFeeVary); diff --git a/src/qt/forms/askpassphrasedialog.ui b/src/qt/forms/askpassphrasedialog.ui index a2105ecd0a..69803989cd 100644 --- a/src/qt/forms/askpassphrasedialog.ui +++ b/src/qt/forms/askpassphrasedialog.ui @@ -93,6 +93,13 @@ </widget> </item> <item row="3" column="1"> + <widget class="QCheckBox" name="toggleShowPasswordButton"> + <property name="text"> + <string>Show password</string> + </property> + </widget> + </item> + <item row="4" column="1"> <widget class="QLabel" name="capsLabel"> <property name="font"> <font> diff --git a/src/qt/forms/receivecoinsdialog.ui b/src/qt/forms/receivecoinsdialog.ui index 03fcb2fb50..2ca296bc22 100644 --- a/src/qt/forms/receivecoinsdialog.ui +++ b/src/qt/forms/receivecoinsdialog.ui @@ -28,23 +28,6 @@ <layout class="QVBoxLayout" name="verticalLayout_3"> <item> <layout class="QGridLayout" name="gridLayout"> - <item row="7" column="2"> - <widget class="QCheckBox" name="reuseAddress"> - <property name="toolTip"> - <string>Reuse one of the previously used receiving addresses. Reusing addresses has security and privacy issues. 
Do not use this unless re-generating a payment request made before.</string> - </property> - <property name="text"> - <string>R&euse an existing receiving address (not recommended)</string> - </property> - </widget> - </item> - <item row="7" column="0"> - <widget class="QLabel" name="label_4"> - <property name="text"> - <string/> - </property> - </widget> - </item> <item row="6" column="0"> <widget class="QLabel" name="label_3"> <property name="toolTip"> @@ -127,7 +110,7 @@ </property> </widget> </item> - <item row="8" column="2"> + <item row="7" column="2"> <layout class="QHBoxLayout" name="horizontalLayout"> <item> <widget class="QPushButton" name="receiveButton"> @@ -184,7 +167,7 @@ </item> </layout> </item> - <item row="8" column="0"> + <item row="7" column="0"> <widget class="QLabel" name="label_7"> <property name="text"> <string/> @@ -324,7 +307,6 @@ <tabstop>reqLabel</tabstop> <tabstop>reqAmount</tabstop> <tabstop>reqMessage</tabstop> - <tabstop>reuseAddress</tabstop> <tabstop>receiveButton</tabstop> <tabstop>clearButton</tabstop> <tabstop>recentRequestsView</tabstop> diff --git a/src/qt/forms/sendcoinsentry.ui b/src/qt/forms/sendcoinsentry.ui index df06f36833..d0748c2edb 100644 --- a/src/qt/forms/sendcoinsentry.ui +++ b/src/qt/forms/sendcoinsentry.ui @@ -163,7 +163,7 @@ </widget> </item> <item row="2" column="1"> - <layout class="QHBoxLayout" name="horizontalLayoutAmount" stretch="0,1"> + <layout class="QHBoxLayout" name="horizontalLayoutAmount" stretch="0,1,0"> <item> <widget class="BitcoinAmountField" name="payAmount"/> </item> @@ -177,6 +177,13 @@ </property> </widget> </item> + <item> + <widget class="QPushButton" name="useAvailableBalanceButton"> + <property name="text"> + <string>Use available balance</string> + </property> + </widget> + </item> </layout> </item> <item row="3" column="0"> diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index d520d7d4be..4bd63f4649 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -984,6 +984,18 @@ QString formatNiceTimeOffset(qint64 secs) return timeBehindText; } +QString formatBytes(uint64_t bytes) +{ + if(bytes < 1024) + return QString(QObject::tr("%1 B")).arg(bytes); + if(bytes < 1024 * 1024) + return QString(QObject::tr("%1 KB")).arg(bytes / 1024); + if(bytes < 1024 * 1024 * 1024) + return QString(QObject::tr("%1 MB")).arg(bytes / 1024 / 1024); + + return QString(QObject::tr("%1 GB")).arg(bytes / 1024 / 1024 / 1024); +} + void ClickableLabel::mouseReleaseEvent(QMouseEvent *event) { Q_EMIT clicked(event->pos()); diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h index d10818d0c8..7622816f7f 100644 --- a/src/qt/guiutil.h +++ b/src/qt/guiutil.h @@ -199,6 +199,8 @@ namespace GUIUtil QString formatNiceTimeOffset(qint64 secs); + QString formatBytes(uint64_t bytes); + class ClickableLabel : public QLabel { Q_OBJECT diff --git a/src/qt/paymentrequestplus.cpp b/src/qt/paymentrequestplus.cpp index d3799f59ab..c7f92a0921 100644 --- a/src/qt/paymentrequestplus.cpp +++ b/src/qt/paymentrequestplus.cpp @@ -194,8 +194,7 @@ bool PaymentRequestPlus::getMerchant(X509_STORE* certStore, QString& merchant) c qWarning() << "PaymentRequestPlus::getMerchant: SSL error: " << err.what(); } - if (website) - delete[] website; + delete[] website; X509_STORE_CTX_free(store_ctx); for (unsigned int i = 0; i < certs.size(); i++) X509_free(certs[i]); diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index 169684cf6d..506e49af0d 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -362,8 +362,7 @@ void 
PaymentServer::initNetManager() { if (!optionsModel) return; - if (netManager != nullptr) - delete netManager; + delete netManager; // netManager is used to fetch paymentrequests given in bitcoin: URIs netManager = new QNetworkAccessManager(this); diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp index 42934f8055..8b2a7e7047 100644 --- a/src/qt/peertablemodel.cpp +++ b/src/qt/peertablemodel.cpp @@ -33,6 +33,10 @@ bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombine return pLeft->cleanSubVer.compare(pRight->cleanSubVer) < 0; case PeerTableModel::Ping: return pLeft->dMinPing < pRight->dMinPing; + case PeerTableModel::Sent: + return pLeft->nSendBytes < pRight->nSendBytes; + case PeerTableModel::Received: + return pLeft->nRecvBytes < pRight->nRecvBytes; } return false; @@ -114,7 +118,7 @@ PeerTableModel::PeerTableModel(ClientModel *parent) : clientModel(parent), timer(0) { - columns << tr("NodeId") << tr("Node/Service") << tr("User Agent") << tr("Ping"); + columns << tr("NodeId") << tr("Node/Service") << tr("Ping") << tr("Sent") << tr("Received") << tr("User Agent"); priv.reset(new PeerTablePriv()); // default to unsorted priv->sortColumn = -1; @@ -173,10 +177,20 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const return QString::fromStdString(rec->nodeStats.cleanSubVer); case Ping: return GUIUtil::formatPingTime(rec->nodeStats.dMinPing); + case Sent: + return GUIUtil::formatBytes(rec->nodeStats.nSendBytes); + case Received: + return GUIUtil::formatBytes(rec->nodeStats.nRecvBytes); } } else if (role == Qt::TextAlignmentRole) { - if (index.column() == Ping) - return (QVariant)(Qt::AlignRight | Qt::AlignVCenter); + switch (index.column()) { + case Ping: + case Sent: + case Received: + return QVariant(Qt::AlignRight | Qt::AlignVCenter); + default: + return QVariant(); + } } return QVariant(); diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h index cc47b67ec9..ec91d07127 100644 --- a/src/qt/peertablemodel.h +++ b/src/qt/peertablemodel.h @@ -55,8 +55,10 @@ public: enum ColumnIndex { NetNodeId = 0, Address = 1, - Subversion = 2, - Ping = 3 + Ping = 2, + Sent = 3, + Received = 4, + Subversion = 5 }; /** @name Methods overridden from QAbstractTableModel diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp index 84f43266e1..4e1a9b172f 100644 --- a/src/qt/receivecoinsdialog.cpp +++ b/src/qt/receivecoinsdialog.cpp @@ -106,7 +106,6 @@ void ReceiveCoinsDialog::clear() ui->reqAmount->clear(); ui->reqLabel->setText(""); ui->reqMessage->setText(""); - ui->reuseAddress->setChecked(false); updateDisplayUnit(); } @@ -135,25 +134,8 @@ void ReceiveCoinsDialog::on_receiveButton_clicked() QString address; QString label = ui->reqLabel->text(); - if(ui->reuseAddress->isChecked()) - { - /* Choose existing receiving address */ - AddressBookPage dlg(platformStyle, AddressBookPage::ForSelection, AddressBookPage::ReceivingTab, this); - dlg.setModel(model->getAddressTableModel()); - if(dlg.exec()) - { - address = dlg.getReturnValue(); - if(label.isEmpty()) /* If no label provided, use the previously used label */ - { - label = model->getAddressTableModel()->labelForAddress(address); - } - } else { - return; - } - } else { - /* Generate new receiving address */ - address = model->getAddressTableModel()->addRow(AddressTableModel::Receive, label, ""); - } + /* Generate new receiving address */ + address = model->getAddressTableModel()->addRow(AddressTableModel::Receive, label, ""); SendCoinsRecipient info(address, label, 
ui->reqAmount->value(), ui->reqMessage->text()); ReceiveRequestDialog *dialog = new ReceiveRequestDialog(this); diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index d895fc1663..068c40e1e6 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -935,18 +935,6 @@ void RPCConsole::on_sldGraphRange_valueChanged(int value) setTrafficGraphRange(mins); } -QString RPCConsole::FormatBytes(quint64 bytes) -{ - if(bytes < 1024) - return QString(tr("%1 B")).arg(bytes); - if(bytes < 1024 * 1024) - return QString(tr("%1 KB")).arg(bytes / 1024); - if(bytes < 1024 * 1024 * 1024) - return QString(tr("%1 MB")).arg(bytes / 1024 / 1024); - - return QString(tr("%1 GB")).arg(bytes / 1024 / 1024 / 1024); -} - void RPCConsole::setTrafficGraphRange(int mins) { ui->trafficGraph->setGraphRangeMins(mins); @@ -955,8 +943,8 @@ void RPCConsole::setTrafficGraphRange(int mins) void RPCConsole::updateTrafficStats(quint64 totalBytesIn, quint64 totalBytesOut) { - ui->lblBytesIn->setText(FormatBytes(totalBytesIn)); - ui->lblBytesOut->setText(FormatBytes(totalBytesOut)); + ui->lblBytesIn->setText(GUIUtil::formatBytes(totalBytesIn)); + ui->lblBytesOut->setText(GUIUtil::formatBytes(totalBytesOut)); } void RPCConsole::peerSelected(const QItemSelection &selected, const QItemSelection &deselected) @@ -1050,8 +1038,8 @@ void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats) ui->peerServices->setText(GUIUtil::formatServicesStr(stats->nodeStats.nServices)); ui->peerLastSend->setText(stats->nodeStats.nLastSend ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastSend) : tr("never")); ui->peerLastRecv->setText(stats->nodeStats.nLastRecv ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastRecv) : tr("never")); - ui->peerBytesSent->setText(FormatBytes(stats->nodeStats.nSendBytes)); - ui->peerBytesRecv->setText(FormatBytes(stats->nodeStats.nRecvBytes)); + ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes)); + ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes)); ui->peerConnTime->setText(GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nTimeConnected)); ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingTime)); ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingWait)); diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h index da06818f87..ad6e84a44a 100644 --- a/src/qt/rpcconsole.h +++ b/src/qt/rpcconsole.h @@ -123,7 +123,6 @@ Q_SIGNALS: void cmdRequest(const QString &command); private: - static QString FormatBytes(quint64 bytes); void startExecutor(); void setTrafficGraphRange(int mins); /** show detailed information on ui about selected node */ diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index 6309070fef..0d4cab2091 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -402,6 +402,7 @@ SendCoinsEntry *SendCoinsDialog::addEntry() entry->setModel(model); ui->entries->addWidget(entry); connect(entry, SIGNAL(removeEntry(SendCoinsEntry*)), this, SLOT(removeEntry(SendCoinsEntry*))); + connect(entry, SIGNAL(useAvailableBalance(SendCoinsEntry*)), this, SLOT(useAvailableBalance(SendCoinsEntry*))); connect(entry, SIGNAL(payAmountChanged()), this, SLOT(coinControlUpdateLabels())); connect(entry, SIGNAL(subtractFeeFromAmountChanged()), this, SLOT(coinControlUpdateLabels())); @@ -599,6 +600,31 @@ void SendCoinsDialog::on_buttonMinimizeFee_clicked() minimizeFeeSection(true); } +void 
SendCoinsDialog::useAvailableBalance(SendCoinsEntry* entry) +{ + // Get CCoinControl instance if CoinControl is enabled or create a new one. + CCoinControl coin_control; + if (model->getOptionsModel()->getCoinControlFeatures()) { + coin_control = *CoinControlDialog::coinControl; + } + + // Calculate available amount to send. + CAmount amount = model->getBalance(&coin_control); + for (int i = 0; i < ui->entries->count(); ++i) { + SendCoinsEntry* e = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget()); + if (e && !e->isHidden() && e != entry) { + amount -= e->getValue().amount; + } + } + + if (amount > 0) { + entry->checkSubtractFeeFromAmount(); + entry->setAmount(amount); + } else { + entry->setAmount(0); + } +} + void SendCoinsDialog::setMinimumFee() { ui->customFee->setValue(GetRequiredFee(1000)); diff --git a/src/qt/sendcoinsdialog.h b/src/qt/sendcoinsdialog.h index 70b4aa5a03..2ae6ccfa64 100644 --- a/src/qt/sendcoinsdialog.h +++ b/src/qt/sendcoinsdialog.h @@ -76,6 +76,7 @@ private Q_SLOTS: void on_buttonChooseFee_clicked(); void on_buttonMinimizeFee_clicked(); void removeEntry(SendCoinsEntry* entry); + void useAvailableBalance(SendCoinsEntry* entry); void updateDisplayUnit(); void coinControlFeatureChanged(bool); void coinControlButtonClicked(); diff --git a/src/qt/sendcoinsentry.cpp b/src/qt/sendcoinsentry.cpp index bb0f47b21c..8cf8db8030 100644 --- a/src/qt/sendcoinsentry.cpp +++ b/src/qt/sendcoinsentry.cpp @@ -48,6 +48,7 @@ SendCoinsEntry::SendCoinsEntry(const PlatformStyle *_platformStyle, QWidget *par connect(ui->deleteButton, SIGNAL(clicked()), this, SLOT(deleteClicked())); connect(ui->deleteButton_is, SIGNAL(clicked()), this, SLOT(deleteClicked())); connect(ui->deleteButton_s, SIGNAL(clicked()), this, SLOT(deleteClicked())); + connect(ui->useAvailableBalanceButton, SIGNAL(clicked()), this, SLOT(useAvailableBalanceClicked())); } SendCoinsEntry::~SendCoinsEntry() @@ -112,11 +113,21 @@ void SendCoinsEntry::clear() updateDisplayUnit(); } +void SendCoinsEntry::checkSubtractFeeFromAmount() +{ + ui->checkboxSubtractFeeFromAmount->setChecked(true); +} + void SendCoinsEntry::deleteClicked() { Q_EMIT removeEntry(this); } +void SendCoinsEntry::useAvailableBalanceClicked() +{ + Q_EMIT useAvailableBalance(this); +} + bool SendCoinsEntry::validate() { if (!model) @@ -228,6 +239,11 @@ void SendCoinsEntry::setAddress(const QString &address) ui->payAmount->setFocus(); } +void SendCoinsEntry::setAmount(const CAmount &amount) +{ + ui->payAmount->setValue(amount); +} + bool SendCoinsEntry::isClear() { return ui->payTo->text().isEmpty() && ui->payTo_is->text().isEmpty() && ui->payTo_s->text().isEmpty(); diff --git a/src/qt/sendcoinsentry.h b/src/qt/sendcoinsentry.h index a8be670c2a..557fea4e54 100644 --- a/src/qt/sendcoinsentry.h +++ b/src/qt/sendcoinsentry.h @@ -38,6 +38,7 @@ public: void setValue(const SendCoinsRecipient &value); void setAddress(const QString &address); + void setAmount(const CAmount &amount); /** Set up the tab chain manually, as Qt messes up the tab chain by default in some cases * (issue https://bugreports.qt-project.org/browse/QTBUG-10907). 
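The useAvailableBalance slot above fills one entry with whatever remains of the spendable balance after every other visible entry's amount is deducted, and ticks the subtract-fee checkbox so the fee comes out of that amount. A small sketch of just the arithmetic, using hypothetical inputs in satoshis rather than the wallet model:

#include <cstdint>
#include <vector>

// Remaining spendable amount for the entry being filled (illustrative only).
int64_t RemainingSpendable(int64_t spendable_balance, const std::vector<int64_t>& other_entry_amounts)
{
    int64_t amount = spendable_balance;
    for (int64_t other : other_entry_amounts) {
        amount -= other;
    }
    // Clamp at zero; a positive result is what ends up in the amount field.
    return amount > 0 ? amount : 0;
}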
@@ -48,14 +49,17 @@ public: public Q_SLOTS: void clear(); + void checkSubtractFeeFromAmount(); Q_SIGNALS: void removeEntry(SendCoinsEntry *entry); + void useAvailableBalance(SendCoinsEntry* entry); void payAmountChanged(); void subtractFeeFromAmountChanged(); private Q_SLOTS: void deleteClicked(); + void useAvailableBalanceClicked(); void on_payTo_textChanged(const QString &address); void on_addressBookButton_clicked(); void on_pasteButton_clicked(); diff --git a/src/qt/test/rpcnestedtests.h b/src/qt/test/rpcnestedtests.h index 9ad409019f..59d7f37841 100644 --- a/src/qt/test/rpcnestedtests.h +++ b/src/qt/test/rpcnestedtests.h @@ -17,9 +17,6 @@ class RPCNestedTests : public QObject private Q_SLOTS: void rpcNestedTests(); - -private: - CCoinsViewDB *pcoinsdbview; }; #endif // BITCOIN_QT_TEST_RPC_NESTED_TESTS_H diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp index 12755d43e4..4289bf5d87 100644 --- a/src/qt/test/wallettests.cpp +++ b/src/qt/test/wallettests.cpp @@ -164,7 +164,10 @@ void TestGUI() wallet.SetAddressBook(test.coinbaseKey.GetPubKey().GetID(), "", "receive"); wallet.AddKeyPubKey(test.coinbaseKey, test.coinbaseKey.GetPubKey()); } - wallet.ScanForWalletTransactions(chainActive.Genesis(), nullptr, true); + { + LOCK(cs_main); + wallet.ScanForWalletTransactions(chainActive.Genesis(), nullptr, true); + } wallet.SetBroadcastTransactions(true); // Create widgets for sending coins and listing transactions. diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp index 74f5c774a0..41ee89de09 100644 --- a/src/qt/transactiondesc.cpp +++ b/src/qt/transactiondesc.cpp @@ -24,7 +24,7 @@ QString TransactionDesc::FormatTxStatus(const CWalletTx& wtx) { AssertLockHeld(cs_main); - if (!CheckFinalTx(wtx)) + if (!CheckFinalTx(*wtx.tx)) { if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD) return tr("Open for %n more block(s)", "", wtx.tx->nLockTime - chainActive.Height()); diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index d40ffd22cd..8ac00ac4cf 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -182,7 +182,7 @@ void TransactionRecord::updateStatus(const CWalletTx &wtx) status.depth = wtx.GetDepthInMainChain(); status.cur_num_blocks = chainActive.Height(); - if (!CheckFinalTx(wtx)) + if (!CheckFinalTx(*wtx.tx)) { if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD) { diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp index 59cef555b1..e83d824a6a 100644 --- a/src/qt/transactiontablemodel.cpp +++ b/src/qt/transactiontablemodel.cpp @@ -230,7 +230,7 @@ public: std::map<uint256, CWalletTx>::iterator mi = wallet->mapWallet.find(rec->hash); if(mi != wallet->mapWallet.end()) { - std::string strHex = EncodeHexTx(static_cast<CTransaction>(mi->second)); + std::string strHex = EncodeHexTx(*mi->second.tx); return QString::fromStdString(strHex); } return QString(); diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 53b1c2967c..e1d0660627 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -661,7 +661,7 @@ bool WalletModel::transactionCanBeBumped(uint256 hash) const { LOCK2(cs_main, wallet->cs_wallet); const CWalletTx *wtx = wallet->GetWalletTx(hash); - return wtx && SignalsOptInRBF(*wtx) && !wtx->mapValue.count("replaced_by_txid"); + return wtx && SignalsOptInRBF(*(wtx->tx)) && !wtx->mapValue.count("replaced_by_txid"); } bool WalletModel::bumpFee(uint256 hash) diff --git a/src/qt/walletmodeltransaction.cpp b/src/qt/walletmodeltransaction.cpp index eae2c27f8a..a2290535c7 100644 --- 
a/src/qt/walletmodeltransaction.cpp +++ b/src/qt/walletmodeltransaction.cpp @@ -34,7 +34,7 @@ CWalletTx *WalletModelTransaction::getTransaction() const unsigned int WalletModelTransaction::getTransactionSize() { - return (!walletTransaction ? 0 : ::GetVirtualTransactionSize(*walletTransaction)); + return (!walletTransaction ? 0 : ::GetVirtualTransactionSize(*walletTransaction->tx)); } CAmount WalletModelTransaction::getTransactionFee() const diff --git a/src/rest.cpp b/src/rest.cpp index 4d2cdfdf08..b1fc96bdf5 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -178,8 +178,11 @@ static bool rest_headers(HTTPRequest* req, } case RF_JSON: { UniValue jsonHeaders(UniValue::VARR); - for (const CBlockIndex *pindex : headers) { - jsonHeaders.push_back(blockheaderToJSON(pindex)); + { + LOCK(cs_main); + for (const CBlockIndex *pindex : headers) { + jsonHeaders.push_back(blockheaderToJSON(pindex)); + } } std::string strJSON = jsonHeaders.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); @@ -239,7 +242,11 @@ static bool rest_block(HTTPRequest* req, } case RF_JSON: { - UniValue objBlock = blockToJSON(block, pblockindex, showTxDetails); + UniValue objBlock; + { + LOCK(cs_main); + objBlock = blockToJSON(block, pblockindex, showTxDetails); + } std::string strJSON = objBlock.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 68af376f35..8fff937d82 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -78,6 +78,7 @@ double GetDifficulty(const CBlockIndex* blockindex) UniValue blockheaderToJSON(const CBlockIndex* blockindex) { + AssertLockHeld(cs_main); UniValue result(UniValue::VOBJ); result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex())); int confirmations = -1; @@ -106,6 +107,7 @@ UniValue blockheaderToJSON(const CBlockIndex* blockindex) UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails) { + AssertLockHeld(cs_main); UniValue result(UniValue::VOBJ); result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex())); int confirmations = -1; @@ -926,7 +928,7 @@ UniValue gettxoutsetinfo(const JSONRPCRequest& request) CCoinsStats stats; FlushStateToDisk(); - if (GetUTXOStats(pcoinsdbview, stats)) { + if (GetUTXOStats(pcoinsdbview.get(), stats)) { ret.push_back(Pair("height", (int64_t)stats.nHeight)); ret.push_back(Pair("bestblock", stats.hashBlock.GetHex())); ret.push_back(Pair("transactions", (int64_t)stats.nTransactions)); @@ -994,7 +996,7 @@ UniValue gettxout(const JSONRPCRequest& request) Coin coin; if (fMempool) { LOCK(mempool.cs); - CCoinsViewMemPool view(pcoinsTip, mempool); + CCoinsViewMemPool view(pcoinsTip.get(), mempool); if (!view.GetCoin(out, coin) || mempool.isSpent(out)) { return NullUniValue; } @@ -1046,7 +1048,7 @@ UniValue verifychain(const JSONRPCRequest& request) if (!request.params[1].isNull()) nCheckDepth = request.params[1].get_int(); - return CVerifyDB().VerifyDB(Params(), pcoinsTip, nCheckLevel, nCheckDepth); + return CVerifyDB().VerifyDB(Params(), pcoinsTip.get(), nCheckLevel, nCheckDepth); } /** Implementation of IsSuperMajority with better feedback */ @@ -1128,45 +1130,46 @@ UniValue getblockchaininfo(const JSONRPCRequest& request) "Returns an object containing various state info regarding blockchain processing.\n" "\nResult:\n" "{\n" - " \"chain\": \"xxxx\", (string) current network name as defined in BIP70 (main, test, regtest)\n" - " \"blocks\": xxxxxx, (numeric) the current 
number of blocks processed in the server\n" - " \"headers\": xxxxxx, (numeric) the current number of headers we have validated\n" - " \"bestblockhash\": \"...\", (string) the hash of the currently best block\n" - " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" - " \"mediantime\": xxxxxx, (numeric) median time for the current best block\n" + " \"chain\": \"xxxx\", (string) current network name as defined in BIP70 (main, test, regtest)\n" + " \"blocks\": xxxxxx, (numeric) the current number of blocks processed in the server\n" + " \"headers\": xxxxxx, (numeric) the current number of headers we have validated\n" + " \"bestblockhash\": \"...\", (string) the hash of the currently best block\n" + " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" + " \"mediantime\": xxxxxx, (numeric) median time for the current best block\n" " \"verificationprogress\": xxxx, (numeric) estimate of verification progress [0..1]\n" - " \"chainwork\": \"xxxx\" (string) total amount of work in active chain, in hexadecimal\n" - " \"size_on_disk\": xxxxxx, (numeric) the estimated size of the block and undo files on disk\n" - " \"pruned\": xx, (boolean) if the blocks are subject to pruning\n" - " \"pruneheight\": xxxxxx, (numeric) lowest-height complete block stored (only present if pruning is enabled)\n" - " \"automatic_pruning\": xx, (boolean) whether automatic pruning is enabled (only present if pruning is enabled)\n" + " \"initialblockdownload\": xxxx, (bool) (debug information) estimate of whether this node is in Initial Block Download mode.\n" + " \"chainwork\": \"xxxx\" (string) total amount of work in active chain, in hexadecimal\n" + " \"size_on_disk\": xxxxxx, (numeric) the estimated size of the block and undo files on disk\n" + " \"pruned\": xx, (boolean) if the blocks are subject to pruning\n" + " \"pruneheight\": xxxxxx, (numeric) lowest-height complete block stored (only present if pruning is enabled)\n" + " \"automatic_pruning\": xx, (boolean) whether automatic pruning is enabled (only present if pruning is enabled)\n" " \"prune_target_size\": xxxxxx, (numeric) the target size used by pruning (only present if automatic pruning is enabled)\n" - " \"softforks\": [ (array) status of softforks in progress\n" + " \"softforks\": [ (array) status of softforks in progress\n" " {\n" - " \"id\": \"xxxx\", (string) name of softfork\n" - " \"version\": xx, (numeric) block version\n" - " \"reject\": { (object) progress toward rejecting pre-softfork blocks\n" - " \"status\": xx, (boolean) true if threshold reached\n" + " \"id\": \"xxxx\", (string) name of softfork\n" + " \"version\": xx, (numeric) block version\n" + " \"reject\": { (object) progress toward rejecting pre-softfork blocks\n" + " \"status\": xx, (boolean) true if threshold reached\n" " },\n" " }, ...\n" " ],\n" - " \"bip9_softforks\": { (object) status of BIP9 softforks in progress\n" - " \"xxxx\" : { (string) name of the softfork\n" - " \"status\": \"xxxx\", (string) one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\"\n" - " \"bit\": xx, (numeric) the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)\n" - " \"startTime\": xx, (numeric) the minimum median time past of a block at which the bit gains its meaning\n" - " \"timeout\": xx, (numeric) the median time past of a block at which the deployment is considered failed if not yet locked in\n" - " \"since\": xx, (numeric) height of the first block to which the status applies\n" - " \"statistics\": { (object) numeric 
statistics about BIP9 signalling for a softfork (only for \"started\" status)\n" - " \"period\": xx, (numeric) the length in blocks of the BIP9 signalling period \n" - " \"threshold\": xx, (numeric) the number of blocks with the version bit set required to activate the feature \n" - " \"elapsed\": xx, (numeric) the number of blocks elapsed since the beginning of the current period \n" - " \"count\": xx, (numeric) the number of blocks with the version bit set in the current period \n" - " \"possible\": xx (boolean) returns false if there are not enough blocks left in this period to pass activation threshold \n" + " \"bip9_softforks\": { (object) status of BIP9 softforks in progress\n" + " \"xxxx\" : { (string) name of the softfork\n" + " \"status\": \"xxxx\", (string) one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\"\n" + " \"bit\": xx, (numeric) the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)\n" + " \"startTime\": xx, (numeric) the minimum median time past of a block at which the bit gains its meaning\n" + " \"timeout\": xx, (numeric) the median time past of a block at which the deployment is considered failed if not yet locked in\n" + " \"since\": xx, (numeric) height of the first block to which the status applies\n" + " \"statistics\": { (object) numeric statistics about BIP9 signalling for a softfork (only for \"started\" status)\n" + " \"period\": xx, (numeric) the length in blocks of the BIP9 signalling period \n" + " \"threshold\": xx, (numeric) the number of blocks with the version bit set required to activate the feature \n" + " \"elapsed\": xx, (numeric) the number of blocks elapsed since the beginning of the current period \n" + " \"count\": xx, (numeric) the number of blocks with the version bit set in the current period \n" + " \"possible\": xx (boolean) returns false if there are not enough blocks left in this period to pass activation threshold \n" " }\n" " }\n" " }\n" - " \"warnings\" : \"...\", (string) any network and blockchain warnings.\n" + " \"warnings\" : \"...\", (string) any network and blockchain warnings.\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblockchaininfo", "") @@ -1183,6 +1186,7 @@ UniValue getblockchaininfo(const JSONRPCRequest& request) obj.push_back(Pair("difficulty", (double)GetDifficulty())); obj.push_back(Pair("mediantime", (int64_t)chainActive.Tip()->GetMedianTimePast())); obj.push_back(Pair("verificationprogress", GuessVerificationProgress(Params().TxData(), chainActive.Tip()))); + obj.push_back(Pair("initialblockdownload", IsInitialBlockDownload())); obj.push_back(Pair("chainwork", chainActive.Tip()->nChainWork.GetHex())); obj.push_back(Pair("size_on_disk", CalculateCurrentUsage())); obj.push_back(Pair("pruned", fPruneMode)); diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index f79439f038..0ba0e968a7 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -455,7 +455,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request) { // Wait to respond until either the best block changes, OR a minute has passed and there are more transactions uint256 hashWatchedChain; - boost::system_time checktxtime; + std::chrono::steady_clock::time_point checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) @@ -476,17 +476,17 @@ UniValue getblocktemplate(const JSONRPCRequest& request) // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { - checktxtime = boost::get_system_time() + boost::posix_time::minutes(1); + checktxtime = 
std::chrono::steady_clock::now() + std::chrono::minutes(1); - boost::unique_lock<boost::mutex> lock(csBestBlock); + WaitableLock lock(csBestBlock); while (chainActive.Tip()->GetBlockHash() == hashWatchedChain && IsRPCRunning()) { - if (!cvBlockChange.timed_wait(lock, checktxtime)) + if (cvBlockChange.wait_until(lock, checktxtime) == std::cv_status::timeout) { // Timeout: Check transactions for update if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) break; - checktxtime += boost::posix_time::seconds(10); + checktxtime += std::chrono::seconds(10); } } } diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index 521b49e2a7..d042fa31d5 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -608,6 +608,7 @@ static const CRPCCommand commands[] = { // category name actor (function) argNames // --------------------- ------------------------ ----------------------- ---------- { "control", "getmemoryinfo", &getmemoryinfo, {"mode"} }, + { "control", "logging", &logging, {"include", "exclude"}}, { "util", "validateaddress", &validateaddress, {"address"} }, /* uses wallet if enabled */ { "util", "createmultisig", &createmultisig, {"nrequired","keys"} }, { "util", "verifymessage", &verifymessage, {"address","signature","message"} }, @@ -617,7 +618,6 @@ static const CRPCCommand commands[] = { "hidden", "setmocktime", &setmocktime, {"timestamp"}}, { "hidden", "echo", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, { "hidden", "echojson", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, - { "hidden", "logging", &logging, {"include", "exclude"}}, }; void RegisterMiscRPCCommands(CRPCTable &t) diff --git a/src/streams.h b/src/streams.h index 9a3badea57..5dbeaac9a5 100644 --- a/src/streams.h +++ b/src/streams.h @@ -345,18 +345,16 @@ public: // Read from the beginning of the buffer unsigned int nReadPosNext = nReadPos + nSize; - if (nReadPosNext >= vch.size()) + if (nReadPosNext > vch.size()) { + throw std::ios_base::failure("CDataStream::read(): end of data"); + } + memcpy(pch, &vch[nReadPos], nSize); + if (nReadPosNext == vch.size()) { - if (nReadPosNext > vch.size()) - { - throw std::ios_base::failure("CDataStream::read(): end of data"); - } - memcpy(pch, &vch[nReadPos], nSize); nReadPos = 0; vch.clear(); return; } - memcpy(pch, &vch[nReadPos], nSize); nReadPos = nReadPosNext; } diff --git a/src/sync.h b/src/sync.h index 0871c5fb4d..20556af890 100644 --- a/src/sync.h +++ b/src/sync.h @@ -10,7 +10,9 @@ #include <boost/thread/condition_variable.hpp> #include <boost/thread/mutex.hpp> -#include <boost/thread/recursive_mutex.hpp> +#include <condition_variable> +#include <thread> +#include <mutex> //////////////////////////////////////////////// @@ -21,17 +23,17 @@ /* CCriticalSection mutex; - boost::recursive_mutex mutex; + std::recursive_mutex mutex; LOCK(mutex); - boost::unique_lock<boost::recursive_mutex> criticalblock(mutex); + std::unique_lock<std::recursive_mutex> criticalblock(mutex); LOCK2(mutex1, mutex2); - boost::unique_lock<boost::recursive_mutex> criticalblock1(mutex1); - boost::unique_lock<boost::recursive_mutex> criticalblock2(mutex2); + std::unique_lock<std::recursive_mutex> criticalblock1(mutex1); + std::unique_lock<std::recursive_mutex> criticalblock2(mutex2); TRY_LOCK(mutex, name); - boost::unique_lock<boost::recursive_mutex> name(mutex, boost::try_to_lock_t); + std::unique_lock<std::recursive_mutex> name(mutex, std::try_to_lock_t); ENTER_CRITICAL_SECTION(mutex); // no RAII mutex.lock(); @@ -85,10 +87,10 @@ void 
static inline DeleteLock(void* cs) {} #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) /** - * Wrapped boost mutex: supports recursive locking, but no waiting + * Wrapped mutex: supports recursive locking, but no waiting * TODO: We should move away from using the recursive lock by default. */ -class CCriticalSection : public AnnotatedMixin<boost::recursive_mutex> +class CCriticalSection : public AnnotatedMixin<std::recursive_mutex> { public: ~CCriticalSection() { @@ -96,22 +98,24 @@ public: } }; -/** Wrapped boost mutex: supports waiting but not recursive locking */ -typedef AnnotatedMixin<boost::mutex> CWaitableCriticalSection; +/** Wrapped mutex: supports waiting but not recursive locking */ +typedef AnnotatedMixin<std::mutex> CWaitableCriticalSection; -/** Just a typedef for boost::condition_variable, can be wrapped later if desired */ -typedef boost::condition_variable CConditionVariable; +/** Just a typedef for std::condition_variable, can be wrapped later if desired */ +typedef std::condition_variable CConditionVariable; + +/** Just a typedef for std::unique_lock, can be wrapped later if desired */ +typedef std::unique_lock<std::mutex> WaitableLock; #ifdef DEBUG_LOCKCONTENTION void PrintLockContention(const char* pszName, const char* pszFile, int nLine); #endif -/** Wrapper around boost::unique_lock<Mutex> */ -template <typename Mutex> -class SCOPED_LOCKABLE CMutexLock +/** Wrapper around std::unique_lock<CCriticalSection> */ +class SCOPED_LOCKABLE CCriticalBlock { private: - boost::unique_lock<Mutex> lock; + std::unique_lock<CCriticalSection> lock; void Enter(const char* pszName, const char* pszFile, int nLine) { @@ -136,7 +140,7 @@ private: } public: - CMutexLock(Mutex& mutexIn, const char* pszName, const char* pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(mutexIn) : lock(mutexIn, boost::defer_lock) + CCriticalBlock(CCriticalSection& mutexIn, const char* pszName, const char* pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(mutexIn) : lock(mutexIn, std::defer_lock) { if (fTry) TryEnter(pszName, pszFile, nLine); @@ -144,18 +148,18 @@ public: Enter(pszName, pszFile, nLine); } - CMutexLock(Mutex* pmutexIn, const char* pszName, const char* pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(pmutexIn) + CCriticalBlock(CCriticalSection* pmutexIn, const char* pszName, const char* pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(pmutexIn) { if (!pmutexIn) return; - lock = boost::unique_lock<Mutex>(*pmutexIn, boost::defer_lock); + lock = std::unique_lock<CCriticalSection>(*pmutexIn, std::defer_lock); if (fTry) TryEnter(pszName, pszFile, nLine); else Enter(pszName, pszFile, nLine); } - ~CMutexLock() UNLOCK_FUNCTION() + ~CCriticalBlock() UNLOCK_FUNCTION() { if (lock.owns_lock()) LeaveCritical(); @@ -167,8 +171,6 @@ public: } }; -typedef CMutexLock<CCriticalSection> CCriticalBlock; - #define PASTE(x, y) x ## y #define PASTE2(x, y) PASTE(x, y) diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp index b88ad5ed1b..bf6f2533df 100644 --- a/src/test/DoS_tests.cpp +++ b/src/test/DoS_tests.cpp @@ -40,8 +40,141 @@ CService ip(uint32_t i) static NodeId id = 0; +void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds); + BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup) +// Test eviction of an outbound peer whose chain never advances +// Mock a node connection, and use mocktime to simulate a peer +// which never sends any headers messages. 
PeerLogic should +// decide to evict that outbound peer, after the appropriate timeouts. +// Note that we protect 4 outbound nodes from being subject to +// this logic; this test takes advantage of that protection only +// being applied to nodes which send headers with sufficient +// work. +BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) +{ + std::atomic<bool> interruptDummy(false); + + // Mock an outbound peer + CAddress addr1(ip(0xa0b0c001), NODE_NONE); + CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", /*fInboundIn=*/ false); + dummyNode1.SetSendVersion(PROTOCOL_VERSION); + + peerLogic->InitializeNode(&dummyNode1); + dummyNode1.nVersion = 1; + dummyNode1.fSuccessfullyConnected = true; + + // This test requires that we have a chain with non-zero work. + LOCK(cs_main); + BOOST_CHECK(chainActive.Tip() != nullptr); + BOOST_CHECK(chainActive.Tip()->nChainWork > 0); + + // Test starts here + LOCK(dummyNode1.cs_sendProcessing); + peerLogic->SendMessages(&dummyNode1, interruptDummy); // should result in getheaders + LOCK(dummyNode1.cs_vSend); + BOOST_CHECK(dummyNode1.vSendMsg.size() > 0); + dummyNode1.vSendMsg.clear(); + + int64_t nStartTime = GetTime(); + // Wait 21 minutes + SetMockTime(nStartTime+21*60); + peerLogic->SendMessages(&dummyNode1, interruptDummy); // should result in getheaders + BOOST_CHECK(dummyNode1.vSendMsg.size() > 0); + // Wait 3 more minutes + SetMockTime(nStartTime+24*60); + peerLogic->SendMessages(&dummyNode1, interruptDummy); // should result in disconnect + BOOST_CHECK(dummyNode1.fDisconnect == true); + SetMockTime(0); + + bool dummy; + peerLogic->FinalizeNode(dummyNode1.GetId(), dummy); +} + +void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidation &peerLogic) +{ + CAddress addr(ip(GetRandInt(0xffffffff)), NODE_NONE); + vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", /*fInboundIn=*/ false)); + CNode &node = *vNodes.back(); + node.SetSendVersion(PROTOCOL_VERSION); + + peerLogic.InitializeNode(&node); + node.nVersion = 1; + node.fSuccessfullyConnected = true; + + CConnmanTest::AddNode(node); +} + +BOOST_AUTO_TEST_CASE(stale_tip_peer_management) +{ + const Consensus::Params& consensusParams = Params().GetConsensus(); + constexpr int nMaxOutbound = 8; + CConnman::Options options; + options.nMaxConnections = 125; + options.nMaxOutbound = nMaxOutbound; + options.nMaxFeeler = 1; + + connman->Init(options); + std::vector<CNode *> vNodes; + + // Mock some outbound peers + for (int i=0; i<nMaxOutbound; ++i) { + AddRandomOutboundPeer(vNodes, *peerLogic); + } + + peerLogic->CheckForStaleTipAndEvictPeers(consensusParams); + + // No nodes should be marked for disconnection while we have no extra peers + for (const CNode *node : vNodes) { + BOOST_CHECK(node->fDisconnect == false); + } + + SetMockTime(GetTime() + 3*consensusParams.nPowTargetSpacing + 1); + + // Now tip should definitely be stale, and we should look for an extra + // outbound peer + peerLogic->CheckForStaleTipAndEvictPeers(consensusParams); + BOOST_CHECK(connman->GetTryNewOutboundPeer()); + + // Still no peers should be marked for disconnection + for (const CNode *node : vNodes) { + BOOST_CHECK(node->fDisconnect == false); + } + + // If we add one more peer, something should get marked for eviction + // on the next check (since we're mocking the time to be in the future, the + // required time connected check should be satisfied). 
+ AddRandomOutboundPeer(vNodes, *peerLogic); + + peerLogic->CheckForStaleTipAndEvictPeers(consensusParams); + for (int i=0; i<nMaxOutbound; ++i) { + BOOST_CHECK(vNodes[i]->fDisconnect == false); + } + // Last added node should get marked for eviction + BOOST_CHECK(vNodes.back()->fDisconnect == true); + + vNodes.back()->fDisconnect = false; + + // Update the last announced block time for the last + // peer, and check that the next newest node gets evicted. + UpdateLastBlockAnnounceTime(vNodes.back()->GetId(), GetTime()); + + peerLogic->CheckForStaleTipAndEvictPeers(consensusParams); + for (int i=0; i<nMaxOutbound-1; ++i) { + BOOST_CHECK(vNodes[i]->fDisconnect == false); + } + BOOST_CHECK(vNodes[nMaxOutbound-1]->fDisconnect == true); + BOOST_CHECK(vNodes.back()->fDisconnect == false); + + bool dummy; + for (const CNode *node : vNodes) { + peerLogic->FinalizeNode(node->GetId(), dummy); + } + + CConnmanTest::ClearNodes(); +} + BOOST_AUTO_TEST_CASE(DoS_banning) { std::atomic<bool> interruptDummy(false); @@ -53,7 +186,11 @@ BOOST_AUTO_TEST_CASE(DoS_banning) peerLogic->InitializeNode(&dummyNode1); dummyNode1.nVersion = 1; dummyNode1.fSuccessfullyConnected = true; - Misbehaving(dummyNode1.GetId(), 100); // Should get banned + { + LOCK(cs_main); + Misbehaving(dummyNode1.GetId(), 100); // Should get banned + } + LOCK(dummyNode1.cs_sendProcessing); peerLogic->SendMessages(&dummyNode1, interruptDummy); BOOST_CHECK(connman->IsBanned(addr1)); BOOST_CHECK(!connman->IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned @@ -64,13 +201,24 @@ BOOST_AUTO_TEST_CASE(DoS_banning) peerLogic->InitializeNode(&dummyNode2); dummyNode2.nVersion = 1; dummyNode2.fSuccessfullyConnected = true; - Misbehaving(dummyNode2.GetId(), 50); + { + LOCK(cs_main); + Misbehaving(dummyNode2.GetId(), 50); + } + LOCK(dummyNode2.cs_sendProcessing); peerLogic->SendMessages(&dummyNode2, interruptDummy); BOOST_CHECK(!connman->IsBanned(addr2)); // 2 not banned yet... BOOST_CHECK(connman->IsBanned(addr1)); // ... 
but 1 still should be - Misbehaving(dummyNode2.GetId(), 50); + { + LOCK(cs_main); + Misbehaving(dummyNode2.GetId(), 50); + } peerLogic->SendMessages(&dummyNode2, interruptDummy); BOOST_CHECK(connman->IsBanned(addr2)); + + bool dummy; + peerLogic->FinalizeNode(dummyNode1.GetId(), dummy); + peerLogic->FinalizeNode(dummyNode2.GetId(), dummy); } BOOST_AUTO_TEST_CASE(DoS_banscore) @@ -85,16 +233,29 @@ BOOST_AUTO_TEST_CASE(DoS_banscore) peerLogic->InitializeNode(&dummyNode1); dummyNode1.nVersion = 1; dummyNode1.fSuccessfullyConnected = true; - Misbehaving(dummyNode1.GetId(), 100); + { + LOCK(cs_main); + Misbehaving(dummyNode1.GetId(), 100); + } + LOCK(dummyNode1.cs_sendProcessing); peerLogic->SendMessages(&dummyNode1, interruptDummy); BOOST_CHECK(!connman->IsBanned(addr1)); - Misbehaving(dummyNode1.GetId(), 10); + { + LOCK(cs_main); + Misbehaving(dummyNode1.GetId(), 10); + } peerLogic->SendMessages(&dummyNode1, interruptDummy); BOOST_CHECK(!connman->IsBanned(addr1)); - Misbehaving(dummyNode1.GetId(), 1); + { + LOCK(cs_main); + Misbehaving(dummyNode1.GetId(), 1); + } peerLogic->SendMessages(&dummyNode1, interruptDummy); BOOST_CHECK(connman->IsBanned(addr1)); gArgs.ForceSetArg("-banscore", std::to_string(DEFAULT_BANSCORE_THRESHOLD)); + + bool dummy; + peerLogic->FinalizeNode(dummyNode1.GetId(), dummy); } BOOST_AUTO_TEST_CASE(DoS_bantime) @@ -112,7 +273,11 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) dummyNode.nVersion = 1; dummyNode.fSuccessfullyConnected = true; - Misbehaving(dummyNode.GetId(), 100); + { + LOCK(cs_main); + Misbehaving(dummyNode.GetId(), 100); + } + LOCK(dummyNode.cs_sendProcessing); peerLogic->SendMessages(&dummyNode, interruptDummy); BOOST_CHECK(connman->IsBanned(addr)); @@ -121,11 +286,15 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) SetMockTime(nStartTime+60*60*24+1); BOOST_CHECK(!connman->IsBanned(addr)); + + bool dummy; + peerLogic->FinalizeNode(dummyNode.GetId(), dummy); } CTransactionRef RandomOrphan() { std::map<uint256, COrphanTx>::iterator it; + LOCK(cs_main); it = mapOrphanTransactions.lower_bound(InsecureRand256()); if (it == mapOrphanTransactions.end()) it = mapOrphanTransactions.begin(); @@ -195,6 +364,7 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) BOOST_CHECK(!AddOrphanTx(MakeTransactionRef(tx), i)); } + LOCK(cs_main); // Test EraseOrphansFor: for (NodeId i = 0; i < 3; i++) { diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index f2d5b385d0..71baf286e9 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -62,6 +62,7 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) CBlock block(BuildBlockTestCase()); pool.addUnchecked(block.vtx[2]->GetHash(), entry.FromTx(*block.vtx[2])); + LOCK(pool.cs); BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); // Do a simple ShortTxIDs RT @@ -161,6 +162,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block(BuildBlockTestCase()); pool.addUnchecked(block.vtx[2]->GetHash(), entry.FromTx(*block.vtx[2])); + LOCK(pool.cs); BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); uint256 txhash; @@ -227,6 +229,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) CBlock block(BuildBlockTestCase()); pool.addUnchecked(block.vtx[1]->GetHash(), entry.FromTx(*block.vtx[1])); + LOCK(pool.cs); BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); uint256 txhash; diff --git a/src/test/dbwrapper_tests.cpp 
b/src/test/dbwrapper_tests.cpp index efddafe17e..c1625cf476 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -125,7 +125,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) create_directories(ph); // Set up a non-obfuscated wrapper to write some initial data. - CDBWrapper* dbw = new CDBWrapper(ph, (1 << 10), false, false, false); + std::unique_ptr<CDBWrapper> dbw = MakeUnique<CDBWrapper>(ph, (1 << 10), false, false, false); char key = 'k'; uint256 in = InsecureRand256(); uint256 res; @@ -135,8 +135,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); // Call the destructor to free leveldb LOCK - delete dbw; - dbw = nullptr; + dbw.reset(); // Now, set up another wrapper that wants to obfuscate the same directory CDBWrapper odbw(ph, (1 << 10), false, false, true); @@ -167,7 +166,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex) create_directories(ph); // Set up a non-obfuscated wrapper to write some initial data. - CDBWrapper* dbw = new CDBWrapper(ph, (1 << 10), false, false, false); + std::unique_ptr<CDBWrapper> dbw = MakeUnique<CDBWrapper>(ph, (1 << 10), false, false, false); char key = 'k'; uint256 in = InsecureRand256(); uint256 res; @@ -177,8 +176,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex) BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); // Call the destructor to free leveldb LOCK - delete dbw; - dbw = nullptr; + dbw.reset(); // Simulate a -reindex by wiping the existing data store CDBWrapper odbw(ph, (1 << 10), false, true, true); diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp index 116210a297..c7abad8026 100644 --- a/src/test/mempool_tests.cpp +++ b/src/test/mempool_tests.cpp @@ -165,6 +165,7 @@ BOOST_AUTO_TEST_CASE(MempoolIndexingTest) sortedOrder[2] = tx1.GetHash().ToString(); // 10000 sortedOrder[3] = tx4.GetHash().ToString(); // 15000 sortedOrder[4] = tx2.GetHash().ToString(); // 20000 + LOCK(pool.cs); CheckSort<descendant_score>(pool, sortedOrder); /* low fee but with high fee child */ @@ -375,6 +376,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) } sortedOrder[4] = tx3.GetHash().ToString(); // 0 + LOCK(pool.cs); CheckSort<ancestor_score>(pool, sortedOrder); /* low fee parent with high fee child */ diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index 41e0626eb9..2851808cf4 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -335,23 +335,6 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) BOOST_CHECK_THROW(AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); - // invalid (pre-p2sh) txn in mempool, template creation fails - tx.vin[0].prevout.hash = txFirst[0]->GetHash(); - tx.vin[0].prevout.n = 0; - tx.vin[0].scriptSig = CScript() << OP_1; - tx.vout[0].nValue = BLOCKSUBSIDY-LOWFEE; - script = CScript() << OP_0; - tx.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(script)); - hash = tx.GetHash(); - mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); - tx.vin[0].prevout.hash = hash; - tx.vin[0].scriptSig = CScript() << std::vector<unsigned char>(script.begin(), script.end()); - tx.vout[0].nValue -= LOWFEE; - hash = tx.GetHash(); - mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); - BOOST_CHECK_THROW(AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error); - mempool.clear(); - // double spend txn pair in mempool, template creation fails tx.vin[0].prevout.hash = 
txFirst[0]->GetHash(); tx.vin[0].scriptSig = CScript() << OP_1; @@ -391,6 +374,24 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) chainActive.SetTip(next); } BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey)); + + // invalid p2sh txn in mempool, template creation fails + tx.vin[0].prevout.hash = txFirst[0]->GetHash(); + tx.vin[0].prevout.n = 0; + tx.vin[0].scriptSig = CScript() << OP_1; + tx.vout[0].nValue = BLOCKSUBSIDY-LOWFEE; + script = CScript() << OP_0; + tx.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(script)); + hash = tx.GetHash(); + mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); + tx.vin[0].prevout.hash = hash; + tx.vin[0].scriptSig = CScript() << std::vector<unsigned char>(script.begin(), script.end()); + tx.vout[0].nValue -= LOWFEE; + hash = tx.GetHash(); + mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); + BOOST_CHECK_THROW(AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error); + mempool.clear(); + // Delete the dummy blocks again. while (chainActive.Tip()->nHeight > nHeight) { CBlockIndex* del = chainActive.Tip(); diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp index 79bc48a118..fbe9e217ec 100644 --- a/src/test/test_bitcoin.cpp +++ b/src/test/test_bitcoin.cpp @@ -25,6 +25,18 @@ #include <memory> +void CConnmanTest::AddNode(CNode& node) +{ + LOCK(g_connman->cs_vNodes); + g_connman->vNodes.push_back(&node); +} + +void CConnmanTest::ClearNodes() +{ + LOCK(g_connman->cs_vNodes); + g_connman->vNodes.clear(); +} + uint256 insecure_rand_seed = GetRandHash(); FastRandomContext insecure_rand_ctx(insecure_rand_seed); @@ -69,9 +81,9 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha GetMainSignals().RegisterBackgroundSignalScheduler(scheduler); mempool.setSanityCheck(1.0); - pblocktree = new CBlockTreeDB(1 << 20, true); - pcoinsdbview = new CCoinsViewDB(1 << 23, true); - pcoinsTip = new CCoinsViewCache(pcoinsdbview); + pblocktree.reset(new CBlockTreeDB(1 << 20, true)); + pcoinsdbview.reset(new CCoinsViewDB(1 << 23, true)); + pcoinsTip.reset(new CCoinsViewCache(pcoinsdbview.get())); if (!LoadGenesisBlock(chainparams)) { throw std::runtime_error("LoadGenesisBlock failed."); } @@ -86,7 +98,7 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha threadGroup.create_thread(&ThreadScriptCheck); g_connman = std::unique_ptr<CConnman>(new CConnman(0x1337, 0x1337)); // Deterministic randomness for tests. connman = g_connman.get(); - peerLogic.reset(new PeerLogicValidation(connman)); + peerLogic.reset(new PeerLogicValidation(connman, scheduler)); } TestingSetup::~TestingSetup() @@ -98,14 +110,17 @@ TestingSetup::~TestingSetup() g_connman.reset(); peerLogic.reset(); UnloadBlockIndex(); - delete pcoinsTip; - delete pcoinsdbview; - delete pblocktree; + pcoinsTip.reset(); + pcoinsdbview.reset(); + pblocktree.reset(); fs::remove_all(pathTemp); } TestChain100Setup::TestChain100Setup() : TestingSetup(CBaseChainParams::REGTEST) { + // CreateAndProcessBlock() does not support building SegWit blocks, so don't activate in these tests. + // TODO: fix the code to support SegWit blocks. 
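Several hunks in this commit replace raw-pointer globals (pblocktree, pcoinsdbview, pcoinsTip) with std::unique_ptr, pass .get() to interfaces that still take raw pointers, and tear down with reset() in a fixed order. A hedged sketch of that ownership shape, using placeholder types rather than the real CCoinsViewDB/CCoinsViewCache:

```cpp
#include <memory>

struct CoinsDB { /* on-disk view */ };
struct CoinsCache {
    explicit CoinsCache(CoinsDB* backend) : backend_(backend) {}
    CoinsDB* backend_;
};

std::unique_ptr<CoinsDB> g_coinsdb;
std::unique_ptr<CoinsCache> g_coinstip;

bool GetStats(CoinsDB* db) { return db != nullptr; }  // legacy raw-pointer interface

int main() {
    g_coinsdb.reset(new CoinsDB());
    g_coinstip.reset(new CoinsCache(g_coinsdb.get()));  // cache layered on the DB view
    bool ok = GetStats(g_coinsdb.get());                // pass .get() to legacy code
    // Teardown mirrors TestingSetup::~TestingSetup(): release the cache before
    // the database it points into.
    g_coinstip.reset();
    g_coinsdb.reset();
    return ok ? 0 : 1;
}
```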
+ UpdateVersionBitsParameters(Consensus::DEPLOYMENT_SEGWIT, 0, Consensus::BIP9Deployment::NO_TIMEOUT); // Generate a 100-block chain: coinbaseKey.MakeNewKey(true); CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG; @@ -134,7 +149,10 @@ TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>& block.vtx.push_back(MakeTransactionRef(tx)); // IncrementExtraNonce creates a valid coinbase and merkleRoot unsigned int extraNonce = 0; - IncrementExtraNonce(&block, chainActive.Tip(), extraNonce); + { + LOCK(cs_main); + IncrementExtraNonce(&block, chainActive.Tip(), extraNonce); + } while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce; diff --git a/src/test/test_bitcoin.h b/src/test/test_bitcoin.h index 2390aca342..6398386d42 100644 --- a/src/test/test_bitcoin.h +++ b/src/test/test_bitcoin.h @@ -49,9 +49,14 @@ struct BasicTestingSetup { * Included are data directory, coins database, script check threads setup. */ class CConnman; +class CNode; +struct CConnmanTest { + static void AddNode(CNode& node); + static void ClearNodes(); +}; + class PeerLogicValidation; struct TestingSetup: public BasicTestingSetup { - CCoinsViewDB *pcoinsdbview; fs::path pathTemp; boost::thread_group threadGroup; CConnman* connman; diff --git a/src/test/test_bitcoin_fuzzy.cpp b/src/test/test_bitcoin_fuzzy.cpp index 581ad2ffa0..6694c5caa8 100644 --- a/src/test/test_bitcoin_fuzzy.cpp +++ b/src/test/test_bitcoin_fuzzy.cpp @@ -19,6 +19,7 @@ #include "undo.h" #include "version.h" #include "pubkey.h" +#include "blockencodings.h" #include <stdint.h> #include <unistd.h> @@ -45,6 +46,8 @@ enum TEST_ID { CBLOOMFILTER_DESERIALIZE, CDISKBLOCKINDEX_DESERIALIZE, CTXOUTCOMPRESSOR_DESERIALIZE, + BLOCKTRANSACTIONS_DESERIALIZE, + BLOCKTRANSACTIONSREQUEST_DESERIALIZE, TEST_ID_END }; @@ -245,6 +248,26 @@ int test_one_input(std::vector<uint8_t> buffer) { break; } + case BLOCKTRANSACTIONS_DESERIALIZE: + { + try + { + BlockTransactions bt; + ds >> bt; + } catch (const std::ios_base::failure& e) {return 0;} + + break; + } + case BLOCKTRANSACTIONSREQUEST_DESERIALIZE: + { + try + { + BlockTransactionsRequest btr; + ds >> btr; + } catch (const std::ios_base::failure& e) {return 0;} + + break; + } default: return 0; } diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 82ca93e7da..ce3060a5f3 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -66,6 +66,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) // Test 1: block with both of those transactions should be rejected. block = CreateAndProcessBlock(spends, scriptPubKey); + LOCK(cs_main); BOOST_CHECK(chainActive.Tip()->GetBlockHash() != block.GetHash()); // Test 2: ... 
and should be rejected if spend1 is in the memory pool @@ -119,7 +120,7 @@ void ValidateCheckInputsForAllFlags(CMutableTransaction &tx, uint32_t failing_fl // WITNESS requires P2SH test_flags |= SCRIPT_VERIFY_P2SH; } - bool ret = CheckInputs(tx, state, pcoinsTip, true, test_flags, true, add_to_cache, txdata, nullptr); + bool ret = CheckInputs(tx, state, pcoinsTip.get(), true, test_flags, true, add_to_cache, txdata, nullptr); // CheckInputs should succeed iff test_flags doesn't intersect with // failing_flags bool expected_return_value = !(test_flags & failing_flags); @@ -135,13 +136,13 @@ void ValidateCheckInputsForAllFlags(CMutableTransaction &tx, uint32_t failing_fl if (ret && add_to_cache) { // Check that we get a cache hit if the tx was valid std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputs(tx, state, pcoinsTip, true, test_flags, true, add_to_cache, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputs(tx, state, pcoinsTip.get(), true, test_flags, true, add_to_cache, txdata, &scriptchecks)); BOOST_CHECK(scriptchecks.empty()); } else { // Check that we get script executions to check, if the transaction // was invalid, or we didn't add to cache. std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputs(tx, state, pcoinsTip, true, test_flags, true, add_to_cache, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputs(tx, state, pcoinsTip.get(), true, test_flags, true, add_to_cache, txdata, &scriptchecks)); BOOST_CHECK_EQUAL(scriptchecks.size(), tx.vin.size()); } } @@ -151,7 +152,10 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) { // Test that passing CheckInputs with one set of script flags doesn't imply // that we would pass again with a different set of flags. - InitScriptExecutionCache(); + { + LOCK(cs_main); + InitScriptExecutionCache(); + } CScript p2pk_scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG; CScript p2sh_scriptPubKey = GetScriptForDestination(CScriptID(p2pk_scriptPubKey)); @@ -201,13 +205,13 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) CValidationState state; PrecomputedTransactionData ptd_spend_tx(spend_tx); - BOOST_CHECK(!CheckInputs(spend_tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, nullptr)); + BOOST_CHECK(!CheckInputs(spend_tx, state, pcoinsTip.get(), true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, nullptr)); // If we call again asking for scriptchecks (as happens in // ConnectBlock), we should add a script check object for this -- we're // not caching invalidity (if that changes, delete this test case). 
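ValidateCheckInputsForAllFlags (partially visible above) exercises every combination of script-verification flags and expects success exactly when none of the known failing flags is included. A toy version of that exhaustive loop, with locally defined flag constants and a stand-in Verify() in place of CheckInputs():

```cpp
#include <cassert>
#include <cstdint>

constexpr uint32_t FLAG_P2SH     = 1u << 0;
constexpr uint32_t FLAG_DERSIG   = 1u << 2;
constexpr uint32_t FLAG_WITNESS  = 1u << 11;
constexpr uint32_t NUM_FLAG_BITS = 12;  // illustrative bound, not the real flag count

// Pretend validator: the transaction under test violates exactly failing_flags.
bool Verify(uint32_t test_flags, uint32_t failing_flags) {
    return (test_flags & failing_flags) == 0;
}

void CheckAllFlagCombinations(uint32_t failing_flags) {
    for (uint32_t test_flags = 0; test_flags < (1u << NUM_FLAG_BITS); ++test_flags) {
        uint32_t flags = test_flags;
        if (flags & FLAG_WITNESS) flags |= FLAG_P2SH;  // WITNESS requires P2SH, as in the test
        const bool expected = !(flags & failing_flags);
        assert(Verify(flags, failing_flags) == expected);
    }
}

int main() { CheckAllFlagCombinations(FLAG_DERSIG); }
```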
std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputs(spend_tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, &scriptchecks)); + BOOST_CHECK(CheckInputs(spend_tx, state, pcoinsTip.get(), true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, &scriptchecks)); BOOST_CHECK_EQUAL(scriptchecks.size(), 1); // Test that CheckInputs returns true iff DERSIG-enforcing flags are @@ -268,7 +272,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 100; CValidationState state; PrecomputedTransactionData txdata(invalid_with_cltv_tx); - BOOST_CHECK(CheckInputs(invalid_with_cltv_tx, state, pcoinsTip, true, SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true, txdata, nullptr)); + BOOST_CHECK(CheckInputs(invalid_with_cltv_tx, state, pcoinsTip.get(), true, SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true, txdata, nullptr)); } // TEST CHECKSEQUENCEVERIFY @@ -296,7 +300,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 100; CValidationState state; PrecomputedTransactionData txdata(invalid_with_csv_tx); - BOOST_CHECK(CheckInputs(invalid_with_csv_tx, state, pcoinsTip, true, SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true, txdata, nullptr)); + BOOST_CHECK(CheckInputs(invalid_with_csv_tx, state, pcoinsTip.get(), true, SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true, txdata, nullptr)); } // TODO: add tests for remaining script flags @@ -358,12 +362,12 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) CValidationState state; PrecomputedTransactionData txdata(tx); // This transaction is now invalid under segwit, because of the second input. - BOOST_CHECK(!CheckInputs(tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, nullptr)); + BOOST_CHECK(!CheckInputs(tx, state, pcoinsTip.get(), true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, nullptr)); std::vector<CScriptCheck> scriptchecks; // Make sure this transaction was not cached (ie because the first // input was valid) - BOOST_CHECK(CheckInputs(tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputs(tx, state, pcoinsTip.get(), true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, &scriptchecks)); // Should get 2 script checks back -- caching is on a whole-transaction basis. BOOST_CHECK_EQUAL(scriptchecks.size(), 2); } diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp index 882afb2e20..db537d3932 100644 --- a/src/test/versionbits_tests.cpp +++ b/src/test/versionbits_tests.cpp @@ -32,6 +32,12 @@ public: int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, paramsDummy, cache); } }; +class TestAlwaysActiveConditionChecker : public TestConditionChecker +{ +public: + int64_t BeginTime(const Consensus::Params& params) const override { return Consensus::BIP9Deployment::ALWAYS_ACTIVE; } +}; + #define CHECKERS 6 class VersionBitsTester @@ -43,6 +49,8 @@ class VersionBitsTester // The first one performs all checks, the second only 50%, the third only 25%, etc... // This is to test whether lack of cached information leads to the same results. 
TestConditionChecker checker[CHECKERS]; + // Another 6 that assume always active activation + TestAlwaysActiveConditionChecker checker_always[CHECKERS]; // Test counter (to identify failures) int num; @@ -56,6 +64,7 @@ public: } for (unsigned int i = 0; i < CHECKERS; i++) { checker[i] = TestConditionChecker(); + checker_always[i] = TestAlwaysActiveConditionChecker(); } vpblock.clear(); return *this; @@ -82,6 +91,7 @@ public: for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { BOOST_CHECK_MESSAGE(checker[i].GetStateSinceHeightFor(vpblock.empty() ? nullptr : vpblock.back()) == height, strprintf("Test %i for StateSinceHeight", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateSinceHeightFor(vpblock.empty() ? nullptr : vpblock.back()) == 0, strprintf("Test %i for StateSinceHeight (always active)", num)); } } num++; @@ -92,6 +102,7 @@ public: for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_DEFINED, strprintf("Test %i for DEFINED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -102,6 +113,7 @@ public: for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_STARTED, strprintf("Test %i for STARTED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -112,6 +124,7 @@ public: for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_LOCKED_IN, strprintf("Test %i for LOCKED_IN", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -122,6 +135,7 @@ public: for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -132,6 +146,7 @@ public: for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_FAILED, strprintf("Test %i for FAILED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? 
nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; diff --git a/src/tinyformat.h b/src/tinyformat.h index 2e453e56bb..d34cfaa94f 100644 --- a/src/tinyformat.h +++ b/src/tinyformat.h @@ -495,7 +495,11 @@ namespace detail { class FormatArg { public: - FormatArg() {} + FormatArg() + : m_value(nullptr), + m_formatImpl(nullptr), + m_toIntImpl(nullptr) + { } template<typename T> explicit FormatArg(const T& value) @@ -507,11 +511,15 @@ class FormatArg void format(std::ostream& out, const char* fmtBegin, const char* fmtEnd, int ntrunc) const { + assert(m_value); + assert(m_formatImpl); m_formatImpl(out, fmtBegin, fmtEnd, ntrunc, m_value); } int toInt() const { + assert(m_value); + assert(m_toIntImpl); return m_toIntImpl(m_value); } @@ -712,23 +720,27 @@ inline const char* streamStateFromFormat(std::ostream& out, bool& spacePadPositi break; case 'X': out.setf(std::ios::uppercase); + // Falls through case 'x': case 'p': out.setf(std::ios::hex, std::ios::basefield); intConversion = true; break; case 'E': out.setf(std::ios::uppercase); + // Falls through case 'e': out.setf(std::ios::scientific, std::ios::floatfield); out.setf(std::ios::dec, std::ios::basefield); break; case 'F': out.setf(std::ios::uppercase); + // Falls through case 'f': out.setf(std::ios::fixed, std::ios::floatfield); break; case 'G': out.setf(std::ios::uppercase); + // Falls through case 'g': out.setf(std::ios::dec, std::ios::basefield); // As in boost::format, let stream decide float format. diff --git a/src/txmempool.cpp b/src/txmempool.cpp index b0306811cb..c509e22a2e 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -317,7 +317,7 @@ void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, CAmount modifyFe assert(int64_t(nCountWithDescendants) > 0); } -void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount, int modifySigOps) +void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount, int64_t modifySigOps) { nSizeWithAncestors += modifySize; assert(int64_t(nSizeWithAncestors) > 0); diff --git a/src/txmempool.h b/src/txmempool.h index 929d223588..f112ee1775 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -109,7 +109,7 @@ public: // Adjusts the descendant state. void UpdateDescendantState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount); // Adjusts the ancestor state - void UpdateAncestorState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount, int modifySigOps); + void UpdateAncestorState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount, int64_t modifySigOps); // Updates the fee delta used for mining priority score, and the // modified fees with descendants. void UpdateFeeDelta(int64_t feeDelta); diff --git a/src/uint256.h b/src/uint256.h index 94a4f7fc30..259161ce0a 100644 --- a/src/uint256.h +++ b/src/uint256.h @@ -19,7 +19,7 @@ template<unsigned int BITS> class base_blob { protected: - enum { WIDTH=BITS/8 }; + static constexpr int WIDTH = BITS / 8; uint8_t data[WIDTH]; public: base_blob() diff --git a/src/util.h b/src/util.h index 480a80c0a3..43acd93ee2 100644 --- a/src/util.h +++ b/src/util.h @@ -326,4 +326,11 @@ template <typename Callable> void TraceThread(const char* name, Callable func) std::string CopyrightHolders(const std::string& strPrefix); +//! Substitute for C++14 std::make_unique. +template <typename T, typename... Args> +std::unique_ptr<T> MakeUnique(Args&&... 
args) +{ + return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); +} + #endif // BITCOIN_UTIL_H diff --git a/src/validation.cpp b/src/validation.cpp index 7f6697c780..e7b6fc52a8 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -156,6 +156,26 @@ namespace { /** chainwork for the last block that preciousblock has been applied to. */ arith_uint256 nLastPreciousChainwork = 0; + /** In order to efficiently track invalidity of headers, we keep the set of + * blocks which we tried to connect and found to be invalid here (ie which + * were set to BLOCK_FAILED_VALID since the last restart). We can then + * walk this set and check if a new header is a descendant of something in + * this set, preventing us from having to walk mapBlockIndex when we try + * to connect a bad block and fail. + * + * While this is more complicated than marking everything which descends + * from an invalid block as invalid at the time we discover it to be + * invalid, doing so would require walking all of mapBlockIndex to find all + * descendants. Since this case should be very rare, keeping track of all + * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as + * well. + * + * Because we already walk mapBlockIndex in height-order at startup, we go + * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time, + * instead of putting things in this set. + */ + std::set<CBlockIndex*> g_failed_blocks; + /** Dirty block index entries. */ std::set<CBlockIndex*> setDirtyBlockIndex; @@ -181,9 +201,9 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc return chain.Genesis(); } -CCoinsViewDB *pcoinsdbview = nullptr; -CCoinsViewCache *pcoinsTip = nullptr; -CBlockTreeDB *pblocktree = nullptr; +std::unique_ptr<CCoinsViewDB> pcoinsdbview; +std::unique_ptr<CCoinsViewCache> pcoinsTip; +std::unique_ptr<CBlockTreeDB> pblocktree; enum FlushStateMode { FLUSH_STATE_NONE, @@ -275,7 +295,7 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool } else { // pcoinsTip contains the UTXO set for chainActive.Tip() - CCoinsViewMemPool viewMemPool(pcoinsTip, mempool); + CCoinsViewMemPool viewMemPool(pcoinsTip.get(), mempool); std::vector<int> prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { @@ -404,7 +424,7 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f mempool.UpdateTransactionsFromBlock(vHashUpdate); // We also need to remove any now-immature transactions - mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + mempool.removeForReorg(pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } @@ -537,7 +557,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool LockPoints lp; { LOCK(pool.cs); - CCoinsViewMemPool viewMemPool(pcoinsTip, pool); + CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool); view.SetBackend(viewMemPool); // do all inputs exist? 
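The MakeUnique template added to util.h above is a drop-in substitute for C++14's std::make_unique on a C++11 toolchain. A small usage sketch, with the helper reproduced so the snippet stands alone; Widget is a hypothetical type, not something from the codebase:

```cpp
#include <memory>
#include <utility>

template <typename T, typename... Args>
std::unique_ptr<T> MakeUnique(Args&&... args)
{
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

struct Widget {
    Widget(int a, double b) : a(a), b(b) {}
    int a;
    double b;
};

int main()
{
    // Equivalent to std::make_unique<Widget>(1, 2.5) on a C++14 compiler.
    std::unique_ptr<Widget> w = MakeUnique<Widget>(1, 2.5);
    return w->a;
}
```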
@@ -939,6 +959,9 @@ bool GetTransaction(const uint256 &hash, CTransactionRef &txOut, const Consensus return error("%s: txid mismatch", __func__); return true; } + + // transaction not found in index, nothing more can be done + return false; } if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it @@ -1177,6 +1200,7 @@ void static InvalidChainFound(CBlockIndex* pindexNew) void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { if (!state.CorruptionPossible()) { pindex->nStatus |= BLOCK_FAILED_VALID; + g_failed_blocks.insert(pindex); setDirtyBlockIndex.insert(pindex); setBlockIndexCandidates.erase(pindex); InvalidChainFound(pindex); @@ -1587,11 +1611,12 @@ static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS]; static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) { AssertLockHeld(cs_main); - // BIP16 didn't become active until Apr 1 2012 - int64_t nBIP16SwitchTime = 1333238400; - bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime); + unsigned int flags = SCRIPT_VERIFY_NONE; - unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE; + // Start enforcing P2SH (BIP16) + if (pindex->nHeight >= consensusparams.BIP16Height) { + flags |= SCRIPT_VERIFY_P2SH; + } // Start enforcing the DERSIG (BIP66) rule if (pindex->nHeight >= consensusparams.BIP66Height) { @@ -2080,7 +2105,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara // Apply the block atomically to the chain state. int64_t nStart = GetTimeMicros(); { - CCoinsViewCache view(pcoinsTip); + CCoinsViewCache view(pcoinsTip.get()); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); @@ -2210,7 +2235,7 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, int64_t nTime3; LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO); { - CCoinsViewCache view(pcoinsTip); + CCoinsViewCache view(pcoinsTip.get()); bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams); GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { @@ -2388,7 +2413,7 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c // any disconnected transactions back to the mempool. UpdateMempoolForReorg(disconnectpool, true); } - mempool.check(pcoinsTip); + mempool.check(pcoinsTip.get()); // Callbacks/notifications for a new best chain. if (fInvalidFound) @@ -2530,17 +2555,18 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C { AssertLockHeld(cs_main); - // Mark the block itself as invalid. - pindex->nStatus |= BLOCK_FAILED_VALID; - setDirtyBlockIndex.insert(pindex); - setBlockIndexCandidates.erase(pindex); + // We first disconnect backwards and then mark the blocks as invalid. + // This prevents a case where pruned nodes may fail to invalidateblock + // and be left unable to start as they have no tip candidates (as there + // are no blocks that meet the "have data and are not invalid per + // nStatus" criteria for inclusion in setBlockIndexCandidates). 
+ + bool pindex_was_in_chain = false; + CBlockIndex *invalid_walk_tip = chainActive.Tip(); DisconnectedBlockTransactions disconnectpool; while (chainActive.Contains(pindex)) { - CBlockIndex *pindexWalk = chainActive.Tip(); - pindexWalk->nStatus |= BLOCK_FAILED_CHILD; - setDirtyBlockIndex.insert(pindexWalk); - setBlockIndexCandidates.erase(pindexWalk); + pindex_was_in_chain = true; // ActivateBestChain considers blocks already in chainActive // unconditionally valid already, so force disconnect away from it. if (!DisconnectTip(state, chainparams, &disconnectpool)) { @@ -2551,6 +2577,21 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C } } + // Now mark the blocks we just disconnected as descendants invalid + // (note this may not be all descendants). + while (pindex_was_in_chain && invalid_walk_tip != pindex) { + invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD; + setDirtyBlockIndex.insert(invalid_walk_tip); + setBlockIndexCandidates.erase(invalid_walk_tip); + invalid_walk_tip = invalid_walk_tip->pprev; + } + + // Mark the block itself as invalid. + pindex->nStatus |= BLOCK_FAILED_VALID; + setDirtyBlockIndex.insert(pindex); + setBlockIndexCandidates.erase(pindex); + g_failed_blocks.insert(pindex); + // DisconnectTip will add transactions to disconnectpool; try to add these // back to the mempool. UpdateMempoolForReorg(disconnectpool, true); @@ -2588,6 +2629,7 @@ bool ResetBlockFailureFlags(CBlockIndex *pindex) { // Reset invalid block marker if it was pointing to one of those. pindexBestInvalid = nullptr; } + g_failed_blocks.erase(it->second); } it++; } @@ -3063,6 +3105,21 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime())) return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); + + if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) { + for (const CBlockIndex* failedit : g_failed_blocks) { + if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) { + assert(failedit->nStatus & BLOCK_FAILED_VALID); + CBlockIndex* invalid_walk = pindexPrev; + while (invalid_walk != failedit) { + invalid_walk->nStatus |= BLOCK_FAILED_CHILD; + setDirtyBlockIndex.insert(invalid_walk); + invalid_walk = invalid_walk->pprev; + } + return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); + } + } + } } if (pindex == nullptr) pindex = AddToBlockIndex(block); @@ -3076,13 +3133,15 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state } // Exposed wrapper for AcceptBlockHeader -bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex) +bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex, CBlockHeader *first_invalid) { + if (first_invalid != nullptr) first_invalid->SetNull(); { LOCK(cs_main); for (const CBlockHeader& header : headers) { CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast if (!AcceptBlockHeader(header, state, chainparams, &pindex)) { + if (first_invalid) *first_invalid = header; return false; } if (ppindex) { @@ -3112,7 +3171,7 @@ static bool AcceptBlock(const std::shared_ptr<const 
CBlock>& pblock, CValidation // process an unrequested block if it's new and has enough work to // advance our tip, and isn't too many blocks ahead. bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA; - bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true); + bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true); // Blocks that are too out-of-order needlessly limit the effectiveness of // pruning, because pruning will not delete block files that contain any // blocks which are too close in height to the tip. Apply this test @@ -3129,9 +3188,15 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation // and unrequested blocks. if (fAlreadyHave) return true; if (!fRequested) { // If we didn't ask for it: - if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned - if (!fHasMoreWork) return true; // Don't process less-work chains - if (fTooFarAhead) return true; // Block height is too high + if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned + if (!fHasMoreOrSameWork) return true; // Don't process less-work chains + if (fTooFarAhead) return true; // Block height is too high + + // Protect against DoS attacks from low-work chains. + // If our tip is behind, a peer could try to send us + // low-work blocks on a fake chain that we would never + // request; don't process these. + if (pindex->nChainWork < nMinimumChainWork) return true; } if (fNewBlock) *fNewBlock = true; @@ -3210,7 +3275,7 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, { AssertLockHeld(cs_main); assert(pindexPrev && pindexPrev == chainActive.Tip()); - CCoinsViewCache viewNew(pcoinsTip); + CCoinsViewCache viewNew(pcoinsTip.get()); CBlockIndex indexDummy(block); indexDummy.pprev = pindexPrev; indexDummy.nHeight = pindexPrev->nHeight + 1; @@ -3483,6 +3548,10 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) pindex->nChainTx = pindex->nTx; } } + if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) { + pindex->nStatus |= BLOCK_FAILED_CHILD; + setDirtyBlockIndex.insert(pindex); + } if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr)) setBlockIndexCandidates.insert(pindex); if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork)) @@ -3873,6 +3942,7 @@ void UnloadBlockIndex() nLastBlockFile = 0; nBlockSequenceId = 1; setDirtyBlockIndex.clear(); + g_failed_blocks.clear(); setDirtyFileInfo.clear(); versionbitscache.Clear(); for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) { @@ -4287,8 +4357,9 @@ bool LoadMempool(void) } int64_t count = 0; - int64_t skipped = 0; + int64_t expired = 0; int64_t failed = 0; + int64_t already_there = 0; int64_t nNow = GetTime(); try { @@ -4319,10 +4390,18 @@ bool LoadMempool(void) if (state.IsValid()) { ++count; } else { - ++failed; + // mempool may contain the transaction already, e.g. 
from + // wallet(s) having loaded it while we were processing + // mempool transactions; consider these as valid, instead of + // failed, but mark them as 'already there' + if (mempool.exists(tx->GetHash())) { + ++already_there; + } else { + ++failed; + } } } else { - ++skipped; + ++expired; } if (ShutdownRequested()) return false; @@ -4338,7 +4417,7 @@ bool LoadMempool(void) return false; } - LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped); + LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there\n", count, failed, expired, already_there); return true; } diff --git a/src/validation.h b/src/validation.h index 6bc52753c5..f2ffdc8993 100644 --- a/src/validation.h +++ b/src/validation.h @@ -247,8 +247,9 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons * @param[out] state This may be set to an Error state if any error occurred processing them * @param[in] chainparams The params for the chain we want to connect to * @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers + * @param[out] first_invalid First header that fails validation, if one exists */ -bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex=nullptr); +bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex=nullptr, CBlockHeader *first_invalid=nullptr); /** Check whether enough disk space is available for an incoming block */ bool CheckDiskSpace(uint64_t nAdditionalBytes = 0); @@ -443,13 +444,13 @@ bool ResetBlockFailureFlags(CBlockIndex *pindex); extern CChain chainActive; /** Global variable that points to the coins database (protected by cs_main) */ -extern CCoinsViewDB *pcoinsdbview; +extern std::unique_ptr<CCoinsViewDB> pcoinsdbview; /** Global variable that points to the active CCoinsView (protected by cs_main) */ -extern CCoinsViewCache *pcoinsTip; +extern std::unique_ptr<CCoinsViewCache> pcoinsTip; /** Global variable that points to the active block tree (protected by cs_main) */ -extern CBlockTreeDB *pblocktree; +extern std::unique_ptr<CBlockTreeDB> pblocktree; /** * Return the spend height, which is one more than the inputs.GetBestBlock(). diff --git a/src/versionbits.cpp b/src/versionbits.cpp index 64ae939672..fc1acb3258 100644 --- a/src/versionbits.cpp +++ b/src/versionbits.cpp @@ -27,6 +27,11 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* int64_t nTimeStart = BeginTime(params); int64_t nTimeTimeout = EndTime(params); + // Check if this deployment is always active. + if (nTimeStart == Consensus::BIP9Deployment::ALWAYS_ACTIVE) { + return THRESHOLD_ACTIVE; + } + // A block's state is always the same as that of the first of its period, so it is computed based on a pindexPrev whose height equals a multiple of nPeriod - 1. 
if (pindexPrev != nullptr) { pindexPrev = pindexPrev->GetAncestor(pindexPrev->nHeight - ((pindexPrev->nHeight + 1) % nPeriod)); @@ -136,6 +141,11 @@ BIP9Stats AbstractThresholdConditionChecker::GetStateStatisticsFor(const CBlockI int AbstractThresholdConditionChecker::GetStateSinceHeightFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const { + int64_t start_time = BeginTime(params); + if (start_time == Consensus::BIP9Deployment::ALWAYS_ACTIVE) { + return 0; + } + const ThresholdState initialState = GetStateFor(pindexPrev, params, cache); // BIP 9 about state DEFINED: "The genesis block is by definition in this state for each deployment." diff --git a/src/wallet/crypter.cpp b/src/wallet/crypter.cpp index 8db3bfd69c..5b31a40fc7 100644 --- a/src/wallet/crypter.cpp +++ b/src/wallet/crypter.cpp @@ -152,6 +152,15 @@ bool CCryptoKeyStore::SetCrypted() return true; } +bool CCryptoKeyStore::IsLocked() const +{ + if (!IsCrypted()) { + return false; + } + LOCK(cs_KeyStore); + return vMasterKey.empty(); +} + bool CCryptoKeyStore::Lock() { if (!SetCrypted()) @@ -206,21 +215,23 @@ bool CCryptoKeyStore::Unlock(const CKeyingMaterial& vMasterKeyIn) bool CCryptoKeyStore::AddKeyPubKey(const CKey& key, const CPubKey &pubkey) { - { - LOCK(cs_KeyStore); - if (!IsCrypted()) - return CBasicKeyStore::AddKeyPubKey(key, pubkey); + LOCK(cs_KeyStore); + if (!IsCrypted()) { + return CBasicKeyStore::AddKeyPubKey(key, pubkey); + } - if (IsLocked()) - return false; + if (IsLocked()) { + return false; + } - std::vector<unsigned char> vchCryptedSecret; - CKeyingMaterial vchSecret(key.begin(), key.end()); - if (!EncryptSecret(vMasterKey, vchSecret, pubkey.GetHash(), vchCryptedSecret)) - return false; + std::vector<unsigned char> vchCryptedSecret; + CKeyingMaterial vchSecret(key.begin(), key.end()); + if (!EncryptSecret(vMasterKey, vchSecret, pubkey.GetHash(), vchCryptedSecret)) { + return false; + } - if (!AddCryptedKey(pubkey, vchCryptedSecret)) - return false; + if (!AddCryptedKey(pubkey, vchCryptedSecret)) { + return false; } return true; } @@ -228,72 +239,88 @@ bool CCryptoKeyStore::AddKeyPubKey(const CKey& key, const CPubKey &pubkey) bool CCryptoKeyStore::AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret) { - { - LOCK(cs_KeyStore); - if (!SetCrypted()) - return false; - - mapCryptedKeys[vchPubKey.GetID()] = make_pair(vchPubKey, vchCryptedSecret); + LOCK(cs_KeyStore); + if (!SetCrypted()) { + return false; } + + mapCryptedKeys[vchPubKey.GetID()] = make_pair(vchPubKey, vchCryptedSecret); return true; } +bool CCryptoKeyStore::HaveKey(const CKeyID &address) const +{ + LOCK(cs_KeyStore); + if (!IsCrypted()) { + return CBasicKeyStore::HaveKey(address); + } + return mapCryptedKeys.count(address) > 0; +} + bool CCryptoKeyStore::GetKey(const CKeyID &address, CKey& keyOut) const { - { - LOCK(cs_KeyStore); - if (!IsCrypted()) - return CBasicKeyStore::GetKey(address, keyOut); + LOCK(cs_KeyStore); + if (!IsCrypted()) { + return CBasicKeyStore::GetKey(address, keyOut); + } - CryptedKeyMap::const_iterator mi = mapCryptedKeys.find(address); - if (mi != mapCryptedKeys.end()) - { - const CPubKey &vchPubKey = (*mi).second.first; - const std::vector<unsigned char> &vchCryptedSecret = (*mi).second.second; - return DecryptKey(vMasterKey, vchCryptedSecret, vchPubKey, keyOut); - } + CryptedKeyMap::const_iterator mi = mapCryptedKeys.find(address); + if (mi != mapCryptedKeys.end()) + { + const CPubKey &vchPubKey = (*mi).second.first; + const 
std::vector<unsigned char> &vchCryptedSecret = (*mi).second.second; + return DecryptKey(vMasterKey, vchCryptedSecret, vchPubKey, keyOut); } return false; } bool CCryptoKeyStore::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const { + LOCK(cs_KeyStore); + if (!IsCrypted()) + return CBasicKeyStore::GetPubKey(address, vchPubKeyOut); + + CryptedKeyMap::const_iterator mi = mapCryptedKeys.find(address); + if (mi != mapCryptedKeys.end()) { - LOCK(cs_KeyStore); - if (!IsCrypted()) - return CBasicKeyStore::GetPubKey(address, vchPubKeyOut); + vchPubKeyOut = (*mi).second.first; + return true; + } + // Check for watch-only pubkeys + return CBasicKeyStore::GetPubKey(address, vchPubKeyOut); +} - CryptedKeyMap::const_iterator mi = mapCryptedKeys.find(address); - if (mi != mapCryptedKeys.end()) - { - vchPubKeyOut = (*mi).second.first; - return true; - } - // Check for watch-only pubkeys - return CBasicKeyStore::GetPubKey(address, vchPubKeyOut); +std::set<CKeyID> CCryptoKeyStore::GetKeys() const +{ + LOCK(cs_KeyStore); + if (!IsCrypted()) { + return CBasicKeyStore::GetKeys(); } + std::set<CKeyID> set_address; + for (const auto& mi : mapCryptedKeys) { + set_address.insert(mi.first); + } + return set_address; } bool CCryptoKeyStore::EncryptKeys(CKeyingMaterial& vMasterKeyIn) { + LOCK(cs_KeyStore); + if (!mapCryptedKeys.empty() || IsCrypted()) + return false; + + fUseCrypto = true; + for (KeyMap::value_type& mKey : mapKeys) { - LOCK(cs_KeyStore); - if (!mapCryptedKeys.empty() || IsCrypted()) + const CKey &key = mKey.second; + CPubKey vchPubKey = key.GetPubKey(); + CKeyingMaterial vchSecret(key.begin(), key.end()); + std::vector<unsigned char> vchCryptedSecret; + if (!EncryptSecret(vMasterKeyIn, vchSecret, vchPubKey.GetHash(), vchCryptedSecret)) + return false; + if (!AddCryptedKey(vchPubKey, vchCryptedSecret)) return false; - - fUseCrypto = true; - for (KeyMap::value_type& mKey : mapKeys) - { - const CKey &key = mKey.second; - CPubKey vchPubKey = key.GetPubKey(); - CKeyingMaterial vchSecret(key.begin(), key.end()); - std::vector<unsigned char> vchCryptedSecret; - if (!EncryptSecret(vMasterKeyIn, vchSecret, vchPubKey.GetHash(), vchCryptedSecret)) - return false; - if (!AddCryptedKey(vchPubKey, vchCryptedSecret)) - return false; - } - mapKeys.clear(); } + mapKeys.clear(); return true; } diff --git a/src/wallet/crypter.h b/src/wallet/crypter.h index 1416ae7d02..67c8481196 100644 --- a/src/wallet/crypter.h +++ b/src/wallet/crypter.h @@ -139,52 +139,16 @@ public: { } - bool IsCrypted() const - { - return fUseCrypto; - } - - bool IsLocked() const - { - if (!IsCrypted()) - return false; - bool result; - { - LOCK(cs_KeyStore); - result = vMasterKey.empty(); - } - return result; - } - + bool IsCrypted() const { return fUseCrypto; } + bool IsLocked() const; bool Lock(); virtual bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret); bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override; - bool HaveKey(const CKeyID &address) const override - { - { - LOCK(cs_KeyStore); - if (!IsCrypted()) { - return CBasicKeyStore::HaveKey(address); - } - return mapCryptedKeys.count(address) > 0; - } - return false; - } + bool HaveKey(const CKeyID &address) const override; bool GetKey(const CKeyID &address, CKey& keyOut) const override; bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override; - std::set<CKeyID> GetKeys() const override - { - LOCK(cs_KeyStore); - if (!IsCrypted()) { - return CBasicKeyStore::GetKeys(); - } - std::set<CKeyID> set_address; - 
for (const auto& mi : mapCryptedKeys) { - set_address.insert(mi.first); - } - return set_address; - } + std::set<CKeyID> GetKeys() const override; /** * Wallet status (encrypted, locked) changed. diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index d66ba48421..ca8e680ff7 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -20,6 +20,40 @@ #include <boost/thread.hpp> +namespace { +//! Make sure database has a unique fileid within the environment. If it +//! doesn't, throw an error. BDB caches do not work properly when more than one +//! open database has the same fileid (values written to one database may show +//! up in reads to other databases). +//! +//! BerkeleyDB generates unique fileids by default +//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html), +//! so bitcoin should never create different databases with the same fileid, but +//! this error can be triggered if users manually copy database files. +void CheckUniqueFileid(const CDBEnv& env, const std::string& filename, Db& db) +{ + if (env.IsMock()) return; + + u_int8_t fileid[DB_FILE_ID_LEN]; + int ret = db.get_mpf()->get_fileid(fileid); + if (ret != 0) { + throw std::runtime_error(strprintf("CDB: Can't open database %s (get_fileid failed with %d)", filename, ret)); + } + + for (const auto& item : env.mapDb) { + u_int8_t item_fileid[DB_FILE_ID_LEN]; + if (item.second && item.second->get_mpf()->get_fileid(item_fileid) == 0 && + memcmp(fileid, item_fileid, sizeof(fileid)) == 0) { + const char* item_filename = nullptr; + item.second->get_dbname(&item_filename, nullptr); + throw std::runtime_error(strprintf("CDB: Can't open database %s (duplicates fileid %s from %s)", filename, + HexStr(std::begin(item_fileid), std::end(item_fileid)), + item_filename ? 
item_filename : "(unknown database)")); + } + } +} +} // namespace + // // CDB // @@ -41,13 +75,12 @@ void CDBEnv::EnvShutdown() void CDBEnv::Reset() { - delete dbenv; - dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS); + dbenv.reset(new DbEnv(DB_CXX_NO_EXCEPTIONS)); fDbEnvInit = false; fMockDb = false; } -CDBEnv::CDBEnv() : dbenv(nullptr) +CDBEnv::CDBEnv() { Reset(); } @@ -55,8 +88,6 @@ CDBEnv::CDBEnv() : dbenv(nullptr) CDBEnv::~CDBEnv() { EnvShutdown(); - delete dbenv; - dbenv = nullptr; } void CDBEnv::Close() @@ -148,7 +179,7 @@ CDBEnv::VerifyResult CDBEnv::Verify(const std::string& strFile, recoverFunc_type LOCK(cs_db); assert(mapFileUseCount.count(strFile) == 0); - Db db(dbenv, 0); + Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); if (result == 0) return VERIFY_OK; @@ -191,7 +222,7 @@ bool CDB::Recover(const std::string& filename, void *callbackDataIn, bool (*reco } LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size()); - std::unique_ptr<Db> pdbCopy(new Db(bitdb.dbenv, 0)); + std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(bitdb.dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer filename.c_str(), // Filename "main", // Logical db name @@ -300,7 +331,7 @@ bool CDBEnv::Salvage(const std::string& strFile, bool fAggressive, std::vector<C std::stringstream strDump; - Db db(dbenv, 0); + Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, &strDump, flags); if (result == DB_VERIFY_BAD) { LogPrintf("CDBEnv::Salvage: Database salvage found errors, all data may not be recoverable.\n"); @@ -379,35 +410,34 @@ CDB::CDB(CWalletDBWrapper& dbw, const char* pszMode, bool fFlushOnCloseIn) : pdb if (!env->Open(GetDataDir())) throw std::runtime_error("CDB: Failed to open database environment."); - strFile = strFilename; - ++env->mapFileUseCount[strFile]; - pdb = env->mapDb[strFile]; + pdb = env->mapDb[strFilename]; if (pdb == nullptr) { int ret; - pdb = new Db(env->dbenv, 0); + std::unique_ptr<Db> pdb_temp = MakeUnique<Db>(env->dbenv.get(), 0); bool fMockDb = env->IsMock(); if (fMockDb) { - DbMpoolFile* mpf = pdb->get_mpf(); + DbMpoolFile* mpf = pdb_temp->get_mpf(); ret = mpf->set_flags(DB_MPOOL_NOFILE, 1); - if (ret != 0) - throw std::runtime_error(strprintf("CDB: Failed to configure for no temp file backing for database %s", strFile)); + if (ret != 0) { + throw std::runtime_error(strprintf("CDB: Failed to configure for no temp file backing for database %s", strFilename)); + } } - ret = pdb->open(nullptr, // Txn pointer - fMockDb ? nullptr : strFile.c_str(), // Filename - fMockDb ? strFile.c_str() : "main", // Logical db name - DB_BTREE, // Database type - nFlags, // Flags + ret = pdb_temp->open(nullptr, // Txn pointer + fMockDb ? nullptr : strFilename.c_str(), // Filename + fMockDb ? 
strFilename.c_str() : "main", // Logical db name + DB_BTREE, // Database type + nFlags, // Flags 0); if (ret != 0) { - delete pdb; - pdb = nullptr; - --env->mapFileUseCount[strFile]; - strFile = ""; throw std::runtime_error(strprintf("CDB: Error %d, can't open database %s", ret, strFilename)); } + CheckUniqueFileid(*env, strFilename, *pdb_temp); + + pdb = pdb_temp.release(); + env->mapDb[strFilename] = pdb; if (fCreate && !Exists(std::string("version"))) { bool fTmp = fReadOnly; @@ -415,9 +445,9 @@ CDB::CDB(CWalletDBWrapper& dbw, const char* pszMode, bool fFlushOnCloseIn) : pdb WriteVersion(CLIENT_VERSION); fReadOnly = fTmp; } - - env->mapDb[strFile] = pdb; } + ++env->mapFileUseCount[strFilename]; + strFile = strFilename; } } @@ -492,7 +522,7 @@ bool CDB::Rewrite(CWalletDBWrapper& dbw, const char* pszSkip) std::string strFileRes = strFile + ".rewrite"; { // surround usage of db with extra {} CDB db(dbw, "r"); - Db* pdbCopy = new Db(env->dbenv, 0); + std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer strFileRes.c_str(), // Filename @@ -541,13 +571,12 @@ bool CDB::Rewrite(CWalletDBWrapper& dbw, const char* pszSkip) } else { pdbCopy->close(0); } - delete pdbCopy; } if (fSuccess) { - Db dbA(env->dbenv, 0); + Db dbA(env->dbenv.get(), 0); if (dbA.remove(strFile.c_str(), nullptr, 0)) fSuccess = false; - Db dbB(env->dbenv, 0); + Db dbB(env->dbenv.get(), 0); if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0)) fSuccess = false; } @@ -672,6 +701,11 @@ bool CWalletDBWrapper::Backup(const std::string& strDest) pathDest /= strFile; try { + if (fs::equivalent(pathSrc, pathDest)) { + LogPrintf("cannot backup to wallet source file %s\n", pathDest.string()); + return false; + } + fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists); LogPrintf("copied %s to %s\n", strFile, pathDest.string()); return true; diff --git a/src/wallet/db.h b/src/wallet/db.h index 14283ac8f8..e6d5a9f293 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -36,7 +36,7 @@ private: public: mutable CCriticalSection cs_db; - DbEnv *dbenv; + std::unique_ptr<DbEnv> dbenv; std::map<std::string, int> mapFileUseCount; std::map<std::string, Db*> mapDb; diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp index 6abd060714..8b7c50b4e9 100644 --- a/src/wallet/feebumper.cpp +++ b/src/wallet/feebumper.cpp @@ -89,7 +89,7 @@ CFeeBumper::CFeeBumper(const CWallet *pWallet, const uint256 txidIn, const CCoin return; } - if (!SignalsOptInRBF(wtx)) { + if (!SignalsOptInRBF(*wtx.tx)) { vErrors.push_back("Transaction is not BIP 125 replaceable"); currentResult = BumpFeeResult::WALLET_ERROR; return; @@ -103,7 +103,7 @@ CFeeBumper::CFeeBumper(const CWallet *pWallet, const uint256 txidIn, const CCoin // check that original tx consists entirely of our inputs // if not, we can't bump the fee, because the wallet has no way of knowing the value of the other inputs (thus the fee) - if (!pWallet->IsAllFromMe(wtx, ISMINE_SPENDABLE)) { + if (!pWallet->IsAllFromMe(*wtx.tx, ISMINE_SPENDABLE)) { vErrors.push_back("Transaction contains inputs that don't belong to this wallet"); currentResult = BumpFeeResult::WALLET_ERROR; return; @@ -196,7 +196,13 @@ CFeeBumper::CFeeBumper(const CWallet *pWallet, const uint256 txidIn, const CCoin // moment earlier. In this case, we report an error to the user, who may use totalFee to make an adjustment. 
CFeeRate minMempoolFeeRate = mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000); if (nNewFeeRate.GetFeePerK() < minMempoolFeeRate.GetFeePerK()) { - vErrors.push_back(strprintf("New fee rate (%s) is less than the minimum fee rate (%s) to get into the mempool. totalFee value should to be at least %s or settxfee value should be at least %s to add transaction.", FormatMoney(nNewFeeRate.GetFeePerK()), FormatMoney(minMempoolFeeRate.GetFeePerK()), FormatMoney(minMempoolFeeRate.GetFee(maxNewTxSize)), FormatMoney(minMempoolFeeRate.GetFeePerK()))); + vErrors.push_back(strprintf( + "New fee rate (%s) is lower than the minimum fee rate (%s) to get into the mempool -- " + "the totalFee value should be at least %s or the settxfee value should be at least %s to add transaction", + FormatMoney(nNewFeeRate.GetFeePerK()), + FormatMoney(minMempoolFeeRate.GetFeePerK()), + FormatMoney(minMempoolFeeRate.GetFee(maxNewTxSize)), + FormatMoney(minMempoolFeeRate.GetFeePerK()))); currentResult = BumpFeeResult::WALLET_ERROR; return; } @@ -267,7 +273,7 @@ bool CFeeBumper::commit(CWallet *pWallet) CValidationState state; if (!pWallet->CommitTransaction(wtxBumped, reservekey, g_connman.get(), state)) { // NOTE: CommitTransaction never returns false, so this should never happen. - vErrors.push_back(strprintf("Error: The transaction was rejected! Reason given: %s", state.GetRejectReason())); + vErrors.push_back(strprintf("The transaction was rejected: %s", state.GetRejectReason())); return false; } @@ -275,7 +281,7 @@ bool CFeeBumper::commit(CWallet *pWallet) if (state.IsInvalid()) { // This can happen if the mempool rejected the transaction. Report // what happened in the "errors" response. - vErrors.push_back(strprintf("Error: The transaction was rejected: %s", FormatStateMessage(state))); + vErrors.push_back(strprintf("The transaction was rejected: %s", FormatStateMessage(state))); } // mark the original tx as bumped @@ -284,7 +290,7 @@ bool CFeeBumper::commit(CWallet *pWallet) // along with an exception. It would be good to return information about // wtxBumped to the caller even if marking the original transaction // replaced does not succeed for some reason. 
- vErrors.push_back("Error: Created new bumpfee transaction but could not mark the original transaction as replaced."); + vErrors.push_back("Created new bumpfee transaction but could not mark the original transaction as replaced"); } return true; } diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index c984df1df8..6b5d4cc668 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -53,11 +53,16 @@ std::string GetWalletHelpString(bool showDebug) bool WalletParameterInteraction() { - gArgs.SoftSetArg("-wallet", DEFAULT_WALLET_DAT); - const bool is_multiwallet = gArgs.GetArgs("-wallet").size() > 1; + if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { + for (const std::string& wallet : gArgs.GetArgs("-wallet")) { + LogPrintf("%s: parameter interaction: -disablewallet -> ignoring -wallet=%s\n", __func__, wallet); + } - if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) return true; + } + + gArgs.SoftSetArg("-wallet", DEFAULT_WALLET_DAT); + const bool is_multiwallet = gArgs.GetArgs("-wallet").size() > 1; if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY) && gArgs.SoftSetBoolArg("-walletbroadcast", false)) { LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -walletbroadcast=0\n", __func__); @@ -173,15 +178,18 @@ bool WalletParameterInteraction() void RegisterWalletRPC(CRPCTable &t) { - if (gArgs.GetBoolArg("-disablewallet", false)) return; + if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { + return; + } RegisterWalletRPCCommands(t); } bool VerifyWallets() { - if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) + if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { return true; + } uiInterface.InitMessage(_("Verifying wallet(s)...")); diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index d6ea2a9db7..30246a534b 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -81,7 +81,7 @@ UniValue importprivkey(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 1 || request.params.size() > 3) throw std::runtime_error( "importprivkey \"privkey\" ( \"label\" ) ( rescan )\n" - "\nAdds a private key (as returned by dumpprivkey) to your wallet.\n" + "\nAdds a private key (as returned by dumpprivkey) to your wallet. Requires a new wallet backup.\n" "\nArguments:\n" "1. \"privkey\" (string, required) The private key (see dumpprivkey)\n" "2. \"label\" (string, optional, default=\"\") An optional label\n" @@ -226,7 +226,7 @@ UniValue importaddress(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 1 || request.params.size() > 4) throw std::runtime_error( "importaddress \"address\" ( \"label\" rescan p2sh )\n" - "\nAdds a script (in hex) or address that can be watched as if it were in your wallet but cannot be used to spend.\n" + "\nAdds a script (in hex) or address that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n" "\nArguments:\n" "1. \"script\" (string, required) The hex-encoded script (or address)\n" "2. 
\"label\" (string, optional, default=\"\") An optional label\n" @@ -340,7 +340,7 @@ UniValue importprunedfunds(const JSONRPCRequest& request) LOCK2(cs_main, pwallet->cs_wallet); - if (pwallet->IsMine(wtx)) { + if (pwallet->IsMine(*wtx.tx)) { pwallet->AddToWallet(wtx, false); return NullUniValue; } @@ -396,7 +396,7 @@ UniValue importpubkey(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 1 || request.params.size() > 4) throw std::runtime_error( "importpubkey \"pubkey\" ( \"label\" rescan )\n" - "\nAdds a public key (in hex) that can be watched as if it were in your wallet but cannot be used to spend.\n" + "\nAdds a public key (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n" "\nArguments:\n" "1. \"pubkey\" (string, required) The hex-encoded public key\n" "2. \"label\" (string, optional, default=\"\") An optional label\n" @@ -456,7 +456,7 @@ UniValue importwallet(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 1) throw std::runtime_error( "importwallet \"filename\"\n" - "\nImports keys from a wallet dump file (see dumpwallet).\n" + "\nImports keys from a wallet dump file (see dumpwallet). Requires a new wallet backup to include imported keys.\n" "\nArguments:\n" "1. \"filename\" (string, required) The wallet file\n" "\nExamples:\n" @@ -601,6 +601,9 @@ UniValue dumpwallet(const JSONRPCRequest& request) throw std::runtime_error( "dumpwallet \"filename\"\n" "\nDumps all wallet keys in a human-readable format to a server-side file. This does not allow overwriting existing files.\n" + "Imported scripts are not currently included in wallet dumps, these must be backed up separately.\n" + "Note that if your wallet contains keys which are not derived from your HD seed (e.g. imported keys), these are not covered by\n" + "only backing up the seed itself, and must be backed up too (e.g. ensure you back up the whole dumpfile).\n" "\nArguments:\n" "1. \"filename\" (string, required) The filename with path (either absolute or relative to bitcoind)\n" "\nResult:\n" @@ -961,7 +964,7 @@ UniValue ProcessImport(CWallet * const pwallet, const UniValue& data, const int6 pwallet->SetAddressBook(vchAddress, label, "receive"); if (pwallet->HaveKey(vchAddress)) { - return false; + throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the private key for this address or script"); } pwallet->mapKeyMetadata[vchAddress].nCreateTime = timestamp; @@ -1039,7 +1042,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest) if (mainRequest.fHelp || mainRequest.params.size() < 1 || mainRequest.params.size() > 2) throw std::runtime_error( "importmulti \"requests\" ( \"options\" )\n\n" - "Import addresses/scripts (with private or public keys, redeem script (P2SH)), rescanning all addresses in one-shot-only (rescan can be disabled via options).\n\n" + "Import addresses/scripts (with private or public keys, redeem script (P2SH)), rescanning all addresses in one-shot-only (rescan can be disabled via options). Requires a new wallet backup.\n\n" "Arguments:\n" "1. 
requests (array, required) Data to be imported\n" " [ (array of json objects)\n" diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index e23ef57db0..8abb4c9a25 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -108,7 +108,7 @@ void WalletTxToJSON(const CWalletTx& wtx, UniValue& entry) std::string rbfStatus = "no"; if (confirms <= 0) { LOCK(mempool.cs); - RBFTransactionState rbfState = IsRBFOptIn(wtx, mempool); + RBFTransactionState rbfState = IsRBFOptIn(*wtx.tx, mempool); if (rbfState == RBF_TRANSACTIONSTATE_UNKNOWN) rbfStatus = "unknown"; else if (rbfState == RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125) @@ -654,7 +654,7 @@ UniValue getreceivedbyaddress(const JSONRPCRequest& request) } CScript scriptPubKey = GetScriptForDestination(dest); if (!IsMine(*pwallet, scriptPubKey)) { - return ValueFromAmount(0); + throw JSONRPCError(RPC_WALLET_ERROR, "Address not found in wallet"); } // Minimum confirmations @@ -1110,7 +1110,7 @@ UniValue addmultisigaddress(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { std::string msg = "addmultisigaddress nrequired [\"key\",...] ( \"account\" )\n" - "\nAdd a nrequired-to-sign multisignature address to the wallet.\n" + "\nAdd a nrequired-to-sign multisignature address to the wallet. Requires a new wallet backup.\n" "Each key is a Bitcoin address or hex-encoded public key.\n" "If 'account' is specified (DEPRECATED), assign address to that account.\n" @@ -1228,7 +1228,7 @@ UniValue addwitnessaddress(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { std::string msg = "addwitnessaddress \"address\" ( p2sh )\n" - "\nAdd a witness address for a script (with pubkey or redeemscript known).\n" + "\nAdd a witness address for a script (with pubkey or redeemscript known). 
Requires a new wallet backup.\n" "It returns the witness script.\n" "\nArguments:\n" @@ -1893,19 +1893,20 @@ UniValue listsinceblock(const JSONRPCRequest& request) int target_confirms = 1; isminefilter filter = ISMINE_SPENDABLE; - if (!request.params[0].isNull()) { + if (!request.params[0].isNull() && !request.params[0].get_str().empty()) { uint256 blockId; blockId.SetHex(request.params[0].get_str()); BlockMap::iterator it = mapBlockIndex.find(blockId); - if (it != mapBlockIndex.end()) { - paltindex = pindex = it->second; - if (chainActive[pindex->nHeight] != pindex) { - // the block being asked for is a part of a deactivated chain; - // we don't want to depend on its perceived height in the block - // chain, we want to instead use the last common ancestor - pindex = chainActive.FindFork(pindex); - } + if (it == mapBlockIndex.end()) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); + } + paltindex = pindex = it->second; + if (chainActive[pindex->nHeight] != pindex) { + // the block being asked for is a part of a deactivated chain; + // we don't want to depend on its perceived height in the block + // chain, we want to instead use the last common ancestor + pindex = chainActive.FindFork(pindex); } } @@ -2050,7 +2051,7 @@ UniValue gettransaction(const JSONRPCRequest& request) ListTransactions(pwallet, wtx, "*", 0, false, details, filter); entry.push_back(Pair("details", details)); - std::string strHex = EncodeHexTx(static_cast<CTransaction>(wtx), RPCSerializationFlags()); + std::string strHex = EncodeHexTx(*wtx.tx, RPCSerializationFlags()); entry.push_back(Pair("hex", strHex)); return entry; @@ -2179,7 +2180,7 @@ UniValue walletpassphrase(const JSONRPCRequest& request) return NullUniValue; } - if (pwallet->IsCrypted() && (request.fHelp || request.params.size() != 2)) { + if (request.fHelp || request.params.size() != 2) { throw std::runtime_error( "walletpassphrase \"passphrase\" timeout\n" "\nStores the wallet decryption key in memory for 'timeout' seconds.\n" @@ -2243,7 +2244,7 @@ UniValue walletpassphrasechange(const JSONRPCRequest& request) return NullUniValue; } - if (pwallet->IsCrypted() && (request.fHelp || request.params.size() != 2)) { + if (request.fHelp || request.params.size() != 2) { throw std::runtime_error( "walletpassphrasechange \"oldpassphrase\" \"newpassphrase\"\n" "\nChanges the wallet passphrase from 'oldpassphrase' to 'newpassphrase'.\n" @@ -2294,7 +2295,7 @@ UniValue walletlock(const JSONRPCRequest& request) return NullUniValue; } - if (pwallet->IsCrypted() && (request.fHelp || request.params.size() != 0)) { + if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "walletlock\n" "\nRemoves the wallet encryption key from memory, locking the wallet.\n" @@ -2334,7 +2335,7 @@ UniValue encryptwallet(const JSONRPCRequest& request) return NullUniValue; } - if (!pwallet->IsCrypted() && (request.fHelp || request.params.size() != 1)) { + if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "encryptwallet \"passphrase\"\n" "\nEncrypts the wallet with 'passphrase'. 
This is for first time encryption.\n" @@ -3233,7 +3234,7 @@ UniValue rescanblockchain(const JSONRPCRequest& request) "}\n" "\nExamples:\n" + HelpExampleCli("rescanblockchain", "100000 120000") - + HelpExampleRpc("rescanblockchain", "100000 120000") + + HelpExampleRpc("rescanblockchain", "100000, 120000") ); } diff --git a/src/wallet/test/accounting_tests.cpp b/src/wallet/test/accounting_tests.cpp index 330878ceb5..64244dceea 100644 --- a/src/wallet/test/accounting_tests.cpp +++ b/src/wallet/test/accounting_tests.cpp @@ -83,7 +83,7 @@ BOOST_AUTO_TEST_CASE(acc_orderupgrade) wtx.mapValue["comment"] = "y"; { - CMutableTransaction tx(wtx); + CMutableTransaction tx(*wtx.tx); --tx.nLockTime; // Just to change the hash :) wtx.SetTx(MakeTransactionRef(std::move(tx))); } @@ -93,7 +93,7 @@ BOOST_AUTO_TEST_CASE(acc_orderupgrade) wtx.mapValue["comment"] = "x"; { - CMutableTransaction tx(wtx); + CMutableTransaction tx(*wtx.tx); --tx.nLockTime; // Just to change the hash :) wtx.SetTx(MakeTransactionRef(std::move(tx))); } diff --git a/src/wallet/test/wallet_test_fixture.cpp b/src/wallet/test/wallet_test_fixture.cpp index e2f48c45ab..34f210fcd7 100644 --- a/src/wallet/test/wallet_test_fixture.cpp +++ b/src/wallet/test/wallet_test_fixture.cpp @@ -8,7 +8,7 @@ #include "wallet/db.h" #include "wallet/wallet.h" -CWallet *pwalletMain; +std::unique_ptr<CWallet> pwalletMain; WalletTestingSetup::WalletTestingSetup(const std::string& chainName): TestingSetup(chainName) @@ -17,18 +17,17 @@ WalletTestingSetup::WalletTestingSetup(const std::string& chainName): bool fFirstRun; std::unique_ptr<CWalletDBWrapper> dbw(new CWalletDBWrapper(&bitdb, "wallet_test.dat")); - pwalletMain = new CWallet(std::move(dbw)); + pwalletMain = MakeUnique<CWallet>(std::move(dbw)); pwalletMain->LoadWallet(fFirstRun); - RegisterValidationInterface(pwalletMain); + RegisterValidationInterface(pwalletMain.get()); RegisterWalletRPCCommands(tableRPC); } WalletTestingSetup::~WalletTestingSetup() { - UnregisterValidationInterface(pwalletMain); - delete pwalletMain; - pwalletMain = nullptr; + UnregisterValidationInterface(pwalletMain.get()); + pwalletMain.reset(); bitdb.Flush(true); bitdb.Reset(); diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 2b12168c65..503dedb5b3 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -489,6 +489,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup) vpwallets[0] = &wallet; ::importwallet(request); + LOCK(wallet.cs_wallet); BOOST_CHECK_EQUAL(wallet.mapWallet.size(), 3); BOOST_CHECK_EQUAL(coinbaseTxns.size(), 103); for (size_t i = 0; i < coinbaseTxns.size(); ++i) { @@ -534,6 +535,7 @@ static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64 SetMockTime(mockTime); CBlockIndex* block = nullptr; if (blockTime > 0) { + LOCK(cs_main); auto inserted = mapBlockIndex.emplace(GetRandHash(), new CBlockIndex); assert(inserted.second); const uint256& hash = inserted.first->first; @@ -547,6 +549,7 @@ static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64 wtx.SetMerkleBranch(block, 0); } wallet.AddToWallet(wtx); + LOCK(wallet.cs_wallet); return wallet.mapWallet.at(wtx.GetHash()).nTimeSmart; } @@ -583,6 +586,7 @@ BOOST_AUTO_TEST_CASE(ComputeTimeSmart) BOOST_AUTO_TEST_CASE(LoadReceiveRequests) { CTxDestination dest = CKeyID(); + LOCK(pwalletMain->cs_wallet); pwalletMain->AddDestData(dest, "misc", "val_misc"); pwalletMain->AddDestData(dest, "rr0", "val_rr0"); pwalletMain->AddDestData(dest, "rr1", 
"val_rr1"); @@ -625,6 +629,7 @@ public: BOOST_CHECK(wallet->CreateTransaction({recipient}, wtx, reservekey, fee, changePos, error, dummy)); CValidationState state; BOOST_CHECK(wallet->CommitTransaction(wtx, reservekey, nullptr, state)); + LOCK(wallet->cs_wallet); auto it = wallet->mapWallet.find(wtx.GetHash()); BOOST_CHECK(it != wallet->mapWallet.end()); CreateAndProcessBlock({CMutableTransaction(*it->second.tx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())); diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 3672ecea5b..dceb818b50 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -532,6 +532,9 @@ void CWallet::SyncMetaData(std::pair<TxSpends::iterator, TxSpends::iterator> ran copyFrom = &mapWallet[hash]; } } + + assert(copyFrom); + // Now copy data from copyFrom to rest: for (TxSpends::iterator it = range.first; it != range.second; ++it) { @@ -1718,7 +1721,7 @@ CAmount CWalletTx::GetDebit(const isminefilter& filter) const debit += nDebitCached; else { - nDebitCached = pwallet->GetDebit(*this, ISMINE_SPENDABLE); + nDebitCached = pwallet->GetDebit(*tx, ISMINE_SPENDABLE); fDebitCached = true; debit += nDebitCached; } @@ -1729,7 +1732,7 @@ CAmount CWalletTx::GetDebit(const isminefilter& filter) const debit += nWatchDebitCached; else { - nWatchDebitCached = pwallet->GetDebit(*this, ISMINE_WATCH_ONLY); + nWatchDebitCached = pwallet->GetDebit(*tx, ISMINE_WATCH_ONLY); fWatchDebitCached = true; debit += nWatchDebitCached; } @@ -1751,7 +1754,7 @@ CAmount CWalletTx::GetCredit(const isminefilter& filter) const credit += nCreditCached; else { - nCreditCached = pwallet->GetCredit(*this, ISMINE_SPENDABLE); + nCreditCached = pwallet->GetCredit(*tx, ISMINE_SPENDABLE); fCreditCached = true; credit += nCreditCached; } @@ -1762,7 +1765,7 @@ CAmount CWalletTx::GetCredit(const isminefilter& filter) const credit += nWatchCreditCached; else { - nWatchCreditCached = pwallet->GetCredit(*this, ISMINE_WATCH_ONLY); + nWatchCreditCached = pwallet->GetCredit(*tx, ISMINE_WATCH_ONLY); fWatchCreditCached = true; credit += nWatchCreditCached; } @@ -1776,7 +1779,7 @@ CAmount CWalletTx::GetImmatureCredit(bool fUseCache) const { if (fUseCache && fImmatureCreditCached) return nImmatureCreditCached; - nImmatureCreditCached = pwallet->GetCredit(*this, ISMINE_SPENDABLE); + nImmatureCreditCached = pwallet->GetCredit(*tx, ISMINE_SPENDABLE); fImmatureCreditCached = true; return nImmatureCreditCached; } @@ -1820,7 +1823,7 @@ CAmount CWalletTx::GetImmatureWatchOnlyCredit(const bool& fUseCache) const { if (fUseCache && fImmatureWatchCreditCached) return nImmatureWatchCreditCached; - nImmatureWatchCreditCached = pwallet->GetCredit(*this, ISMINE_WATCH_ONLY); + nImmatureWatchCreditCached = pwallet->GetCredit(*tx, ISMINE_WATCH_ONLY); fImmatureWatchCreditCached = true; return nImmatureWatchCreditCached; } @@ -1861,7 +1864,7 @@ CAmount CWalletTx::GetChange() const { if (fChangeCached) return nChangeCached; - nChangeCached = pwallet->GetChange(*this); + nChangeCached = pwallet->GetChange(*tx); fChangeCached = true; return nChangeCached; } @@ -1875,7 +1878,7 @@ bool CWalletTx::InMempool() const bool CWalletTx::IsTrusted() const { // Quick answer in most cases - if (!CheckFinalTx(*this)) + if (!CheckFinalTx(*tx)) return false; int nDepth = GetDepthInMainChain(); if (nDepth >= 1) @@ -2133,7 +2136,7 @@ void CWallet::AvailableCoins(std::vector<COutput> &vCoins, bool fOnlySafe, const const uint256& wtxid = it->first; const CWalletTx* pcoin = &(*it).second; - if (!CheckFinalTx(*pcoin)) + if 
(!CheckFinalTx(*pcoin->tx)) continue; if (pcoin->IsCoinBase() && pcoin->GetBlocksToMaturity() > 0) @@ -2915,7 +2918,7 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT wtxNew.SetTx(MakeTransactionRef(std::move(txNew))); // Limit size - if (GetTransactionWeight(wtxNew) >= MAX_STANDARD_TX_WEIGHT) + if (GetTransactionWeight(*wtxNew.tx) >= MAX_STANDARD_TX_WEIGHT) { strFailReason = _("Transaction too large"); return false; @@ -3803,7 +3806,7 @@ CWallet* CWallet::CreateWalletFromFile(const std::string walletFile) uiInterface.InitMessage(_("Zapping all transactions from wallet...")); std::unique_ptr<CWalletDBWrapper> dbw(new CWalletDBWrapper(&bitdb, walletFile)); - std::unique_ptr<CWallet> tempWallet(new CWallet(std::move(dbw))); + std::unique_ptr<CWallet> tempWallet = MakeUnique<CWallet>(std::move(dbw)); DBErrors nZapWalletRet = tempWallet->ZapWalletTx(vWtx); if (nZapWalletRet != DB_LOAD_OK) { InitError(strprintf(_("Error loading %s: Wallet corrupted"), walletFile)); @@ -3881,7 +3884,7 @@ CWallet* CWallet::CreateWalletFromFile(const std::string walletFile) // Top up the keypool if (!walletInstance->TopUpKeyPool()) { InitError(_("Unable to generate initial keys") += "\n"); - return NULL; + return nullptr; } walletInstance->SetBestChain(chainActive.GetLocator()); diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 8315bbf3da..db2861c043 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -214,10 +214,6 @@ public: Init(); } - /** Helper conversion operator to allow passing CMerkleTx where CTransaction is expected. - * TODO: adapt callers and remove this operator. */ - operator const CTransaction&() const { return *tx; } - void Init() { hashBlock = uint256(); diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index b7f873c1e4..180706b1ed 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -268,7 +268,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, CWalletTx wtx; ssValue >> wtx; CValidationState state; - if (!(CheckTransaction(wtx, state) && (wtx.GetHash() == hash) && state.IsValid())) + if (!(CheckTransaction(*wtx.tx, state) && (wtx.GetHash() == hash) && state.IsValid())) return false; // Undo serialize changes in 31600 |
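
The AcceptBlock hunk earlier in this diff tightens how unrequested blocks are handled: in addition to the existing already-have / less-work / too-far-ahead checks, a block we never asked for is now also ignored when its cumulative chain work is below nMinimumChainWork, closing a disk-fill vector from peers feeding low-work chains while our tip is behind. The following is a minimal, self-contained sketch of that gating order only; BlockRef, ChainState and the 288-block look-ahead constant are illustrative stand-ins, not the real validation.cpp types (which use CBlockIndex, arith_uint256 and MIN_BLOCKS_TO_KEEP under cs_main).

#include <cstdint>
#include <iostream>

struct BlockRef {
    bool have_data;       // block data already stored (BLOCK_HAVE_DATA)
    uint64_t chain_work;  // cumulative work ending at this block
    int height;
    unsigned tx_count;    // nonzero means previously processed, then pruned
};

struct ChainState {
    uint64_t tip_work;
    int tip_height;
    uint64_t minimum_chain_work;  // assumed-valid work threshold (nMinimumChainWork)
};

// Returns true if an *unrequested* block should simply be ignored rather than stored.
bool ShouldIgnoreUnrequestedBlock(const BlockRef& block, const ChainState& chain)
{
    if (block.have_data) return true;                              // already have it
    if (block.tx_count != 0) return true;                          // previously processed block that was pruned
    if (block.chain_work < chain.tip_work) return true;            // less work than the current tip
    if (block.height > chain.tip_height + 288) return true;        // too far ahead of the tip (pruning-friendly bound)
    if (block.chain_work < chain.minimum_chain_work) return true;  // DoS guard: below minimum chain work
    return false;
}

int main()
{
    const ChainState chain{1000, 500000, 900};
    const BlockRef low_work_block{false, 10, 500001, 0};      // fake low-work chain: ignored
    const BlockRef competitive_block{false, 1001, 500001, 0}; // at least tip work: processed
    std::cout << ShouldIgnoreUnrequestedBlock(low_work_block, chain) << '\n';     // prints 1
    std::cout << ShouldIgnoreUnrequestedBlock(competitive_block, chain) << '\n';  // prints 0
    return 0;
}

Note the change from fHasMoreWork to fHasMoreOrSameWork: a block with exactly the tip's work is now processed rather than dropped, which the sketch mirrors by only ignoring blocks with strictly less work.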
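
The versionbits.cpp hunks add an ALWAYS_ACTIVE sentinel: a deployment whose start time equals Consensus::BIP9Deployment::ALWAYS_ACTIVE reports THRESHOLD_ACTIVE without walking the BIP9 state machine, and GetStateSinceHeightFor reports height 0. Below is a rough, stand-alone illustration of that short-circuit using stand-in types rather than the real AbstractThresholdConditionChecker; the sentinel value and the elided BIP9 period logic are assumptions for the sketch only.

#include <cassert>
#include <cstdint>
#include <limits>

enum class ThresholdState { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

struct Deployment {
    // Stand-in sentinel; the real constant lives in Consensus::BIP9Deployment.
    static constexpr int64_t ALWAYS_ACTIVE = std::numeric_limits<int64_t>::min();
    int64_t begin_time;
    int64_t end_time;
};

ThresholdState GetStateFor(const Deployment& dep, int64_t median_time_past)
{
    // Always-active deployments bypass the DEFINED/STARTED/LOCKED_IN walk entirely.
    if (dep.begin_time == Deployment::ALWAYS_ACTIVE) return ThresholdState::ACTIVE;
    if (median_time_past < dep.begin_time) return ThresholdState::DEFINED;
    // ... the full per-period threshold/timeout logic is elided in this sketch ...
    return ThresholdState::STARTED;
}

int GetStateSinceHeightFor(const Deployment& dep, int current_height)
{
    if (dep.begin_time == Deployment::ALWAYS_ACTIVE) return 0;  // active since genesis
    // ... otherwise walk back to the first period sharing the current state ...
    return current_height;  // placeholder for the elided walk
}

int main()
{
    const Deployment always{Deployment::ALWAYS_ACTIVE, 0};
    assert(GetStateFor(always, 1234567890) == ThresholdState::ACTIVE);
    assert(GetStateSinceHeightFor(always, 500000) == 0);
    return 0;
}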