Diffstat (limited to 'src')
305 files changed, 10238 insertions, 7412 deletions
diff --git a/src/Makefile.am b/src/Makefile.am index e7f1d82b8b..8c12aee217 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -87,7 +87,6 @@ BITCOIN_CORE_H = \ checkpoints.h \ checkqueue.h \ clientversion.h \ - coincontrol.h \ coins.h \ compat.h \ compat/byteswap.h \ @@ -105,13 +104,14 @@ BITCOIN_CORE_H = \ keystore.h \ dbwrapper.h \ limitedmap.h \ - main.h \ memusage.h \ merkleblock.h \ miner.h \ net.h \ + net_processing.h \ netaddress.h \ netbase.h \ + netmessagemaker.h \ noui.h \ policy/fees.h \ policy/policy.h \ @@ -133,7 +133,7 @@ BITCOIN_CORE_H = \ support/allocators/secure.h \ support/allocators/zeroafterfree.h \ support/cleanse.h \ - support/pagelocker.h \ + support/lockedpool.h \ sync.h \ threadsafety.h \ timedata.h \ @@ -145,8 +145,10 @@ BITCOIN_CORE_H = \ util.h \ utilmoneystr.h \ utiltime.h \ + validation.h \ validationinterface.h \ versionbits.h \ + wallet/coincontrol.h \ wallet/crypter.h \ wallet/db.h \ wallet/rpcwallet.h \ @@ -178,10 +180,10 @@ libbitcoin_server_a_SOURCES = \ httpserver.cpp \ init.cpp \ dbwrapper.cpp \ - main.cpp \ merkleblock.cpp \ miner.cpp \ net.cpp \ + net_processing.cpp \ noui.cpp \ policy/fees.cpp \ policy/policy.cpp \ @@ -200,6 +202,7 @@ libbitcoin_server_a_SOURCES = \ txdb.cpp \ txmempool.cpp \ ui_interface.cpp \ + validation.cpp \ validationinterface.cpp \ versionbits.cpp \ $(BITCOIN_CORE_H) @@ -310,7 +313,7 @@ libbitcoin_common_a_SOURCES = \ libbitcoin_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) libbitcoin_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) libbitcoin_util_a_SOURCES = \ - support/pagelocker.cpp \ + support/lockedpool.cpp \ chainparamsbase.cpp \ clientversion.cpp \ compat/glibc_sanity.cpp \ diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index c83432e91a..e58bd9dfbf 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -6,18 +6,27 @@ bin_PROGRAMS += bench/bench_bitcoin BENCH_SRCDIR = bench BENCH_BINARY = bench/bench_bitcoin$(EXEEXT) +RAW_TEST_FILES = \ + bench/data/block413567.raw +GENERATED_TEST_FILES = $(RAW_TEST_FILES:.raw=.raw.h) bench_bench_bitcoin_SOURCES = \ bench/bench_bitcoin.cpp \ bench/bench.cpp \ bench/bench.h \ + bench/checkblock.cpp \ bench/Examples.cpp \ bench/rollingbloom.cpp \ bench/crypto_hash.cpp \ bench/ccoins_caching.cpp \ bench/mempool_eviction.cpp \ bench/verify_script.cpp \ - bench/base58.cpp + bench/base58.cpp \ + bench/lockedpool.cpp \ + bench/perf.cpp \ + bench/perf.h + +nodist_bench_bench_bitcoin_SOURCES = $(GENERATED_TEST_FILES) bench_bench_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CLFAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/ bench_bench_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) @@ -44,10 +53,12 @@ endif bench_bench_bitcoin_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) bench_bench_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) -CLEAN_BITCOIN_BENCH = bench/*.gcda bench/*.gcno +CLEAN_BITCOIN_BENCH = bench/*.gcda bench/*.gcno $(GENERATED_TEST_FILES) CLEANFILES += $(CLEAN_BITCOIN_BENCH) +bench/checkblock.cpp: bench/data/block413567.raw.h + bitcoin_bench: $(BENCH_BINARY) bench: $(BENCH_BINARY) FORCE @@ -55,3 +66,12 @@ bench: $(BENCH_BINARY) FORCE bitcoin_bench_clean : FORCE rm -f $(CLEAN_BITCOIN_BENCH) $(bench_bench_bitcoin_OBJECTS) $(BENCH_BINARY) + +%.raw.h: %.raw + @$(MKDIR_P) $(@D) + @{ \ + echo "static unsigned const char $(*F)[] = {" && \ + $(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' && \ + echo "};"; \ 
+ } > "$@.new" && mv -f "$@.new" "$@" + @echo "Generated $@" diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index 1f9a901d75..48abb9b4a2 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -271,6 +271,7 @@ RES_ICONS = \ qt/res/icons/key.png \ qt/res/icons/lock_closed.png \ qt/res/icons/lock_open.png \ + qt/res/icons/network_disabled.png \ qt/res/icons/open.png \ qt/res/icons/overview.png \ qt/res/icons/quit.png \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 8178fb7429..60fd13c90d 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -50,7 +50,6 @@ BITCOIN_TESTS =\ test/bip32_tests.cpp \ test/blockencodings_tests.cpp \ test/bloom_tests.cpp \ - test/Checkpoints_tests.cpp \ test/coins_tests.cpp \ test/compress_tests.cpp \ test/crypto_tests.cpp \ @@ -86,6 +85,7 @@ BITCOIN_TESTS =\ test/streams_tests.cpp \ test/test_bitcoin.cpp \ test/test_bitcoin.h \ + test/test_random.h \ test/testutil.cpp \ test/testutil.h \ test/timedata_tests.cpp \ @@ -150,16 +150,10 @@ endif %.json.h: %.json @$(MKDIR_P) $(@D) - @echo "namespace json_tests{" > $@ - @echo "static unsigned const char $(*F)[] = {" >> $@ - @$(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' >> $@ - @echo "};};" >> $@ - @echo "Generated $@" - -%.raw.h: %.raw - @$(MKDIR_P) $(@D) - @echo "namespace alert_tests{" > $@ - @echo "static unsigned const char $(*F)[] = {" >> $@ - @$(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' >> $@ - @echo "};};" >> $@ + @{ \ + echo "namespace json_tests{" && \ + echo "static unsigned const char $(*F)[] = {" && \ + $(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' && \ + echo "};};"; \ + } > "$@.new" && mv -f "$@.new" "$@" @echo "Generated $@" diff --git a/src/addrdb.cpp b/src/addrdb.cpp index ddf41f92de..23715bbea4 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -103,7 +103,7 @@ bool CBanDB::Read(banmap_t& banSet) if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp))) return error("%s: Invalid network magic number", __func__); - // de-serialize address data into one CAddrMan object + // de-serialize ban data ssBanlist >> banSet; } catch (const std::exception& e) { diff --git a/src/addrdb.h b/src/addrdb.h index 62835a6fb4..339943ca5a 100644 --- a/src/addrdb.h +++ b/src/addrdb.h @@ -46,7 +46,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(this->nVersion); READWRITE(nCreateTime); READWRITE(nBanUntil); diff --git a/src/addrman.h b/src/addrman.h index e9e137c978..76e7b210f7 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -58,7 +58,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(*(CAddress*)this); READWRITE(source); READWRITE(nLastSuccess); @@ -293,7 +293,7 @@ public: * very little in common. 
*/ template<typename Stream> - void Serialize(Stream &s, int nType, int nVersionDummy) const + void Serialize(Stream &s) const { LOCK(cs); @@ -343,7 +343,7 @@ public: } template<typename Stream> - void Unserialize(Stream& s, int nType, int nVersionDummy) + void Unserialize(Stream& s) { LOCK(cs); @@ -448,11 +448,6 @@ public: Check(); } - unsigned int GetSerializeSize(int nType, int nVersion) const - { - return (CSizeComputer(nType, nVersion) << *this).size(); - } - void Clear() { std::vector<int>().swap(vRandom); @@ -487,6 +482,7 @@ public: //! Return the number of (unique) addresses in all tables. size_t size() const { + LOCK(cs); // TODO: Cache this in an atomic to avoid this overhead return vRandom.size(); } @@ -506,13 +502,11 @@ public: //! Add a single address. bool Add(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty = 0) { + LOCK(cs); bool fRet = false; - { - LOCK(cs); - Check(); - fRet |= Add_(addr, source, nTimePenalty); - Check(); - } + Check(); + fRet |= Add_(addr, source, nTimePenalty); + Check(); if (fRet) LogPrint("addrman", "Added %s from %s: %i tried, %i new\n", addr.ToStringIPPort(), source.ToString(), nTried, nNew); return fRet; @@ -521,14 +515,12 @@ public: //! Add multiple addresses. bool Add(const std::vector<CAddress> &vAddr, const CNetAddr& source, int64_t nTimePenalty = 0) { + LOCK(cs); int nAdd = 0; - { - LOCK(cs); - Check(); - for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++) - nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0; - Check(); - } + Check(); + for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++) + nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0; + Check(); if (nAdd) LogPrint("addrman", "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew); return nAdd > 0; @@ -537,23 +529,19 @@ public: //! Mark an entry as accessible. void Good(const CService &addr, int64_t nTime = GetAdjustedTime()) { - { - LOCK(cs); - Check(); - Good_(addr, nTime); - Check(); - } + LOCK(cs); + Check(); + Good_(addr, nTime); + Check(); } //! Mark an entry as connection attempted to. void Attempt(const CService &addr, bool fCountFailure, int64_t nTime = GetAdjustedTime()) { - { - LOCK(cs); - Check(); - Attempt_(addr, fCountFailure, nTime); - Check(); - } + LOCK(cs); + Check(); + Attempt_(addr, fCountFailure, nTime); + Check(); } /** @@ -587,12 +575,10 @@ public: //! Mark an entry as currently-connected-to. 
void Connected(const CService &addr, int64_t nTime = GetAdjustedTime()) { - { - LOCK(cs); - Check(); - Connected_(addr, nTime); - Check(); - } + LOCK(cs); + Check(); + Connected_(addr, nTime); + Check(); } void SetServices(const CService &addr, ServiceFlags nServices) diff --git a/src/amount.h b/src/amount.h index 5e52f37f23..ba0c86040f 100644 --- a/src/amount.h +++ b/src/amount.h @@ -64,7 +64,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(nSatoshisPerK); } }; diff --git a/src/arith_uint256.cpp b/src/arith_uint256.cpp index 2e61363576..a58ad01b5a 100644 --- a/src/arith_uint256.cpp +++ b/src/arith_uint256.cpp @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2014 The Bitcoin developers +// Copyright (c) 2009-2014 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/arith_uint256.h b/src/arith_uint256.h index ba3d620158..5cc52f6e72 100644 --- a/src/arith_uint256.h +++ b/src/arith_uint256.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2015 The Bitcoin developers +// Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/base58.cpp b/src/base58.cpp index d1d60a6f1d..f7768b5a64 100644 --- a/src/base58.cpp +++ b/src/base58.cpp @@ -25,12 +25,14 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch) psz++; // Skip and count leading '1's. int zeroes = 0; + int length = 0; while (*psz == '1') { zeroes++; psz++; } // Allocate enough space in big-endian base256 representation. - std::vector<unsigned char> b256(strlen(psz) * 733 / 1000 + 1); // log(58) / log(256), rounded up. + int size = strlen(psz) * 733 /1000 + 1; // log(58) / log(256), rounded up. + std::vector<unsigned char> b256(size); // Process the characters. while (*psz && !isspace(*psz)) { // Decode base58 character @@ -39,12 +41,14 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch) return false; // Apply "b256 = b256 * 58 + ch". int carry = ch - pszBase58; - for (std::vector<unsigned char>::reverse_iterator it = b256.rbegin(); it != b256.rend(); it++) { + int i = 0; + for (std::vector<unsigned char>::reverse_iterator it = b256.rbegin(); (carry != 0 || i < length) && (it != b256.rend()); ++it, ++i) { carry += 58 * (*it); *it = carry % 256; carry /= 256; } assert(carry == 0); + length = i; psz++; } // Skip trailing spaces. @@ -53,7 +57,7 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch) if (*psz != 0) return false; // Skip leading zeroes in b256. - std::vector<unsigned char>::iterator it = b256.begin(); + std::vector<unsigned char>::iterator it = b256.begin() + (size - length); while (it != b256.end() && *it == 0) it++; // Copy result into output vector. diff --git a/src/bench/Examples.cpp b/src/bench/Examples.cpp index b6b020a971..9f35a1ea04 100644 --- a/src/bench/Examples.cpp +++ b/src/bench/Examples.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "bench.h" -#include "main.h" +#include "validation.h" #include "utiltime.h" // Sanity test: this should loop ten times, and diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp index 1279c3e7df..3319c179bf 100644 --- a/src/bench/base58.cpp +++ b/src/bench/base58.cpp @@ -1,10 +1,10 @@ -// Copyright (c) 2016 the Bitcoin Core developers +// Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "bench.h" -#include "main.h" +#include "validation.h" #include "base58.h" #include <vector> diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp index 227546a7a7..af3d152c9a 100644 --- a/src/bench/bench.cpp +++ b/src/bench/bench.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "bench.h" +#include "perf.h" #include <iostream> #include <iomanip> @@ -26,7 +27,9 @@ BenchRunner::BenchRunner(std::string name, BenchFunction func) void BenchRunner::RunAll(double elapsedTimeForOne) { - std::cout << "#Benchmark" << "," << "count" << "," << "min" << "," << "max" << "," << "average" << "\n"; + perf_init(); + std::cout << "#Benchmark" << "," << "count" << "," << "min" << "," << "max" << "," << "average" << "," + << "min_cycles" << "," << "max_cycles" << "," << "average_cycles" << "\n"; for (std::map<std::string,BenchFunction>::iterator it = benchmarks.begin(); it != benchmarks.end(); ++it) { @@ -35,6 +38,7 @@ BenchRunner::RunAll(double elapsedTimeForOne) BenchFunction& func = it->second; func(state); } + perf_fini(); } bool State::KeepRunning() @@ -44,8 +48,10 @@ bool State::KeepRunning() return true; } double now; + uint64_t nowCycles; if (count == 0) { lastTime = beginTime = now = gettimedouble(); + lastCycles = beginCycles = nowCycles = perf_cpucycles(); } else { now = gettimedouble(); @@ -53,6 +59,13 @@ bool State::KeepRunning() double elapsedOne = elapsed * countMaskInv; if (elapsedOne < minTime) minTime = elapsedOne; if (elapsedOne > maxTime) maxTime = elapsedOne; + + // We only use relative values, so don't have to handle 64-bit wrap-around specially + nowCycles = perf_cpucycles(); + uint64_t elapsedOneCycles = (nowCycles - lastCycles) * countMaskInv; + if (elapsedOneCycles < minCycles) minCycles = elapsedOneCycles; + if (elapsedOneCycles > maxCycles) maxCycles = elapsedOneCycles; + if (elapsed*128 < maxElapsed) { // If the execution was much too fast (1/128th of maxElapsed), increase the count mask by 8x and restart timing. // The restart avoids including the overhead of this code in the measurement. 
@@ -61,14 +74,20 @@ bool State::KeepRunning() count = 0; minTime = std::numeric_limits<double>::max(); maxTime = std::numeric_limits<double>::min(); + minCycles = std::numeric_limits<uint64_t>::max(); + maxCycles = std::numeric_limits<uint64_t>::min(); return true; } if (elapsed*16 < maxElapsed) { - countMask = ((countMask<<1)|1) & ((1LL<<60)-1); - countMaskInv = 1./(countMask+1); + uint64_t newCountMask = ((countMask<<1)|1) & ((1LL<<60)-1); + if ((count & newCountMask)==0) { + countMask = newCountMask; + countMaskInv = 1./(countMask+1); + } } } lastTime = now; + lastCycles = nowCycles; ++count; if (now - beginTime < maxElapsed) return true; // Keep going @@ -77,7 +96,9 @@ bool State::KeepRunning() // Output results double average = (now-beginTime)/count; - std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << minTime << "," << maxTime << "," << average << "\n"; + int64_t averageCycles = (nowCycles-beginCycles)/count; + std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << minTime << "," << maxTime << "," << average << "," + << minCycles << "," << maxCycles << "," << averageCycles << "\n"; return false; } diff --git a/src/bench/bench.h b/src/bench/bench.h index f13b145aaf..caf73e949b 100644 --- a/src/bench/bench.h +++ b/src/bench/bench.h @@ -41,12 +41,18 @@ namespace benchmark { double maxElapsed; double beginTime; double lastTime, minTime, maxTime, countMaskInv; - int64_t count; - int64_t countMask; + uint64_t count; + uint64_t countMask; + uint64_t beginCycles; + uint64_t lastCycles; + uint64_t minCycles; + uint64_t maxCycles; public: State(std::string _name, double _maxElapsed) : name(_name), maxElapsed(_maxElapsed), count(0) { minTime = std::numeric_limits<double>::max(); maxTime = std::numeric_limits<double>::min(); + minCycles = std::numeric_limits<uint64_t>::max(); + maxCycles = std::numeric_limits<uint64_t>::min(); countMask = 1; countMaskInv = 1./(countMask + 1); } diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp index db1402216d..bd768180c6 100644 --- a/src/bench/bench_bitcoin.cpp +++ b/src/bench/bench_bitcoin.cpp @@ -5,7 +5,7 @@ #include "bench.h" #include "key.h" -#include "main.h" +#include "validation.h" #include "util.h" int diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp new file mode 100644 index 0000000000..230e4ca773 --- /dev/null +++ b/src/bench/checkblock.cpp @@ -0,0 +1,56 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "bench.h" + +#include "chainparams.h" +#include "validation.h" +#include "streams.h" +#include "consensus/validation.h" + +namespace block_bench { +#include "bench/data/block413567.raw.h" +} + +// These are the two major time-sinks which happen after we have fully received +// a block off the wire, but before we can relay the block on to peers using +// compact block relay. 
+ +static void DeserializeBlockTest(benchmark::State& state) +{ + CDataStream stream((const char*)block_bench::block413567, + (const char*)&block_bench::block413567[sizeof(block_bench::block413567)], + SER_NETWORK, PROTOCOL_VERSION); + char a; + stream.write(&a, 1); // Prevent compaction + + while (state.KeepRunning()) { + CBlock block; + stream >> block; + assert(stream.Rewind(sizeof(block_bench::block413567))); + } +} + +static void DeserializeAndCheckBlockTest(benchmark::State& state) +{ + CDataStream stream((const char*)block_bench::block413567, + (const char*)&block_bench::block413567[sizeof(block_bench::block413567)], + SER_NETWORK, PROTOCOL_VERSION); + char a; + stream.write(&a, 1); // Prevent compaction + + Consensus::Params params = Params(CBaseChainParams::MAIN).GetConsensus(); + + while (state.KeepRunning()) { + CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here + stream >> block; + assert(stream.Rewind(sizeof(block_bench::block413567))); + + CValidationState validationState; + assert(CheckBlock(block, validationState, params)); + } +} + +BENCHMARK(DeserializeBlockTest); +BENCHMARK(DeserializeAndCheckBlockTest); diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp index 7091ee3e11..32690fe484 100644 --- a/src/bench/coin_selection.cpp +++ b/src/bench/coin_selection.cpp @@ -19,7 +19,7 @@ static void addCoin(const CAmount& nValue, const CWallet& wallet, vector<COutput tx.nLockTime = nextLockTime++; // so all transactions get different hashes tx.vout.resize(nInput + 1); tx.vout[nInput].nValue = nValue; - CWalletTx* wtx = new CWalletTx(&wallet, tx); + CWalletTx* wtx = new CWalletTx(&wallet, MakeTransactionRef(std::move(tx))); int nAge = 6 * 24; COutput output(wtx, nInput, nAge, true, true); diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp index 168006154f..737d3572ae 100644 --- a/src/bench/crypto_hash.cpp +++ b/src/bench/crypto_hash.cpp @@ -22,7 +22,7 @@ static void RIPEMD160(benchmark::State& state) uint8_t hash[CRIPEMD160::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); while (state.KeepRunning()) - CRIPEMD160().Write(begin_ptr(in), in.size()).Finalize(hash); + CRIPEMD160().Write(in.data(), in.size()).Finalize(hash); } static void SHA1(benchmark::State& state) @@ -30,7 +30,7 @@ static void SHA1(benchmark::State& state) uint8_t hash[CSHA1::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); while (state.KeepRunning()) - CSHA1().Write(begin_ptr(in), in.size()).Finalize(hash); + CSHA1().Write(in.data(), in.size()).Finalize(hash); } static void SHA256(benchmark::State& state) @@ -38,7 +38,7 @@ static void SHA256(benchmark::State& state) uint8_t hash[CSHA256::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); while (state.KeepRunning()) - CSHA256().Write(begin_ptr(in), in.size()).Finalize(hash); + CSHA256().Write(in.data(), in.size()).Finalize(hash); } static void SHA256_32b(benchmark::State& state) @@ -46,7 +46,7 @@ static void SHA256_32b(benchmark::State& state) std::vector<uint8_t> in(32,0); while (state.KeepRunning()) { for (int i = 0; i < 1000000; i++) { - CSHA256().Write(begin_ptr(in), in.size()).Finalize(&in[0]); + CSHA256().Write(in.data(), in.size()).Finalize(&in[0]); } } } @@ -56,7 +56,7 @@ static void SHA512(benchmark::State& state) uint8_t hash[CSHA512::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); while (state.KeepRunning()) - CSHA512().Write(begin_ptr(in), in.size()).Finalize(hash); + CSHA512().Write(in.data(), in.size()).Finalize(hash); } static void 
SipHash_32b(benchmark::State& state) diff --git a/src/bench/data/block413567.raw b/src/bench/data/block413567.raw Binary files differnew file mode 100644 index 0000000000..67d2d5d382 --- /dev/null +++ b/src/bench/data/block413567.raw diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp new file mode 100644 index 0000000000..5df5b1ac6e --- /dev/null +++ b/src/bench/lockedpool.cpp @@ -0,0 +1,47 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "bench.h" + +#include "support/lockedpool.h" + +#include <iostream> +#include <vector> + +#define ASIZE 2048 +#define BITER 5000 +#define MSIZE 2048 + +static void LockedPool(benchmark::State& state) +{ + void *synth_base = reinterpret_cast<void*>(0x08000000); + const size_t synth_size = 1024*1024; + Arena b(synth_base, synth_size, 16); + + std::vector<void*> addr; + for (int x=0; x<ASIZE; ++x) + addr.push_back(0); + uint32_t s = 0x12345678; + while (state.KeepRunning()) { + for (int x=0; x<BITER; ++x) { + int idx = s & (addr.size()-1); + if (s & 0x80000000) { + b.free(addr[idx]); + addr[idx] = 0; + } else if(!addr[idx]) { + addr[idx] = b.alloc((s >> 16) & (MSIZE-1)); + } + bool lsb = s & 1; + s >>= 1; + if (lsb) + s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0 + } + } + for (void *ptr: addr) + b.free(ptr); + addr.clear(); +} + +BENCHMARK(LockedPool); + diff --git a/src/bench/perf.cpp b/src/bench/perf.cpp new file mode 100644 index 0000000000..1f43e5d3ac --- /dev/null +++ b/src/bench/perf.cpp @@ -0,0 +1,53 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "perf.h" + +#if defined(__i386__) || defined(__x86_64__) + +/* These architectures support quering the cycle counter + * from user space, no need for any syscall overhead. + */ +void perf_init(void) { } +void perf_fini(void) { } + +#elif defined(__linux__) + +#include <unistd.h> +#include <sys/syscall.h> +#include <linux/perf_event.h> + +static int fd = -1; +static struct perf_event_attr attr; + +void perf_init(void) +{ + attr.type = PERF_TYPE_HARDWARE; + attr.config = PERF_COUNT_HW_CPU_CYCLES; + fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0); +} + +void perf_fini(void) +{ + if (fd != -1) { + close(fd); + } +} + +uint64_t perf_cpucycles(void) +{ + uint64_t result = 0; + if (fd == -1 || read(fd, &result, sizeof(result)) < (ssize_t)sizeof(result)) { + return 0; + } + return result; +} + +#else /* Unhandled platform */ + +void perf_init(void) { } +void perf_fini(void) { } +uint64_t perf_cpucycles(void) { return 0; } + +#endif diff --git a/src/bench/perf.h b/src/bench/perf.h new file mode 100644 index 0000000000..681bd0c8a2 --- /dev/null +++ b/src/bench/perf.h @@ -0,0 +1,37 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +/** Functions for measurement of CPU cycles */ +#ifndef H_PERF +#define H_PERF + +#include <stdint.h> + +#if defined(__i386__) + +static inline uint64_t perf_cpucycles(void) +{ + uint64_t x; + __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x)); + return x; +} + +#elif defined(__x86_64__) + +static inline uint64_t perf_cpucycles(void) +{ + uint32_t hi, lo; + __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)lo)|(((uint64_t)hi)<<32); +} +#else + +uint64_t perf_cpucycles(void); + +#endif + +void perf_init(void); +void perf_fini(void); + +#endif // H_PERF diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp index dc3940cdbd..91b8c44472 100644 --- a/src/bench/verify_script.cpp +++ b/src/bench/verify_script.cpp @@ -91,7 +91,7 @@ static void VerifyScriptBench(benchmark::State& state) CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << txSpend; int csuccess = bitcoinconsensus_verify_script_with_amount( - begin_ptr(txCredit.vout[0].scriptPubKey), + txCredit.vout[0].scriptPubKey.data(), txCredit.vout[0].scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, flags, nullptr); diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 2d66448d80..596cc8eada 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -24,14 +24,13 @@ #include <univalue.h> -using namespace std; - static const char DEFAULT_RPCCONNECT[] = "127.0.0.1"; static const int DEFAULT_HTTP_CLIENT_TIMEOUT=900; +static const int CONTINUE_EXECUTION=-1; std::string HelpMessageCli() { - string strUsage; + std::string strUsage; strUsage += HelpMessageGroup(_("Options:")); strUsage += HelpMessageOpt("-?", _("This help message")); strUsage += HelpMessageOpt("-conf=<file>", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME)); @@ -67,7 +66,11 @@ public: }; -static bool AppInitRPC(int argc, char* argv[]) +// +// This function returns either one of EXIT_ codes when it's expected to stop the process or +// CONTINUE_EXECUTION when it's expected to continue further. 
+// +static int AppInitRPC(int argc, char* argv[]) { // // Parameters @@ -85,31 +88,35 @@ static bool AppInitRPC(int argc, char* argv[]) } fprintf(stdout, "%s", strUsage.c_str()); - return false; + if (argc < 2) { + fprintf(stderr, "Error: too few parameters\n"); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; } if (!boost::filesystem::is_directory(GetDataDir(false))) { fprintf(stderr, "Error: Specified data directory \"%s\" does not exist.\n", mapArgs["-datadir"].c_str()); - return false; + return EXIT_FAILURE; } try { ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME), mapArgs, mapMultiArgs); } catch (const std::exception& e) { fprintf(stderr,"Error reading configuration file: %s\n", e.what()); - return false; + return EXIT_FAILURE; } // Check for -testnet or -regtest parameter (BaseParams() calls are only valid after this clause) try { SelectBaseParams(ChainNameFromCommandLine()); } catch (const std::exception& e) { fprintf(stderr, "Error: %s\n", e.what()); - return false; + return EXIT_FAILURE; } if (GetBoolArg("-rpcssl", false)) { fprintf(stderr, "Error: SSL mode for RPC (-rpcssl) is no longer supported.\n"); - return false; + return EXIT_FAILURE; } - return true; + return CONTINUE_EXECUTION; } @@ -178,7 +185,7 @@ static void http_error_cb(enum evhttp_request_error err, void *ctx) } #endif -UniValue CallRPC(const string& strMethod, const UniValue& params) +UniValue CallRPC(const std::string& strMethod, const UniValue& params) { std::string host = GetArg("-rpcconnect", DEFAULT_RPCCONNECT); int port = GetArg("-rpcport", BaseParams().RPCPort()); @@ -186,18 +193,18 @@ UniValue CallRPC(const string& strMethod, const UniValue& params) // Create event base struct event_base *base = event_base_new(); // TODO RAII if (!base) - throw runtime_error("cannot create event_base"); + throw std::runtime_error("cannot create event_base"); // Synchronously look up hostname struct evhttp_connection *evcon = evhttp_connection_base_new(base, NULL, host.c_str(), port); // TODO RAII if (evcon == NULL) - throw runtime_error("create connection failed"); + throw std::runtime_error("create connection failed"); evhttp_connection_set_timeout(evcon, GetArg("-rpcclienttimeout", DEFAULT_HTTP_CLIENT_TIMEOUT)); HTTPReply response; struct evhttp_request *req = evhttp_request_new(http_request_done, (void*)&response); // TODO RAII if (req == NULL) - throw runtime_error("create http request failed"); + throw std::runtime_error("create http request failed"); #if LIBEVENT_VERSION_NUMBER >= 0x02010300 evhttp_request_set_error_cb(req, http_error_cb); #endif @@ -207,7 +214,7 @@ UniValue CallRPC(const string& strMethod, const UniValue& params) if (mapArgs["-rpcpassword"] == "") { // Try fall back to cookie-based authentication if no password is provided if (!GetAuthCookie(&strRPCUserColonPass)) { - throw runtime_error(strprintf( + throw std::runtime_error(strprintf( _("Could not locate RPC credentials. 
No authentication cookie could be found, and no rpcpassword is set in the configuration file (%s)"), GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)).string().c_str())); @@ -240,28 +247,28 @@ UniValue CallRPC(const string& strMethod, const UniValue& params) event_base_free(base); if (response.status == 0) - throw CConnectionFailed(strprintf("couldn't connect to server (%d %s)", response.error, http_errorstring(response.error))); + throw CConnectionFailed(strprintf("couldn't connect to server: %s (code %d)\n(make sure server is running and you are connecting to the correct RPC port)", http_errorstring(response.error), response.error)); else if (response.status == HTTP_UNAUTHORIZED) - throw runtime_error("incorrect rpcuser or rpcpassword (authorization failed)"); + throw std::runtime_error("incorrect rpcuser or rpcpassword (authorization failed)"); else if (response.status >= 400 && response.status != HTTP_BAD_REQUEST && response.status != HTTP_NOT_FOUND && response.status != HTTP_INTERNAL_SERVER_ERROR) - throw runtime_error(strprintf("server returned HTTP error %d", response.status)); + throw std::runtime_error(strprintf("server returned HTTP error %d", response.status)); else if (response.body.empty()) - throw runtime_error("no response from server"); + throw std::runtime_error("no response from server"); // Parse reply UniValue valReply(UniValue::VSTR); if (!valReply.read(response.body)) - throw runtime_error("couldn't parse reply from server"); + throw std::runtime_error("couldn't parse reply from server"); const UniValue& reply = valReply.get_obj(); if (reply.empty()) - throw runtime_error("expected reply to have result, error and id properties"); + throw std::runtime_error("expected reply to have result, error and id properties"); return reply; } int CommandLineRPC(int argc, char *argv[]) { - string strPrint; + std::string strPrint; int nRet = 0; try { // Skip switches @@ -277,7 +284,7 @@ int CommandLineRPC(int argc, char *argv[]) args.push_back(line); } if (args.size() < 1) - throw runtime_error("too few parameters (need at least command)"); + throw std::runtime_error("too few parameters (need at least command)"); std::string strMethod = args[0]; UniValue params = RPCConvertValues(strMethod, std::vector<std::string>(args.begin()+1, args.end())); @@ -331,7 +338,7 @@ int CommandLineRPC(int argc, char *argv[]) throw; } catch (const std::exception& e) { - strPrint = string("error: ") + e.what(); + strPrint = std::string("error: ") + e.what(); nRet = EXIT_FAILURE; } catch (...) { @@ -354,8 +361,9 @@ int main(int argc, char* argv[]) } try { - if(!AppInitRPC(argc, argv)) - return EXIT_FAILURE; + int ret = AppInitRPC(argc, argv); + if (ret != CONTINUE_EXECUTION) + return ret; } catch (const std::exception& e) { PrintExceptionContinue(&e, "AppInitRPC()"); diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index e09afd632e..1ed1449f03 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -26,12 +26,15 @@ #include <boost/algorithm/string.hpp> #include <boost/assign/list_of.hpp> -using namespace std; - static bool fCreateBlank; -static map<string,UniValue> registers; - -static bool AppInitRawTx(int argc, char* argv[]) +static std::map<std::string,UniValue> registers; +static const int CONTINUE_EXECUTION=-1; + +// +// This function returns either one of EXIT_ codes when it's expected to stop the process or +// CONTINUE_EXECUTION when it's expected to continue further. 
+// +static int AppInitRawTx(int argc, char* argv[]) { // // Parameters @@ -43,7 +46,7 @@ static bool AppInitRawTx(int argc, char* argv[]) SelectParams(ChainNameFromCommandLine()); } catch (const std::exception& e) { fprintf(stderr, "Error: %s\n", e.what()); - return false; + return EXIT_FAILURE; } fCreateBlank = GetBoolArg("-create", false); @@ -89,57 +92,61 @@ static bool AppInitRawTx(int argc, char* argv[]) strUsage += HelpMessageOpt("set=NAME:JSON-STRING", _("Set register NAME to given JSON-STRING")); fprintf(stdout, "%s", strUsage.c_str()); - return false; + if (argc < 2) { + fprintf(stderr, "Error: too few parameters\n"); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; } - return true; + return CONTINUE_EXECUTION; } -static void RegisterSetJson(const string& key, const string& rawJson) +static void RegisterSetJson(const std::string& key, const std::string& rawJson) { UniValue val; if (!val.read(rawJson)) { - string strErr = "Cannot parse JSON for key " + key; - throw runtime_error(strErr); + std::string strErr = "Cannot parse JSON for key " + key; + throw std::runtime_error(strErr); } registers[key] = val; } -static void RegisterSet(const string& strInput) +static void RegisterSet(const std::string& strInput) { // separate NAME:VALUE in string size_t pos = strInput.find(':'); - if ((pos == string::npos) || + if ((pos == std::string::npos) || (pos == 0) || (pos == (strInput.size() - 1))) - throw runtime_error("Register input requires NAME:VALUE"); + throw std::runtime_error("Register input requires NAME:VALUE"); - string key = strInput.substr(0, pos); - string valStr = strInput.substr(pos + 1, string::npos); + std::string key = strInput.substr(0, pos); + std::string valStr = strInput.substr(pos + 1, std::string::npos); RegisterSetJson(key, valStr); } -static void RegisterLoad(const string& strInput) +static void RegisterLoad(const std::string& strInput) { // separate NAME:FILENAME in string size_t pos = strInput.find(':'); - if ((pos == string::npos) || + if ((pos == std::string::npos) || (pos == 0) || (pos == (strInput.size() - 1))) - throw runtime_error("Register load requires NAME:FILENAME"); + throw std::runtime_error("Register load requires NAME:FILENAME"); - string key = strInput.substr(0, pos); - string filename = strInput.substr(pos + 1, string::npos); + std::string key = strInput.substr(0, pos); + std::string filename = strInput.substr(pos + 1, std::string::npos); FILE *f = fopen(filename.c_str(), "r"); if (!f) { - string strErr = "Cannot open file " + filename; - throw runtime_error(strErr); + std::string strErr = "Cannot open file " + filename; + throw std::runtime_error(strErr); } // load file chunks into one big buffer - string valStr; + std::string valStr; while ((!feof(f)) && (!ferror(f))) { char buf[4096]; int bread = fread(buf, 1, sizeof(buf), f); @@ -153,55 +160,55 @@ static void RegisterLoad(const string& strInput) fclose(f); if (error) { - string strErr = "Error reading file " + filename; - throw runtime_error(strErr); + std::string strErr = "Error reading file " + filename; + throw std::runtime_error(strErr); } // evaluate as JSON buffer register RegisterSetJson(key, valStr); } -static void MutateTxVersion(CMutableTransaction& tx, const string& cmdVal) +static void MutateTxVersion(CMutableTransaction& tx, const std::string& cmdVal) { int64_t newVersion = atoi64(cmdVal); if (newVersion < 1 || newVersion > CTransaction::MAX_STANDARD_VERSION) - throw runtime_error("Invalid TX version requested"); + throw std::runtime_error("Invalid TX version requested"); 
tx.nVersion = (int) newVersion; } -static void MutateTxLocktime(CMutableTransaction& tx, const string& cmdVal) +static void MutateTxLocktime(CMutableTransaction& tx, const std::string& cmdVal) { int64_t newLocktime = atoi64(cmdVal); if (newLocktime < 0LL || newLocktime > 0xffffffffLL) - throw runtime_error("Invalid TX locktime requested"); + throw std::runtime_error("Invalid TX locktime requested"); tx.nLockTime = (unsigned int) newLocktime; } -static void MutateTxAddInput(CMutableTransaction& tx, const string& strInput) +static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInput) { std::vector<std::string> vStrInputParts; boost::split(vStrInputParts, strInput, boost::is_any_of(":")); // separate TXID:VOUT in string if (vStrInputParts.size()<2) - throw runtime_error("TX input missing separator"); + throw std::runtime_error("TX input missing separator"); // extract and validate TXID - string strTxid = vStrInputParts[0]; + std::string strTxid = vStrInputParts[0]; if ((strTxid.size() != 64) || !IsHex(strTxid)) - throw runtime_error("invalid TX input txid"); + throw std::runtime_error("invalid TX input txid"); uint256 txid(uint256S(strTxid)); static const unsigned int minTxOutSz = 9; static const unsigned int maxVout = MAX_BLOCK_BASE_SIZE / minTxOutSz; // extract and validate vout - string strVout = vStrInputParts[1]; + std::string strVout = vStrInputParts[1]; int vout = atoi(strVout); if ((vout < 0) || (vout > (int)maxVout)) - throw runtime_error("invalid TX input vout"); + throw std::runtime_error("invalid TX input vout"); // extract the optional sequence number uint32_t nSequenceIn=std::numeric_limits<unsigned int>::max(); @@ -213,26 +220,26 @@ static void MutateTxAddInput(CMutableTransaction& tx, const string& strInput) tx.vin.push_back(txin); } -static void MutateTxAddOutAddr(CMutableTransaction& tx, const string& strInput) +static void MutateTxAddOutAddr(CMutableTransaction& tx, const std::string& strInput) { // separate VALUE:ADDRESS in string size_t pos = strInput.find(':'); - if ((pos == string::npos) || + if ((pos == std::string::npos) || (pos == 0) || (pos == (strInput.size() - 1))) - throw runtime_error("TX output missing separator"); + throw std::runtime_error("TX output missing separator"); // extract and validate VALUE - string strValue = strInput.substr(0, pos); + std::string strValue = strInput.substr(0, pos); CAmount value; if (!ParseMoney(strValue, value)) - throw runtime_error("invalid TX output value"); + throw std::runtime_error("invalid TX output value"); // extract and validate ADDRESS - string strAddr = strInput.substr(pos + 1, string::npos); + std::string strAddr = strInput.substr(pos + 1, std::string::npos); CBitcoinAddress addr(strAddr); if (!addr.IsValid()) - throw runtime_error("invalid TX output address"); + throw std::runtime_error("invalid TX output address"); // build standard output script via GetScriptForDestination() CScript scriptPubKey = GetScriptForDestination(addr.Get()); @@ -242,7 +249,7 @@ static void MutateTxAddOutAddr(CMutableTransaction& tx, const string& strInput) tx.vout.push_back(txout); } -static void MutateTxAddOutData(CMutableTransaction& tx, const string& strInput) +static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strInput) { CAmount value = 0; @@ -250,20 +257,20 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const string& strInput) size_t pos = strInput.find(':'); if (pos==0) - throw runtime_error("TX output value not specified"); + throw std::runtime_error("TX output value not 
specified"); - if (pos != string::npos) { + if (pos != std::string::npos) { // extract and validate VALUE - string strValue = strInput.substr(0, pos); + std::string strValue = strInput.substr(0, pos); if (!ParseMoney(strValue, value)) - throw runtime_error("invalid TX output value"); + throw std::runtime_error("invalid TX output value"); } // extract and validate DATA - string strData = strInput.substr(pos + 1, string::npos); + std::string strData = strInput.substr(pos + 1, std::string::npos); if (!IsHex(strData)) - throw runtime_error("invalid TX output data"); + throw std::runtime_error("invalid TX output data"); std::vector<unsigned char> data = ParseHex(strData); @@ -271,22 +278,22 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const string& strInput) tx.vout.push_back(txout); } -static void MutateTxAddOutScript(CMutableTransaction& tx, const string& strInput) +static void MutateTxAddOutScript(CMutableTransaction& tx, const std::string& strInput) { // separate VALUE:SCRIPT in string size_t pos = strInput.find(':'); - if ((pos == string::npos) || + if ((pos == std::string::npos) || (pos == 0)) - throw runtime_error("TX output missing separator"); + throw std::runtime_error("TX output missing separator"); // extract and validate VALUE - string strValue = strInput.substr(0, pos); + std::string strValue = strInput.substr(0, pos); CAmount value; if (!ParseMoney(strValue, value)) - throw runtime_error("invalid TX output value"); + throw std::runtime_error("invalid TX output value"); // extract and validate script - string strScript = strInput.substr(pos + 1, string::npos); + std::string strScript = strInput.substr(pos + 1, std::string::npos); CScript scriptPubKey = ParseScript(strScript); // throws on err // construct TxOut, append to transaction output list @@ -294,26 +301,26 @@ static void MutateTxAddOutScript(CMutableTransaction& tx, const string& strInput tx.vout.push_back(txout); } -static void MutateTxDelInput(CMutableTransaction& tx, const string& strInIdx) +static void MutateTxDelInput(CMutableTransaction& tx, const std::string& strInIdx) { // parse requested deletion index int inIdx = atoi(strInIdx); if (inIdx < 0 || inIdx >= (int)tx.vin.size()) { - string strErr = "Invalid TX input index '" + strInIdx + "'"; - throw runtime_error(strErr.c_str()); + std::string strErr = "Invalid TX input index '" + strInIdx + "'"; + throw std::runtime_error(strErr.c_str()); } // delete input from transaction tx.vin.erase(tx.vin.begin() + inIdx); } -static void MutateTxDelOutput(CMutableTransaction& tx, const string& strOutIdx) +static void MutateTxDelOutput(CMutableTransaction& tx, const std::string& strOutIdx) { // parse requested deletion index int outIdx = atoi(strOutIdx); if (outIdx < 0 || outIdx >= (int)tx.vout.size()) { - string strErr = "Invalid TX output index '" + strOutIdx + "'"; - throw runtime_error(strErr.c_str()); + std::string strErr = "Invalid TX output index '" + strOutIdx + "'"; + throw std::runtime_error(strErr.c_str()); } // delete output from transaction @@ -333,7 +340,7 @@ static const struct { {"SINGLE|ANYONECANPAY", SIGHASH_SINGLE|SIGHASH_ANYONECANPAY}, }; -static bool findSighashFlags(int& flags, const string& flagStr) +static bool findSighashFlags(int& flags, const std::string& flagStr) { flags = 0; @@ -347,17 +354,17 @@ static bool findSighashFlags(int& flags, const string& flagStr) return false; } -uint256 ParseHashUO(map<string,UniValue>& o, string strKey) +uint256 ParseHashUO(std::map<std::string,UniValue>& o, std::string strKey) { if (!o.count(strKey)) 
return uint256(); return ParseHashUV(o[strKey], strKey); } -vector<unsigned char> ParseHexUO(map<string,UniValue>& o, string strKey) +std::vector<unsigned char> ParseHexUO(std::map<std::string,UniValue>& o, std::string strKey) { if (!o.count(strKey)) { - vector<unsigned char> emptyVec; + std::vector<unsigned char> emptyVec; return emptyVec; } return ParseHexUV(o[strKey], strKey); @@ -366,24 +373,24 @@ vector<unsigned char> ParseHexUO(map<string,UniValue>& o, string strKey) static CAmount AmountFromValue(const UniValue& value) { if (!value.isNum() && !value.isStr()) - throw runtime_error("Amount is not a number or string"); + throw std::runtime_error("Amount is not a number or string"); CAmount amount; if (!ParseFixedPoint(value.getValStr(), 8, &amount)) - throw runtime_error("Invalid amount"); + throw std::runtime_error("Invalid amount"); if (!MoneyRange(amount)) - throw runtime_error("Amount out of range"); + throw std::runtime_error("Amount out of range"); return amount; } -static void MutateTxSign(CMutableTransaction& tx, const string& flagStr) +static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr) { int nHashType = SIGHASH_ALL; if (flagStr.size() > 0) if (!findSighashFlags(nHashType, flagStr)) - throw runtime_error("unknown sighash flag/sign option"); + throw std::runtime_error("unknown sighash flag/sign option"); - vector<CTransaction> txVariants; + std::vector<CTransaction> txVariants; txVariants.push_back(tx); // mergedTx will end up with all the signatures; it @@ -394,17 +401,17 @@ static void MutateTxSign(CMutableTransaction& tx, const string& flagStr) CCoinsViewCache view(&viewDummy); if (!registers.count("privatekeys")) - throw runtime_error("privatekeys register variable must be set."); + throw std::runtime_error("privatekeys register variable must be set."); CBasicKeyStore tempKeystore; UniValue keysObj = registers["privatekeys"]; for (unsigned int kidx = 0; kidx < keysObj.size(); kidx++) { if (!keysObj[kidx].isStr()) - throw runtime_error("privatekey not a string"); + throw std::runtime_error("privatekey not a std::string"); CBitcoinSecret vchSecret; bool fGood = vchSecret.SetString(keysObj[kidx].getValStr()); if (!fGood) - throw runtime_error("privatekey not valid"); + throw std::runtime_error("privatekey not valid"); CKey key = vchSecret.GetKey(); tempKeystore.AddKey(key); @@ -412,34 +419,34 @@ static void MutateTxSign(CMutableTransaction& tx, const string& flagStr) // Add previous txouts given in the RPC call: if (!registers.count("prevtxs")) - throw runtime_error("prevtxs register variable must be set."); + throw std::runtime_error("prevtxs register variable must be set."); UniValue prevtxsObj = registers["prevtxs"]; { for (unsigned int previdx = 0; previdx < prevtxsObj.size(); previdx++) { UniValue prevOut = prevtxsObj[previdx]; if (!prevOut.isObject()) - throw runtime_error("expected prevtxs internal object"); + throw std::runtime_error("expected prevtxs internal object"); - map<string,UniValue::VType> types = boost::assign::map_list_of("txid", UniValue::VSTR)("vout",UniValue::VNUM)("scriptPubKey",UniValue::VSTR); + std::map<std::string,UniValue::VType> types = boost::assign::map_list_of("txid", UniValue::VSTR)("vout",UniValue::VNUM)("scriptPubKey",UniValue::VSTR); if (!prevOut.checkObject(types)) - throw runtime_error("prevtxs internal object typecheck fail"); + throw std::runtime_error("prevtxs internal object typecheck fail"); uint256 txid = ParseHashUV(prevOut["txid"], "txid"); int nOut = atoi(prevOut["vout"].getValStr()); if (nOut < 0) - 
throw runtime_error("vout must be positive"); + throw std::runtime_error("vout must be positive"); - vector<unsigned char> pkData(ParseHexUV(prevOut["scriptPubKey"], "scriptPubKey")); + std::vector<unsigned char> pkData(ParseHexUV(prevOut["scriptPubKey"], "scriptPubKey")); CScript scriptPubKey(pkData.begin(), pkData.end()); { CCoinsModifier coins = view.ModifyCoins(txid); if (coins->IsAvailable(nOut) && coins->vout[nOut].scriptPubKey != scriptPubKey) { - string err("Previous output scriptPubKey mismatch:\n"); + std::string err("Previous output scriptPubKey mismatch:\n"); err = err + ScriptToAsmStr(coins->vout[nOut].scriptPubKey) + "\nvs:\n"+ ScriptToAsmStr(scriptPubKey); - throw runtime_error(err); + throw std::runtime_error(err); } if ((unsigned int)nOut >= coins->vout.size()) coins->vout.resize(nOut+1); @@ -455,7 +462,7 @@ static void MutateTxSign(CMutableTransaction& tx, const string& flagStr) if ((scriptPubKey.IsPayToScriptHash() || scriptPubKey.IsPayToWitnessScriptHash()) && prevOut.exists("redeemScript")) { UniValue v = prevOut["redeemScript"]; - vector<unsigned char> rsData(ParseHexUV(v, "redeemScript")); + std::vector<unsigned char> rsData(ParseHexUV(v, "redeemScript")); CScript redeemScript(rsData.begin(), rsData.end()); tempKeystore.AddCScript(redeemScript); } @@ -512,8 +519,8 @@ public: } }; -static void MutateTx(CMutableTransaction& tx, const string& command, - const string& commandVal) +static void MutateTx(CMutableTransaction& tx, const std::string& command, + const std::string& commandVal) { std::unique_ptr<Secp256k1Init> ecc; @@ -548,7 +555,7 @@ static void MutateTx(CMutableTransaction& tx, const string& command, RegisterSet(commandVal); else - throw runtime_error("unknown command"); + throw std::runtime_error("unknown command"); } static void OutputTxJSON(const CTransaction& tx) @@ -556,20 +563,20 @@ static void OutputTxJSON(const CTransaction& tx) UniValue entry(UniValue::VOBJ); TxToUniv(tx, uint256(), entry); - string jsonOutput = entry.write(4); + std::string jsonOutput = entry.write(4); fprintf(stdout, "%s\n", jsonOutput.c_str()); } static void OutputTxHash(const CTransaction& tx) { - string strHexHash = tx.GetHash().GetHex(); // the hex-encoded transaction hash (aka the transaction id) + std::string strHexHash = tx.GetHash().GetHex(); // the hex-encoded transaction hash (aka the transaction id) fprintf(stdout, "%s\n", strHexHash.c_str()); } static void OutputTxHex(const CTransaction& tx) { - string strHex = EncodeHexTx(tx); + std::string strHex = EncodeHexTx(tx); fprintf(stdout, "%s\n", strHex.c_str()); } @@ -584,10 +591,10 @@ static void OutputTx(const CTransaction& tx) OutputTxHex(tx); } -static string readStdin() +static std::string readStdin() { char buf[4096]; - string ret; + std::string ret; while (!feof(stdin)) { size_t bread = fread(buf, 1, sizeof(buf), stdin); @@ -597,7 +604,7 @@ static string readStdin() } if (ferror(stdin)) - throw runtime_error("error reading stdin"); + throw std::runtime_error("error reading stdin"); boost::algorithm::trim_right(ret); @@ -606,7 +613,7 @@ static string readStdin() static int CommandLineRawTx(int argc, char* argv[]) { - string strPrint; + std::string strPrint; int nRet = 0; try { // Skip switches; Permit common stdin convention "-" @@ -616,33 +623,31 @@ static int CommandLineRawTx(int argc, char* argv[]) argv++; } - CTransaction txDecodeTmp; + CMutableTransaction tx; int startArg; if (!fCreateBlank) { // require at least one param if (argc < 2) - throw runtime_error("too few parameters"); + throw std::runtime_error("too few 
parameters"); // param: hex-encoded bitcoin transaction - string strHexTx(argv[1]); + std::string strHexTx(argv[1]); if (strHexTx == "-") // "-" implies standard input strHexTx = readStdin(); - if (!DecodeHexTx(txDecodeTmp, strHexTx)) - throw runtime_error("invalid transaction encoding"); + if (!DecodeHexTx(tx, strHexTx, true)) + throw std::runtime_error("invalid transaction encoding"); startArg = 2; } else startArg = 1; - CMutableTransaction tx(txDecodeTmp); - for (int i = startArg; i < argc; i++) { - string arg = argv[i]; - string key, value; + std::string arg = argv[i]; + std::string key, value; size_t eqpos = arg.find('='); - if (eqpos == string::npos) + if (eqpos == std::string::npos) key = arg; else { key = arg.substr(0, eqpos); @@ -659,7 +664,7 @@ static int CommandLineRawTx(int argc, char* argv[]) throw; } catch (const std::exception& e) { - strPrint = string("error: ") + e.what(); + strPrint = std::string("error: ") + e.what(); nRet = EXIT_FAILURE; } catch (...) { @@ -678,8 +683,9 @@ int main(int argc, char* argv[]) SetupEnvironment(); try { - if(!AppInitRawTx(argc, argv)) - return EXIT_FAILURE; + int ret = AppInitRawTx(argc, argv); + if (ret != CONTINUE_EXECUTION) + return ret; } catch (const std::exception& e) { PrintExceptionContinue(&e, "AppInitRawTx()"); diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index 351463c256..ba3ccac615 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -92,7 +92,7 @@ bool AppInit(int argc, char* argv[]) } fprintf(stdout, "%s", strUsage.c_str()); - return false; + return true; } try @@ -126,6 +126,26 @@ bool AppInit(int argc, char* argv[]) if (fCommandLine) { fprintf(stderr, "Error: There is no RPC client functionality in bitcoind anymore. Use the bitcoin-cli utility instead.\n"); + exit(EXIT_FAILURE); + } + // -server defaults to true for bitcoind but not for the GUI so do this here + SoftSetBoolArg("-server", true); + // Set this early so that parameter interactions go to console + InitLogging(); + InitParameterInteraction(); + if (!AppInitBasicSetup()) + { + // InitError will have been called with detailed error, which ends up on console + exit(1); + } + if (!AppInitParameterInteraction()) + { + // InitError will have been called with detailed error, which ends up on console + exit(1); + } + if (!AppInitSanityChecks()) + { + // InitError will have been called with detailed error, which ends up on console exit(1); } if (GetBoolArg("-daemon", false)) @@ -143,12 +163,8 @@ bool AppInit(int argc, char* argv[]) return false; #endif // HAVE_DECL_DAEMON } - SoftSetBoolArg("-server", true); - // Set this early so that parameter interactions go to console - InitLogging(); - InitParameterInteraction(); - fRet = AppInit2(threadGroup, scheduler); + fRet = AppInitMain(threadGroup, scheduler); } catch (const std::exception& e) { PrintExceptionContinue(&e, "AppInit()"); @@ -177,5 +193,5 @@ int main(int argc, char* argv[]) // Connect bitcoind signal handlers noui_connect(); - return (AppInit(argc, argv) ? 0 : 1); + return (AppInit(argc, argv) ? 
EXIT_SUCCESS : EXIT_FAILURE); } diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index 93d3fa372b..914af0c670 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -10,7 +10,7 @@ #include "random.h" #include "streams.h" #include "txmempool.h" -#include "main.h" +#include "validation.h" #include "util.h" #include <unordered_map> @@ -24,7 +24,7 @@ CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, bool f //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase prefilledtxn[0] = {0, block.vtx[0]}; for (size_t i = 1; i < block.vtx.size(); i++) { - const CTransaction& tx = block.vtx[i]; + const CTransaction& tx = *block.vtx[i]; shorttxids[i - 1] = GetShortID(fUseWTXID ? tx.GetWitnessHash() : tx.GetHash()); } } @@ -59,10 +59,10 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c int32_t lastprefilledindex = -1; for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) { - if (cmpctblock.prefilledtxn[i].tx.IsNull()) + if (cmpctblock.prefilledtxn[i].tx->IsNull()) return READ_STATUS_INVALID; - lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so cant overflow here + lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here if (lastprefilledindex > std::numeric_limits<uint16_t>::max()) return READ_STATUS_INVALID; if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) { @@ -71,7 +71,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c // have neither a prefilled txn or a shorttxid! return READ_STATUS_INVALID; } - txn_available[lastprefilledindex] = std::make_shared<CTransaction>(cmpctblock.prefilledtxn[i].tx); + txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx; } prefilled_count = cmpctblock.prefilledtxn.size(); @@ -131,7 +131,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c break; } - LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), cmpctblock.GetSerializeSize(SER_NETWORK, PROTOCOL_VERSION)); + LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION)); return READ_STATUS_OK; } @@ -142,7 +142,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const { return txn_available[index] ? true : false; } -ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransaction>& vtx_missing) const { +ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) const { assert(!header.IsNull()); block = header; block.vtx.resize(txn_available.size()); @@ -154,7 +154,7 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< return READ_STATUS_INVALID; block.vtx[i] = vtx_missing[tx_missing_offset++]; } else - block.vtx[i] = *txn_available[i]; + block.vtx[i] = txn_available[i]; } if (vtx_missing.size() != tx_missing_offset) return READ_STATUS_INVALID; @@ -167,13 +167,13 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< // check its own merkle root and cache that check. 
if (state.CorruptionPossible()) return READ_STATUS_FAILED; // Possible Short ID collision - return READ_STATUS_INVALID; + return READ_STATUS_CHECKBLOCK_FAILED; } LogPrint("cmpctblock", "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool and %lu txn requested\n", header.GetHash().ToString(), prefilled_count, mempool_count, vtx_missing.size()); if (vtx_missing.size() < 5) { - for(const CTransaction& tx : vtx_missing) - LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", header.GetHash().ToString(), tx.GetHash().ToString()); + for (const auto& tx : vtx_missing) + LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", header.GetHash().ToString(), tx->GetHash().ToString()); } return READ_STATUS_OK; diff --git a/src/blockencodings.h b/src/blockencodings.h index 99b1cb140d..27baf1f8f8 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -14,14 +14,14 @@ class CTxMemPool; // Dumb helper to handle CTransaction compression at serialize-time struct TransactionCompressor { private: - CTransaction& tx; + CTransactionRef& tx; public: - TransactionCompressor(CTransaction& txIn) : tx(txIn) {} + TransactionCompressor(CTransactionRef& txIn) : tx(txIn) {} ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(tx); //TODO: Compress tx encoding } }; @@ -35,7 +35,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(blockhash); uint64_t indexes_size = (uint64_t)indexes.size(); READWRITE(COMPACTSIZE(indexes_size)); @@ -72,7 +72,7 @@ class BlockTransactions { public: // A BlockTransactions message uint256 blockhash; - std::vector<CTransaction> txn; + std::vector<CTransactionRef> txn; BlockTransactions() {} BlockTransactions(const BlockTransactionsRequest& req) : @@ -81,7 +81,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(blockhash); uint64_t txn_size = (uint64_t)txn.size(); READWRITE(COMPACTSIZE(txn_size)); @@ -104,12 +104,12 @@ struct PrefilledTransaction { // Used as an offset since last prefilled tx in CBlockHeaderAndShortTxIDs, // as a proper transaction-in-block-index in PartiallyDownloadedBlock uint16_t index; - CTransaction tx; + CTransactionRef tx; ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { uint64_t idx = index; READWRITE(COMPACTSIZE(idx)); if (idx > std::numeric_limits<uint16_t>::max()) @@ -124,6 +124,8 @@ typedef enum ReadStatus_t READ_STATUS_OK, READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap READ_STATUS_FAILED, // Failed to process object + READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a + // failure in CheckBlock. 
} ReadStatus; class CBlockHeaderAndShortTxIDs { @@ -155,7 +157,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(header); READWRITE(nonce); @@ -191,7 +193,7 @@ public: class PartiallyDownloadedBlock { protected: - std::vector<std::shared_ptr<const CTransaction> > txn_available; + std::vector<CTransactionRef> txn_available; size_t prefilled_count = 0, mempool_count = 0; CTxMemPool* pool; public: @@ -200,7 +202,7 @@ public: ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock); bool IsTxAvailable(size_t index) const; - ReadStatus FillBlock(CBlock& block, const std::vector<CTransaction>& vtx_missing) const; + ReadStatus FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) const; }; #endif diff --git a/src/bloom.cpp b/src/bloom.cpp index 2677652ada..d00befc61c 100644 --- a/src/bloom.cpp +++ b/src/bloom.cpp @@ -34,7 +34,7 @@ CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int * See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these formulas */ isFull(false), - isEmpty(false), + isEmpty(true), nHashFuncs(min((unsigned int)(vData.size() * 8 / nElements * LN2), MAX_HASH_FUNCS)), nTweak(nTweakIn), nFlags(nFlagsIn) diff --git a/src/bloom.h b/src/bloom.h index ad6de625d8..d3a017371f 100644 --- a/src/bloom.h +++ b/src/bloom.h @@ -73,7 +73,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(vData); READWRITE(nHashFuncs); READWRITE(nTweak); diff --git a/src/chain.h b/src/chain.h index 46a16a3061..0aac5de5c2 100644 --- a/src/chain.h +++ b/src/chain.h @@ -28,7 +28,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(VARINT(nBlocks)); READWRITE(VARINT(nSize)); READWRITE(VARINT(nUndoSize)); @@ -76,7 +76,7 @@ struct CDiskBlockPos ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(VARINT(nFile)); READWRITE(VARINT(nPos)); } @@ -357,8 +357,9 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { - if (!(nType & SER_GETHASH)) + inline void SerializationOp(Stream& s, Operation ser_action) { + int nVersion = s.GetVersion(); + if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT(nVersion)); READWRITE(VARINT(nHeight)); diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 5850016ae2..3b3c0a5d3e 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -31,7 +31,7 @@ static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesi genesis.nBits = nBits; genesis.nNonce = nNonce; genesis.nVersion = nVersion; - genesis.vtx.push_back(txNew); + genesis.vtx.push_back(MakeTransactionRef(std::move(txNew))); genesis.hashPrevBlock.SetNull(); genesis.hashMerkleRoot = BlockMerkleRoot(genesis); return genesis; @@ -96,6 +96,9 @@ public: 
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 1479168000; // November 15th, 2016. consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 1510704000; // November 15th, 2017. + // The best chain should have at least this much work. + consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000002cb971dd56d1c583c20f90"); + /** * The message start string is designed to be unlikely to occur in normal data. * The characters are rarely used upper ASCII, not valid as UTF-8, and produce @@ -191,6 +194,9 @@ public: consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 1462060800; // May 1st 2016 consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 1493596800; // May 1st 2017 + // The best chain should have at least this much work. + consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000000198b4def2baa9338d6"); + pchMessageStart[0] = 0x0b; pchMessageStart[1] = 0x11; pchMessageStart[2] = 0x09; @@ -224,6 +230,7 @@ public: fRequireStandard = false; fMineBlocksOnDemand = false; + checkpointData = (CCheckpointData) { boost::assign::map_list_of ( 546, uint256S("000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70")), @@ -265,6 +272,9 @@ public: consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 0; consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 999999999999ULL; + // The best chain should have at least this much work. + consensus.nMinimumChainWork = uint256S("0x00"); + pchMessageStart[0] = 0xfa; pchMessageStart[1] = 0xbf; pchMessageStart[2] = 0xb5; diff --git a/src/checkpoints.cpp b/src/checkpoints.cpp index aefddce464..a487fb1dc4 100644 --- a/src/checkpoints.cpp +++ b/src/checkpoints.cpp @@ -6,7 +6,7 @@ #include "chain.h" #include "chainparams.h" -#include "main.h" +#include "validation.h" #include "uint256.h" #include <stdint.h> @@ -55,16 +55,6 @@ namespace Checkpoints { return fWorkBefore / (fWorkBefore + fWorkAfter); } - int GetTotalBlocksEstimate(const CCheckpointData& data) - { - const MapCheckpoints& checkpoints = data.mapCheckpoints; - - if (checkpoints.empty()) - return 0; - - return checkpoints.rbegin()->first; - } - CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) { const MapCheckpoints& checkpoints = data.mapCheckpoints; diff --git a/src/checkpoints.h b/src/checkpoints.h index cd25ea5379..04346f35ff 100644 --- a/src/checkpoints.h +++ b/src/checkpoints.h @@ -19,9 +19,6 @@ struct CCheckpointData; namespace Checkpoints { -//! Return conservative estimate of total number of blocks, 0 if unknown -int GetTotalBlocksEstimate(const CCheckpointData& data); - //! Returns last CBlockIndex* in mapBlockIndex that is a checkpoint CBlockIndex* GetLastCheckpoint(const CCheckpointData& data); diff --git a/src/coins.h b/src/coins.h index 033651a435..dd6ef6cc3a 100644 --- a/src/coins.h +++ b/src/coins.h @@ -153,31 +153,8 @@ public: return fCoinBase; } - unsigned int GetSerializeSize(int nType, int nVersion) const { - unsigned int nSize = 0; - unsigned int nMaskSize = 0, nMaskCode = 0; - CalcMaskSize(nMaskSize, nMaskCode); - bool fFirst = vout.size() > 0 && !vout[0].IsNull(); - bool fSecond = vout.size() > 1 && !vout[1].IsNull(); - assert(fFirst || fSecond || nMaskCode); - unsigned int nCode = 8*(nMaskCode - (fFirst || fSecond ? 0 : 1)) + (fCoinBase ? 1 : 0) + (fFirst ? 2 : 0) + (fSecond ? 
4 : 0); - // version - nSize += ::GetSerializeSize(VARINT(this->nVersion), nType, nVersion); - // size of header code - nSize += ::GetSerializeSize(VARINT(nCode), nType, nVersion); - // spentness bitmask - nSize += nMaskSize; - // txouts themself - for (unsigned int i = 0; i < vout.size(); i++) - if (!vout[i].IsNull()) - nSize += ::GetSerializeSize(CTxOutCompressor(REF(vout[i])), nType, nVersion); - // height - nSize += ::GetSerializeSize(VARINT(nHeight), nType, nVersion); - return nSize; - } - template<typename Stream> - void Serialize(Stream &s, int nType, int nVersion) const { + void Serialize(Stream &s) const { unsigned int nMaskSize = 0, nMaskCode = 0; CalcMaskSize(nMaskSize, nMaskCode); bool fFirst = vout.size() > 0 && !vout[0].IsNull(); @@ -185,33 +162,33 @@ public: assert(fFirst || fSecond || nMaskCode); unsigned int nCode = 8*(nMaskCode - (fFirst || fSecond ? 0 : 1)) + (fCoinBase ? 1 : 0) + (fFirst ? 2 : 0) + (fSecond ? 4 : 0); // version - ::Serialize(s, VARINT(this->nVersion), nType, nVersion); + ::Serialize(s, VARINT(this->nVersion)); // header code - ::Serialize(s, VARINT(nCode), nType, nVersion); + ::Serialize(s, VARINT(nCode)); // spentness bitmask for (unsigned int b = 0; b<nMaskSize; b++) { unsigned char chAvail = 0; for (unsigned int i = 0; i < 8 && 2+b*8+i < vout.size(); i++) if (!vout[2+b*8+i].IsNull()) chAvail |= (1 << i); - ::Serialize(s, chAvail, nType, nVersion); + ::Serialize(s, chAvail); } // txouts themself for (unsigned int i = 0; i < vout.size(); i++) { if (!vout[i].IsNull()) - ::Serialize(s, CTxOutCompressor(REF(vout[i])), nType, nVersion); + ::Serialize(s, CTxOutCompressor(REF(vout[i]))); } // coinbase height - ::Serialize(s, VARINT(nHeight), nType, nVersion); + ::Serialize(s, VARINT(nHeight)); } template<typename Stream> - void Unserialize(Stream &s, int nType, int nVersion) { + void Unserialize(Stream &s) { unsigned int nCode = 0; // version - ::Unserialize(s, VARINT(this->nVersion), nType, nVersion); + ::Unserialize(s, VARINT(this->nVersion)); // header code - ::Unserialize(s, VARINT(nCode), nType, nVersion); + ::Unserialize(s, VARINT(nCode)); fCoinBase = nCode & 1; std::vector<bool> vAvail(2, false); vAvail[0] = (nCode & 2) != 0; @@ -220,7 +197,7 @@ public: // spentness bitmask while (nMaskCode > 0) { unsigned char chAvail = 0; - ::Unserialize(s, chAvail, nType, nVersion); + ::Unserialize(s, chAvail); for (unsigned int p = 0; p < 8; p++) { bool f = (chAvail & (1 << p)) != 0; vAvail.push_back(f); @@ -232,10 +209,10 @@ public: vout.assign(vAvail.size(), CTxOut()); for (unsigned int i = 0; i < vAvail.size(); i++) { if (vAvail[i]) - ::Unserialize(s, REF(CTxOutCompressor(vout[i])), nType, nVersion); + ::Unserialize(s, REF(CTxOutCompressor(vout[i]))); } // coinbase height - ::Unserialize(s, VARINT(nHeight), nType, nVersion); + ::Unserialize(s, VARINT(nHeight)); Cleanup(); } @@ -490,7 +467,6 @@ public: friend class CCoinsModifier; private: - CCoinsMap::iterator FetchCoins(const uint256 &txid); CCoinsMap::const_iterator FetchCoins(const uint256 &txid) const; /** diff --git a/src/compat.h b/src/compat.h index 79a297e5e4..2578d6d344 100644 --- a/src/compat.h +++ b/src/compat.h @@ -34,6 +34,7 @@ #else #include <sys/fcntl.h> #include <sys/mman.h> +#include <sys/select.h> #include <sys/socket.h> #include <sys/types.h> #include <net/if.h> diff --git a/src/compat/byteswap.h b/src/compat/byteswap.h index 899220bdc5..07ca535728 100644 --- a/src/compat/byteswap.h +++ b/src/compat/byteswap.h @@ -1,4 +1,4 @@ -// Copyright (c) 2014 The Bitcoin developers +// Copyright (c) 
2014 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/compat/endian.h b/src/compat/endian.h index 6bfae42c77..f7c1f9318a 100644 --- a/src/compat/endian.h +++ b/src/compat/endian.h @@ -1,4 +1,4 @@ -// Copyright (c) 2014-2015 The Bitcoin developers +// Copyright (c) 2014-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/compressor.h b/src/compressor.h index fa702f0dfa..961365d261 100644 --- a/src/compressor.h +++ b/src/compressor.h @@ -55,16 +55,8 @@ protected: public: CScriptCompressor(CScript &scriptIn) : script(scriptIn) { } - unsigned int GetSerializeSize(int nType, int nVersion) const { - std::vector<unsigned char> compr; - if (Compress(compr)) - return compr.size(); - unsigned int nSize = script.size() + nSpecialScripts; - return script.size() + VARINT(nSize).GetSerializeSize(nType, nVersion); - } - template<typename Stream> - void Serialize(Stream &s, int nType, int nVersion) const { + void Serialize(Stream &s) const { std::vector<unsigned char> compr; if (Compress(compr)) { s << CFlatData(compr); @@ -76,7 +68,7 @@ public: } template<typename Stream> - void Unserialize(Stream &s, int nType, int nVersion) { + void Unserialize(Stream &s) { unsigned int nSize = 0; s >> VARINT(nSize); if (nSize < nSpecialScripts) { @@ -112,7 +104,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { if (!ser_action.ForRead()) { uint64_t nVal = CompressAmount(txout.nValue); READWRITE(VARINT(nVal)); diff --git a/src/consensus/merkle.cpp b/src/consensus/merkle.cpp index 35f7d2e05a..6fa96ddf45 100644 --- a/src/consensus/merkle.cpp +++ b/src/consensus/merkle.cpp @@ -160,7 +160,7 @@ uint256 BlockMerkleRoot(const CBlock& block, bool* mutated) std::vector<uint256> leaves; leaves.resize(block.vtx.size()); for (size_t s = 0; s < block.vtx.size(); s++) { - leaves[s] = block.vtx[s].GetHash(); + leaves[s] = block.vtx[s]->GetHash(); } return ComputeMerkleRoot(leaves, mutated); } @@ -171,7 +171,7 @@ uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated) leaves.resize(block.vtx.size()); leaves[0].SetNull(); // The witness hash of the coinbase is 0. 
for (size_t s = 1; s < block.vtx.size(); s++) { - leaves[s] = block.vtx[s].GetWitnessHash(); + leaves[s] = block.vtx[s]->GetWitnessHash(); } return ComputeMerkleRoot(leaves, mutated); } @@ -181,7 +181,7 @@ std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position) std::vector<uint256> leaves; leaves.resize(block.vtx.size()); for (size_t s = 0; s < block.vtx.size(); s++) { - leaves[s] = block.vtx[s].GetHash(); + leaves[s] = block.vtx[s]->GetHash(); } return ComputeMerkleBranch(leaves, position); } diff --git a/src/consensus/params.h b/src/consensus/params.h index 0e73cace83..20efc68ade 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -61,6 +61,7 @@ struct Params { int64_t nPowTargetSpacing; int64_t nPowTargetTimespan; int64_t DifficultyAdjustmentInterval() const { return nPowTargetTimespan / nPowTargetSpacing; } + uint256 nMinimumChainWork; }; } // namespace Consensus diff --git a/src/core_io.h b/src/core_io.h index b559d44bf5..7642bc6d6e 100644 --- a/src/core_io.h +++ b/src/core_io.h @@ -11,22 +11,23 @@ class CBlock; class CScript; class CTransaction; +struct CMutableTransaction; class uint256; class UniValue; // core_read.cpp -extern CScript ParseScript(const std::string& s); -extern std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode = false); -extern bool DecodeHexTx(CTransaction& tx, const std::string& strHexTx, bool fTryNoWitness = false); -extern bool DecodeHexBlk(CBlock&, const std::string& strHexBlk); -extern uint256 ParseHashUV(const UniValue& v, const std::string& strName); -extern uint256 ParseHashStr(const std::string&, const std::string& strName); -extern std::vector<unsigned char> ParseHexUV(const UniValue& v, const std::string& strName); +CScript ParseScript(const std::string& s); +std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode = false); +bool DecodeHexTx(CMutableTransaction& tx, const std::string& strHexTx, bool fTryNoWitness = false); +bool DecodeHexBlk(CBlock&, const std::string& strHexBlk); +uint256 ParseHashUV(const UniValue& v, const std::string& strName); +uint256 ParseHashStr(const std::string&, const std::string& strName); +std::vector<unsigned char> ParseHexUV(const UniValue& v, const std::string& strName); // core_write.cpp -extern std::string FormatScript(const CScript& script); -extern std::string EncodeHexTx(const CTransaction& tx); -extern void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex); -extern void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry); +std::string FormatScript(const CScript& script); +std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags = 0); +void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex); +void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry); #endif // BITCOIN_CORE_IO_H diff --git a/src/core_memusage.h b/src/core_memusage.h index b8e0f08bbf..0dcc24c40c 100644 --- a/src/core_memusage.h +++ b/src/core_memusage.h @@ -69,8 +69,8 @@ static inline size_t RecursiveDynamicUsage(const CMutableTransaction& tx) { static inline size_t RecursiveDynamicUsage(const CBlock& block) { size_t mem = memusage::DynamicUsage(block.vtx); - for (std::vector<CTransaction>::const_iterator it = block.vtx.begin(); it != block.vtx.end(); it++) { - mem += RecursiveDynamicUsage(*it); + for (const auto& tx : block.vtx) { + mem += memusage::DynamicUsage(tx) + RecursiveDynamicUsage(*tx); } return mem; } diff --git 
a/src/core_read.cpp b/src/core_read.cpp index 7cfda6dd6d..b6d0285459 100644 --- a/src/core_read.cpp +++ b/src/core_read.cpp @@ -90,7 +90,7 @@ CScript ParseScript(const std::string& s) return result; } -bool DecodeHexTx(CTransaction& tx, const std::string& strHexTx, bool fTryNoWitness) +bool DecodeHexTx(CMutableTransaction& tx, const std::string& strHexTx, bool fTryNoWitness) { if (!IsHex(strHexTx)) return false; diff --git a/src/core_write.cpp b/src/core_write.cpp index ea01ddc10d..9f859ba9e1 100644 --- a/src/core_write.cpp +++ b/src/core_write.cpp @@ -116,9 +116,9 @@ string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode) return str; } -string EncodeHexTx(const CTransaction& tx) +string EncodeHexTx(const CTransaction& tx, const int serialFlags) { - CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); + CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | serialFlags); ssTx << tx; return HexStr(ssTx.begin(), ssTx.end()); } diff --git a/src/crypto/ctaes/ctaes.c b/src/crypto/ctaes/ctaes.c index 2389fc0bb2..55962bf252 100644 --- a/src/crypto/ctaes/ctaes.c +++ b/src/crypto/ctaes/ctaes.c @@ -134,7 +134,7 @@ static void SubBytes(AES_state *s, int inv) { D = U7; } - /* Non-linear transformation (identical to the code in SubBytes) */ + /* Non-linear transformation (shared between the forward and backward case) */ M1 = T13 & T6; M6 = T3 & T16; M11 = T1 & T15; @@ -469,9 +469,9 @@ static void AES_encrypt(const AES_state* rounds, int nrounds, unsigned char* cip static void AES_decrypt(const AES_state* rounds, int nrounds, unsigned char* plain16, const unsigned char* cipher16) { /* Most AES decryption implementations use the alternate scheme - * (the Equivalent Inverse Cipher), which looks more like encryption, but - * needs different round constants. We can't reuse any code here anyway, so - * don't bother. */ + * (the Equivalent Inverse Cipher), which allows for more code reuse between + * the encryption and decryption code, but requires separate setup for both. 
+ */ AES_state s = {{0}}; int round; diff --git a/src/crypto/ctaes/test.c b/src/crypto/ctaes/test.c index fce1696acd..21439a16f1 100644 --- a/src/crypto/ctaes/test.c +++ b/src/crypto/ctaes/test.c @@ -102,7 +102,7 @@ int main(void) { } } if (fail == 0) { - fprintf(stderr, "All tests succesful\n"); + fprintf(stderr, "All tests successful\n"); } else { fprintf(stderr, "%i tests failed\n", fail); } diff --git a/src/dbwrapper.h b/src/dbwrapper.h index 47bdb31b5b..4a79bbd17d 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -17,6 +17,9 @@ #include <leveldb/db.h> #include <leveldb/write_batch.h> +static const size_t DBWRAPPER_PREALLOC_KEY_SIZE = 64; +static const size_t DBWRAPPER_PREALLOC_VALUE_SIZE = 1024; + class dbwrapper_error : public std::runtime_error { public: @@ -60,12 +63,12 @@ public: void Write(const K& key, const V& value) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(ssKey.GetSerializeSize(key)); + ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(&ssKey[0], ssKey.size()); CDataStream ssValue(SER_DISK, CLIENT_VERSION); - ssValue.reserve(ssValue.GetSerializeSize(value)); + ssValue.reserve(DBWRAPPER_PREALLOC_VALUE_SIZE); ssValue << value; ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); leveldb::Slice slValue(&ssValue[0], ssValue.size()); @@ -77,7 +80,7 @@ public: void Erase(const K& key) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(ssKey.GetSerializeSize(key)); + ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(&ssKey[0], ssKey.size()); @@ -107,7 +110,7 @@ public: template<typename K> void Seek(const K& key) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(ssKey.GetSerializeSize(key)); + ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(&ssKey[0], ssKey.size()); piter->Seek(slKey); @@ -200,7 +203,7 @@ public: bool Read(const K& key, V& value) const { CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(ssKey.GetSerializeSize(key)); + ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(&ssKey[0], ssKey.size()); @@ -234,7 +237,7 @@ public: bool Exists(const K& key) const { CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(ssKey.GetSerializeSize(key)); + ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(&ssKey[0], ssKey.size()); diff --git a/src/hash.h b/src/hash.h index db4e130ae7..94e7f5ea6c 100644 --- a/src/hash.h +++ b/src/hash.h @@ -132,15 +132,17 @@ class CHashWriter private: CHash256 ctx; + const int nType; + const int nVersion; public: - int nType; - int nVersion; CHashWriter(int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) {} - CHashWriter& write(const char *pch, size_t size) { + int GetType() const { return nType; } + int GetVersion() const { return nVersion; } + + void write(const char *pch, size_t size) { ctx.Write((const unsigned char*)pch, size); - return (*this); } // invalidates the object @@ -153,7 +155,7 @@ public: template<typename T> CHashWriter& operator<<(const T& obj) { // Serialize to this stream - ::Serialize(*this, obj, nType, nVersion); + ::Serialize(*this, obj); return (*this); } }; diff --git a/src/init.cpp b/src/init.cpp index 1a500792a3..73f885a1bb 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -19,10 +19,11 @@ #include "httpserver.h" #include "httprpc.h" #include "key.h" -#include "main.h" +#include "validation.h" #include "miner.h" #include "netbase.h" #include "net.h" +#include "net_processing.h" #include 
"policy/policy.h" #include "rpc/server.h" #include "rpc/register.h" @@ -207,6 +208,7 @@ void Shutdown() StopTorControl(); UnregisterNodeSignals(GetNodeSignals()); + DumpMempool(); if (fFeeEstimatesInitialized) { @@ -359,13 +361,13 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-banscore=<n>", strprintf(_("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD)); strUsage += HelpMessageOpt("-bantime=<n>", strprintf(_("Number of seconds to keep misbehaving peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME)); strUsage += HelpMessageOpt("-bind=<addr>", _("Bind to given address and always listen on it. Use [host]:port notation for IPv6")); - strUsage += HelpMessageOpt("-connect=<ip>", _("Connect only to the specified node(s)")); + strUsage += HelpMessageOpt("-connect=<ip>", _("Connect only to the specified node(s); -noconnect or -connect=0 alone to disable automatic connections")); strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)")); strUsage += HelpMessageOpt("-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %u)"), DEFAULT_NAME_LOOKUP)); - strUsage += HelpMessageOpt("-dnsseed", _("Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)")); + strUsage += HelpMessageOpt("-dnsseed", _("Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect/-noconnect)")); strUsage += HelpMessageOpt("-externalip=<ip>", _("Specify your own public address")); strUsage += HelpMessageOpt("-forcednsseed", strprintf(_("Always query for peer addresses via DNS lookup (default: %u)"), DEFAULT_FORCEDNSSEED)); - strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: 1 if no -proxy or -connect)")); + strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: 1 if no -proxy or -connect/-noconnect)")); strUsage += HelpMessageOpt("-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION)); strUsage += HelpMessageOpt("-maxconnections=<n>", strprintf(_("Maintain at most <n> connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS)); strUsage += HelpMessageOpt("-maxreceivebuffer=<n>", strprintf(_("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER)); @@ -378,6 +380,7 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-port=<port>", strprintf(_("Listen for connections on <port> (default: %u or testnet: %u)"), Params(CBaseChainParams::MAIN).GetDefaultPort(), Params(CBaseChainParams::TESTNET).GetDefaultPort())); strUsage += HelpMessageOpt("-proxy=<ip:port>", _("Connect through SOCKS5 proxy")); strUsage += HelpMessageOpt("-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. 
This enables Tor stream isolation (default: %u)"), DEFAULT_PROXYRANDOMIZE)); + strUsage += HelpMessageOpt("-rpcserialversion", strprintf(_("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(>0) (default: %d)"), DEFAULT_RPC_SERIALIZE_VERSION)); strUsage += HelpMessageOpt("-seednode=<ip>", _("Connect to a node to retrieve peer addresses, and disconnect")); strUsage += HelpMessageOpt("-timeout=<n>", strprintf(_("Specify connection timeout in milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT)); strUsage += HelpMessageOpt("-torcontrol=<ip>:<port>", strprintf(_("Tor control port to use if onion listening enabled (default: %s)"), DEFAULT_TOR_CONTROL)); @@ -390,7 +393,7 @@ std::string HelpMessage(HelpMessageMode mode) #endif #endif strUsage += HelpMessageOpt("-whitebind=<addr>", _("Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6")); - strUsage += HelpMessageOpt("-whitelist=<netmask>", _("Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.") + + strUsage += HelpMessageOpt("-whitelist=<IP address or network>", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times.") + " " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway")); strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY)); strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY)); @@ -600,6 +603,8 @@ void ThreadImport(std::vector<boost::filesystem::path> vImportFiles) { const CChainParams& chainparams = Params(); RenameThread("bitcoin-loadblk"); + + { CImportingNow imp; // -reindex @@ -659,6 +664,8 @@ void ThreadImport(std::vector<boost::filesystem::path> vImportFiles) LogPrintf("Stopping after block import\n"); StartShutdown(); } + } // End scope of CImportingNow + LoadMempool(); } /** Sanity checks @@ -746,23 +753,10 @@ void InitParameterInteraction() LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__); } - if (GetBoolArg("-salvagewallet", false)) { - // Rewrite just private keys: rescan to find transactions - if (SoftSetBoolArg("-rescan", true)) - LogPrintf("%s: parameter interaction: -salvagewallet=1 -> setting -rescan=1\n", __func__); - } - - // -zapwallettx implies a rescan - if (GetBoolArg("-zapwallettxes", false)) { - if (SoftSetBoolArg("-rescan", true)) - LogPrintf("%s: parameter interaction: -zapwallettxes=<mode> -> setting -rescan=1\n", __func__); - } - - // disable walletbroadcast and whitelistrelay in blocksonly mode + // disable whitelistrelay in blocksonly mode if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) { if (SoftSetBoolArg("-whitelistrelay", false)) LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__); - // walletbroadcast is disabled in CWallet::ParameterInteraction() } // Forcing relay from whitelisted hosts implies we will accept relays from them in the first place. @@ -788,10 +782,17 @@ void InitLogging() LogPrintf("Bitcoin version %s\n", FormatFullVersion()); } -/** Initialize bitcoin. 
- * @pre Parameters should be parsed and config file should be read. - */ -bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) +namespace { // Variables internal to initialization process only + +ServiceFlags nRelevantServices = NODE_NETWORK; +int nMaxConnections; +int nUserMaxConnections; +int nFD; +ServiceFlags nLocalServices = NODE_NETWORK; + +} + +bool AppInitBasicSetup() { // ********************************************************* Step 1: setup #ifdef _MSC_VER @@ -843,9 +844,13 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) // Ignore SIGPIPE, otherwise it will bring the daemon down if the client closes unexpectedly signal(SIGPIPE, SIG_IGN); #endif + return true; +} - // ********************************************************* Step 2: parameter interactions +bool AppInitParameterInteraction() +{ const CChainParams& chainparams = Params(); + // ********************************************************* Step 2: parameter interactions // also see: InitParameterInteraction() @@ -856,13 +861,15 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) } // Make sure enough file descriptors are available - int nBind = std::max((int)mapArgs.count("-bind") + (int)mapArgs.count("-whitebind"), 1); - int nUserMaxConnections = GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS); - int nMaxConnections = std::max(nUserMaxConnections, 0); + int nBind = std::max( + (mapMultiArgs.count("-bind") ? mapMultiArgs.at("-bind").size() : 0) + + (mapMultiArgs.count("-whitebind") ? mapMultiArgs.at("-whitebind").size() : 0), size_t(1)); + nUserMaxConnections = GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS); + nMaxConnections = std::max(nUserMaxConnections, 0); // Trim requested connection counts, to fit into system limitations nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS)), 0); - int nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS); + nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS); if (nFD < MIN_CORE_FILEDESCRIPTORS) return InitError(_("Not enough file descriptors available.")); nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS, nMaxConnections); @@ -920,8 +927,6 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS) nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS; - fServer = GetBoolArg("-server", false); - // block pruning; get the amount of disk space (in MiB) to allot for block & undo files int64_t nSignedPruneTarget = GetArg("-prune", 0) * 1024 * 1024; if (nSignedPruneTarget < 0) { @@ -960,8 +965,8 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) ::minRelayTxFee = CFeeRate(n); } - fRequireStandard = !GetBoolArg("-acceptnonstdtxn", !Params().RequireStandard()); - if (Params().RequireStandard() && !fRequireStandard) + fRequireStandard = !GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard()); + if (chainparams.RequireStandard() && !fRequireStandard) return InitError(strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString())); nBytesPerSigOp = GetArg("-bytespersigop", nBytesPerSigOp); @@ -977,12 +982,12 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) // Option to startup with mocktime set (used for regression testing): SetMockTime(GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op - ServiceFlags nLocalServices = NODE_NETWORK; - ServiceFlags 
nRelevantServices = NODE_NETWORK; - if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM); + if (GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) < 0) + return InitError("rpcserialversion must be non-negative."); + nMaxTipAge = GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE); fEnableReplacement = GetBoolArg("-mempoolreplacement", DEFAULT_ENABLE_REPLACEMENT); @@ -996,7 +1001,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) if (!mapMultiArgs["-bip9params"].empty()) { // Allow overriding BIP9 parameters for testing - if (!Params().MineBlocksOnDemand()) { + if (!chainparams.MineBlocksOnDemand()) { return InitError("BIP9 parameters may only be overridden on regtest."); } const vector<string>& deployments = mapMultiArgs["-bip9params"]; @@ -1028,17 +1033,11 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) } } } + return true; +} - // ********************************************************* Step 4: application initialization: dir lock, daemonize, pidfile, debug log - - // Initialize elliptic curve code - ECC_Start(); - globalVerifyHandle.reset(new ECCVerifyHandle()); - - // Sanity check - if (!InitSanityCheck()) - return InitError(strprintf(_("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME))); - +static bool LockDataDirectory(bool probeOnly) +{ std::string strDataDir = GetDataDir().string(); // Make sure only a single Bitcoin process is using the data directory. @@ -1048,11 +1047,45 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) try { static boost::interprocess::file_lock lock(pathLockFile.string().c_str()); - if (!lock.try_lock()) + if (!lock.try_lock()) { return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), strDataDir, _(PACKAGE_NAME))); + } + if (probeOnly) { + lock.unlock(); + } } catch(const boost::interprocess::interprocess_exception& e) { return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running.") + " %s.", strDataDir, _(PACKAGE_NAME), e.what())); } + return true; +} + +bool AppInitSanityChecks() +{ + // ********************************************************* Step 4: sanity checks + + // Initialize elliptic curve code + ECC_Start(); + globalVerifyHandle.reset(new ECCVerifyHandle()); + + // Sanity check + if (!InitSanityCheck()) + return InitError(strprintf(_("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME))); + + // Probe the data directory lock to give an early error message, if possible + return LockDataDirectory(true); +} + +bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler) +{ + const CChainParams& chainparams = Params(); + // ********************************************************* Step 4a: application initialization + // After daemonization get the data directory lock again and hold on to it until exit + // This creates a slight window for a race condition to happen, however this condition is harmless: it + // will at most make us exit without printing a message to console. 
+ if (!LockDataDirectory(false)) { + // Detailed error printed inside LockDataDirectory + return false; + } #ifndef WIN32 CreatePidFile(GetPidFile(), getpid()); @@ -1066,7 +1099,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) if (!fLogTimestamps) LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime())); LogPrintf("Default data directory %s\n", GetDefaultDataDir().string()); - LogPrintf("Using data directory %s\n", strDataDir); + LogPrintf("Using data directory %s\n", GetDataDir().string()); LogPrintf("Using config file %s\n", GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)).string()); LogPrintf("Using at most %i connections (%i file descriptors available)\n", nMaxConnections, nFD); @@ -1087,7 +1120,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) * that the server is there and will be ready later). Warmup mode will * be disabled when initialisation is finished. */ - if (fServer) + if (GetBoolArg("-server", false)) { uiInterface.InitMessage.connect(SetRPCWarmupStatus); if (!AppInitServers(threadGroup)) @@ -1102,6 +1135,10 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) return false; #endif // ********************************************************* Step 6: network initialization + // Note that we absolutely cannot open any actual connections + // until the very end ("start node") as the UTXO/block state + // is not yet setup and may end up being set up twice if we + // need to reindex later. assert(!g_connman); g_connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()))); @@ -1322,7 +1359,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) CleanupBlockRevFiles(); } - if (!LoadBlockIndex()) { + if (!LoadBlockIndex(chainparams)) { strLoadError = _("Error loading block database"); break; } @@ -1449,7 +1486,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) } } - if (Params().GetConsensus().vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) { + if (chainparams.GetConsensus().vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) { // Only advertize witness capabilities if they have a reasonable start time. // This allows us to have the code merged without a defined softfork, by setting its // end time to 0. 
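
The init.cpp hunks above split the old monolithic AppInit2 into staged entry points (AppInitBasicSetup, AppInitParameterInteraction, AppInitSanityChecks, AppInitMain), and the bitcoind.cpp hunk earlier in this diff calls the early stages before daemonizing. The sketch below is illustrative only and not part of the patch: it assumes the declarations added to init.h later in this diff and the SoftSetBoolArg helper from util.h, and it omits help/version handling and detailed error reporting.

#include "init.h"        // AppInitBasicSetup, AppInitParameterInteraction, AppInitSanityChecks, AppInitMain
#include "scheduler.h"   // CScheduler
#include "util.h"        // SoftSetBoolArg
#include <boost/thread.hpp>

// Hypothetical driver showing the call order bitcoind now follows.
static bool RunStagedInit(boost::thread_group& threadGroup, CScheduler& scheduler)
{
    SoftSetBoolArg("-server", true);     // bitcoind defaults -server to true
    InitLogging();                       // set early so parameter interactions go to console
    InitParameterInteraction();

    if (!AppInitBasicSetup())            // signal handlers and basic OS setup
        return false;
    if (!AppInitParameterInteraction())  // validate and derive runtime parameters
        return false;
    if (!AppInitSanityChecks())          // ECC init, sanity check, probe the datadir lock
        return false;

    // -daemon would fork here; everything above is safe before daemonization.
    // AppInitMain then re-takes the data directory lock and loads chain state.
    return AppInitMain(threadGroup, scheduler);
}
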
@@ -1495,13 +1532,6 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait); } -#ifdef ENABLE_WALLET - // Add wallet transactions that aren't already in a block to mempool - // Do this here as mempool requires genesis block to be loaded - if (pwalletMain) - pwalletMain->ReacceptWalletTransactions(); -#endif - // ********************************************************* Step 11: start node //// debug print @@ -1539,10 +1569,8 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler) uiInterface.InitMessage(_("Done loading")); #ifdef ENABLE_WALLET - if (pwalletMain) { - // Run a thread to flush wallet periodically - threadGroup.create_thread(boost::bind(&ThreadFlushWalletDB, boost::ref(pwalletMain->strWalletFile))); - } + if (pwalletMain) + pwalletMain->postInitProcess(threadGroup); #endif return !fRequestShutdown; diff --git a/src/init.h b/src/init.h index 63e07ccb3c..1b87e0ec52 100644 --- a/src/init.h +++ b/src/init.h @@ -25,7 +25,30 @@ void Shutdown(); void InitLogging(); //!Parameter interaction: change current parameters depending on various rules void InitParameterInteraction(); -bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler); + +/** Initialize bitcoin core: Basic context setup. + * @note This can be done before daemonization. + * @pre Parameters should be parsed and config file should be read. + */ +bool AppInitBasicSetup(); +/** + * Initialization: parameter interaction. + * @note This can be done before daemonization. + * @pre Parameters should be parsed and config file should be read, AppInitBasicSetup should have been called. + */ +bool AppInitParameterInteraction(); +/** + * Initialization sanity checks: ecc init, sanity checks, dir lock. + * @note This can be done before daemonization. + * @pre Parameters should be parsed and config file should be read, AppInitParameterInteraction should have been called. + */ +bool AppInitSanityChecks(); +/** + * Bitcoin core main initialization. + * @note This should only be done after daemonization. + * @pre Parameters should be parsed and config file should be read, AppInitSanityChecks should have been called. 
+ */ +bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler); /** The help message mode determines what help message to show */ enum HelpMessageMode { diff --git a/src/key.cpp b/src/key.cpp index aae9b042ac..b3ea98fb92 100644 --- a/src/key.cpp +++ b/src/key.cpp @@ -125,8 +125,8 @@ bool CKey::Check(const unsigned char *vch) { void CKey::MakeNewKey(bool fCompressedIn) { do { - GetStrongRandBytes(vch, sizeof(vch)); - } while (!Check(vch)); + GetStrongRandBytes(keydata.data(), keydata.size()); + } while (!Check(keydata.data())); fValid = true; fCompressed = fCompressedIn; } @@ -224,20 +224,18 @@ bool CKey::Load(CPrivKey &privkey, CPubKey &vchPubKey, bool fSkipCheck=false) { bool CKey::Derive(CKey& keyChild, ChainCode &ccChild, unsigned int nChild, const ChainCode& cc) const { assert(IsValid()); assert(IsCompressed()); - unsigned char out[64]; - LockObject(out); + std::vector<unsigned char, secure_allocator<unsigned char>> vout(64); if ((nChild >> 31) == 0) { CPubKey pubkey = GetPubKey(); assert(pubkey.begin() + 33 == pubkey.end()); - BIP32Hash(cc, nChild, *pubkey.begin(), pubkey.begin()+1, out); + BIP32Hash(cc, nChild, *pubkey.begin(), pubkey.begin()+1, vout.data()); } else { assert(begin() + 32 == end()); - BIP32Hash(cc, nChild, 0, begin(), out); + BIP32Hash(cc, nChild, 0, begin(), vout.data()); } - memcpy(ccChild.begin(), out+32, 32); + memcpy(ccChild.begin(), vout.data()+32, 32); memcpy((unsigned char*)keyChild.begin(), begin(), 32); - bool ret = secp256k1_ec_privkey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), out); - UnlockObject(out); + bool ret = secp256k1_ec_privkey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), vout.data()); keyChild.fCompressed = true; keyChild.fValid = ret; return ret; @@ -253,12 +251,10 @@ bool CExtKey::Derive(CExtKey &out, unsigned int _nChild) const { void CExtKey::SetMaster(const unsigned char *seed, unsigned int nSeedLen) { static const unsigned char hashkey[] = {'B','i','t','c','o','i','n',' ','s','e','e','d'}; - unsigned char out[64]; - LockObject(out); - CHMAC_SHA512(hashkey, sizeof(hashkey)).Write(seed, nSeedLen).Finalize(out); - key.Set(&out[0], &out[32], true); - memcpy(chaincode.begin(), &out[32], 32); - UnlockObject(out); + std::vector<unsigned char, secure_allocator<unsigned char>> vout(64); + CHMAC_SHA512(hashkey, sizeof(hashkey)).Write(seed, nSeedLen).Finalize(vout.data()); + key.Set(&vout[0], &vout[32], true); + memcpy(chaincode.begin(), &vout[32], 32); nDepth = 0; nChild = 0; memset(vchFingerprint, 0, sizeof(vchFingerprint)); @@ -308,12 +304,10 @@ void ECC_Start() { { // Pass in a random blinding seed to the secp256k1 context. - unsigned char seed[32]; - LockObject(seed); - GetRandBytes(seed, 32); - bool ret = secp256k1_context_randomize(ctx, seed); + std::vector<unsigned char, secure_allocator<unsigned char>> vseed(32); + GetRandBytes(vseed.data(), 32); + bool ret = secp256k1_context_randomize(ctx, vseed.data()); assert(ret); - UnlockObject(seed); } secp256k1_context_sign = ctx; @@ -43,9 +43,7 @@ private: bool fCompressed; //! The actual byte data - unsigned char vch[32]; - - static_assert(sizeof(vch) == 32, "vch must be 32 bytes in length to not break serialization"); + std::vector<unsigned char, secure_allocator<unsigned char> > keydata; //! Check whether the 32-byte array pointed to be vch is valid keydata. bool static Check(const unsigned char* vch); @@ -54,37 +52,30 @@ public: //! Construct an invalid private key. 
CKey() : fValid(false), fCompressed(false) { - LockObject(vch); - } - - //! Copy constructor. This is necessary because of memlocking. - CKey(const CKey& secret) : fValid(secret.fValid), fCompressed(secret.fCompressed) - { - LockObject(vch); - memcpy(vch, secret.vch, sizeof(vch)); + // Important: vch must be 32 bytes in length to not break serialization + keydata.resize(32); } //! Destructor (again necessary because of memlocking). ~CKey() { - UnlockObject(vch); } friend bool operator==(const CKey& a, const CKey& b) { return a.fCompressed == b.fCompressed && a.size() == b.size() && - memcmp(&a.vch[0], &b.vch[0], a.size()) == 0; + memcmp(a.keydata.data(), b.keydata.data(), a.size()) == 0; } //! Initialize using begin and end iterators to byte data. template <typename T> void Set(const T pbegin, const T pend, bool fCompressedIn) { - if (pend - pbegin != sizeof(vch)) { + if (size_t(pend - pbegin) != keydata.size()) { fValid = false; } else if (Check(&pbegin[0])) { - memcpy(vch, (unsigned char*)&pbegin[0], sizeof(vch)); + memcpy(keydata.data(), (unsigned char*)&pbegin[0], keydata.size()); fValid = true; fCompressed = fCompressedIn; } else { @@ -93,9 +84,9 @@ public: } //! Simple read-only vector-like interface. - unsigned int size() const { return (fValid ? sizeof(vch) : 0); } - const unsigned char* begin() const { return vch; } - const unsigned char* end() const { return vch + size(); } + unsigned int size() const { return (fValid ? keydata.size() : 0); } + const unsigned char* begin() const { return keydata.data(); } + const unsigned char* end() const { return keydata.data() + size(); } //! Check whether this private key is valid. bool IsValid() const { return fValid; } @@ -171,7 +162,7 @@ struct CExtKey { CExtPubKey Neuter() const; void SetMaster(const unsigned char* seed, unsigned int nSeedLen); template <typename Stream> - void Serialize(Stream& s, int nType, int nVersion) const + void Serialize(Stream& s) const { unsigned int len = BIP32_EXTKEY_SIZE; ::WriteCompactSize(s, len); @@ -180,7 +171,7 @@ struct CExtKey { s.write((const char *)&code[0], len); } template <typename Stream> - void Unserialize(Stream& s, int nType, int nVersion) + void Unserialize(Stream& s) { unsigned int len = ::ReadCompactSize(s); unsigned char code[BIP32_EXTKEY_SIZE]; diff --git a/src/leveldb/.travis.yml b/src/leveldb/.travis.yml new file mode 100644 index 0000000000..f5bd74c454 --- /dev/null +++ b/src/leveldb/.travis.yml @@ -0,0 +1,13 @@ +language: cpp +compiler: +- clang +- gcc +os: +- linux +- osx +sudo: false +before_install: +- echo $LANG +- echo $LC_ALL +script: +- make -j 4 check diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile index 2bd2cadcdd..07a5a1ead6 100644 --- a/src/leveldb/Makefile +++ b/src/leveldb/Makefile @@ -20,208 +20,395 @@ $(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \ # this file is generated by the previous line to set build flags and sources include build_config.mk +TESTS = \ + db/autocompact_test \ + db/c_test \ + db/corruption_test \ + db/db_test \ + db/dbformat_test \ + db/fault_injection_test \ + db/filename_test \ + db/log_test \ + db/recovery_test \ + db/skiplist_test \ + db/version_edit_test \ + db/version_set_test \ + db/write_batch_test \ + helpers/memenv/memenv_test \ + issues/issue178_test \ + issues/issue200_test \ + table/filter_block_test \ + table/table_test \ + util/arena_test \ + util/bloom_test \ + util/cache_test \ + util/coding_test \ + util/crc32c_test \ + util/env_test \ + util/hash_test + +UTILS = \ + db/db_bench \ + db/leveldbutil + +# Put the 
object files in a subdirectory, but the application at the top of the object dir. +PROGNAMES := $(notdir $(TESTS) $(UTILS)) + +# On Linux may need libkyotocabinet-dev for dependency. +BENCHMARKS = \ + doc/bench/db_bench_sqlite3 \ + doc/bench/db_bench_tree_db + CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT) CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) LDFLAGS += $(PLATFORM_LDFLAGS) LIBS += $(PLATFORM_LIBS) -LIBOBJECTS = $(SOURCES:.cc=.o) -MEMENVOBJECTS = $(MEMENV_SOURCES:.cc=.o) - -TESTUTIL = ./util/testutil.o -TESTHARNESS = ./util/testharness.o $(TESTUTIL) +SIMULATOR_OUTDIR=out-ios-x86 +DEVICE_OUTDIR=out-ios-arm -# Note: iOS should probably be using libtool, not ar. ifeq ($(PLATFORM), IOS) +# Note: iOS should probably be using libtool, not ar. AR=xcrun ar +SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path) +DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path) +DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64 +SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64 +STATIC_OUTDIR=out-ios-universal +else +STATIC_OUTDIR=out-static +SHARED_OUTDIR=out-shared +STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES)) +SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench) endif -TESTS = \ - arena_test \ - autocompact_test \ - bloom_test \ - c_test \ - cache_test \ - coding_test \ - corruption_test \ - crc32c_test \ - db_test \ - dbformat_test \ - env_test \ - filename_test \ - filter_block_test \ - hash_test \ - issue178_test \ - issue200_test \ - log_test \ - memenv_test \ - skiplist_test \ - table_test \ - version_edit_test \ - version_set_test \ - write_batch_test - -PROGRAMS = db_bench leveldbutil $(TESTS) -BENCHMARKS = db_bench_sqlite3 db_bench_tree_db - -LIBRARY = libleveldb.a -MEMENVLIBRARY = libmemenv.a +STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o)) +STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) + +DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o)) +DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) + +SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o)) +SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) + +SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o)) +SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) + +TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o +TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL) + +STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS))) +STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS))) +STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS) +DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS) +SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS) default: all # Should we build shared libraries? ifneq ($(PLATFORM_SHARED_EXT),) +# Many leveldb test apps use non-exported API's. Only build a subset for testing. 
+SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS) + ifneq ($(PLATFORM_SHARED_VERSIONED),true) -SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT) -SHARED2 = $(SHARED1) -SHARED3 = $(SHARED1) -SHARED = $(SHARED1) +SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT) +SHARED_LIB2 = $(SHARED_LIB1) +SHARED_LIB3 = $(SHARED_LIB1) +SHARED_LIBS = $(SHARED_LIB1) +SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a else # Update db.h if you change these. -SHARED_MAJOR = 1 -SHARED_MINOR = 18 -SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT) -SHARED2 = $(SHARED1).$(SHARED_MAJOR) -SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR) -SHARED = $(SHARED1) $(SHARED2) $(SHARED3) -$(SHARED1): $(SHARED3) - ln -fs $(SHARED3) $(SHARED1) -$(SHARED2): $(SHARED3) - ln -fs $(SHARED3) $(SHARED2) +SHARED_VERSION_MAJOR = 1 +SHARED_VERSION_MINOR = 19 +SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT) +SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR) +SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR) +SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3) +$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3) + ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1) +$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3) + ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2) +SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a endif -$(SHARED3): - $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED2) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SOURCES) -o $(SHARED3) $(LIBS) +$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS) + $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS) endif # PLATFORM_SHARED_EXT -all: $(SHARED) $(LIBRARY) +all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS) -check: all $(PROGRAMS) $(TESTS) - for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done +check: $(STATIC_PROGRAMS) + for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done clean: - -rm -f $(PROGRAMS) $(BENCHMARKS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) */*.o */*/*.o ios-x86/*/*.o ios-arm/*/*.o build_config.mk - -rm -rf ios-x86/* ios-arm/* + -rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal + -rm -f build_config.mk + -rm -rf ios-x86 ios-arm -$(LIBRARY): $(LIBOBJECTS) - rm -f $@ - $(AR) -rs $@ $(LIBOBJECTS) +$(STATIC_OUTDIR): + mkdir $@ -db_bench: db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) - $(CXX) $(LDFLAGS) db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS) +$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR) + mkdir $@ -db_bench_sqlite3: doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) - $(CXX) $(LDFLAGS) doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS) +$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR) + mkdir -p $@ -db_bench_tree_db: doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL) - $(CXX) $(LDFLAGS) doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS) +$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR) + mkdir $@ -leveldbutil: db/leveldb_main.o $(LIBOBJECTS) - $(CXX) $(LDFLAGS) db/leveldb_main.o $(LIBOBJECTS) -o $@ $(LIBS) +$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR) + mkdir $@ -arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR) + mkdir 
$@ -autocompact_test: db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +.PHONY: STATIC_OBJDIRS +STATIC_OBJDIRS: \ + $(STATIC_OUTDIR)/db \ + $(STATIC_OUTDIR)/port \ + $(STATIC_OUTDIR)/table \ + $(STATIC_OUTDIR)/util \ + $(STATIC_OUTDIR)/helpers/memenv -bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SHARED_OUTDIR): + mkdir $@ -c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR) + mkdir $@ -cache_test: util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR) + mkdir -p $@ -coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR) + mkdir $@ -corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR) + mkdir $@ -crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR) + mkdir $@ -db_test: db/db_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/db_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +.PHONY: SHARED_OBJDIRS +SHARED_OBJDIRS: \ + $(SHARED_OUTDIR)/db \ + $(SHARED_OUTDIR)/port \ + $(SHARED_OUTDIR)/table \ + $(SHARED_OUTDIR)/util \ + $(SHARED_OUTDIR)/helpers/memenv -dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(DEVICE_OUTDIR): + mkdir $@ -env_test: util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR) + mkdir $@ -filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR) + mkdir -p $@ -filter_block_test: table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR) + mkdir $@ -hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR) + mkdir $@ -issue178_test: issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR) + mkdir $@ -issue200_test: issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +.PHONY: DEVICE_OBJDIRS +DEVICE_OBJDIRS: \ + $(DEVICE_OUTDIR)/db \ + $(DEVICE_OUTDIR)/port \ + $(DEVICE_OUTDIR)/table \ + $(DEVICE_OUTDIR)/util \ + $(DEVICE_OUTDIR)/helpers/memenv -log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SIMULATOR_OUTDIR): + mkdir $@ -table_test: table/table_test.o $(LIBOBJECTS) 
$(TESTHARNESS) - $(CXX) $(LDFLAGS) table/table_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR) + mkdir $@ -skiplist_test: db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR) + mkdir -p $@ -version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR) + mkdir $@ -version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR) + mkdir $@ -write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) +$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR) + mkdir $@ -$(MEMENVLIBRARY) : $(MEMENVOBJECTS) - rm -f $@ - $(AR) -rs $@ $(MEMENVOBJECTS) +.PHONY: SIMULATOR_OBJDIRS +SIMULATOR_OBJDIRS: \ + $(SIMULATOR_OUTDIR)/db \ + $(SIMULATOR_OUTDIR)/port \ + $(SIMULATOR_OUTDIR)/table \ + $(SIMULATOR_OUTDIR)/util \ + $(SIMULATOR_OUTDIR)/helpers/memenv -memenv_test : helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS) - $(CXX) $(LDFLAGS) helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS) -o $@ $(LIBS) +$(STATIC_ALLOBJS): | STATIC_OBJDIRS +$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS +$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS +$(SHARED_ALLOBJS): | SHARED_OBJDIRS ifeq ($(PLATFORM), IOS) -# For iOS, create universal object files to be used on both the simulator and +$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS) + rm -f $@ + $(AR) -rs $@ $(DEVICE_LIBOBJECTS) + +$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS) + rm -f $@ + $(AR) -rs $@ $(SIMULATOR_LIBOBJECTS) + +$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS) + rm -f $@ + $(AR) -rs $@ $(DEVICE_MEMENVOBJECTS) + +$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS) + rm -f $@ + $(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS) + +# For iOS, create universal object libraries to be used on both the simulator and # a device. 
-PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms -SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer -DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer -IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString) -IOSARCH=-arch armv6 -arch armv7 -arch armv7s -arch arm64 - -.cc.o: - mkdir -p ios-x86/$(dir $@) - xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@ - mkdir -p ios-arm/$(dir $@) - xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@ - xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@ - -.c.o: - mkdir -p ios-x86/$(dir $@) - xcrun -sdk iphonesimulator $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@ - mkdir -p ios-arm/$(dir $@) - xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@ - xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@ +$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a + lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@ +$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a + lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@ else -.cc.o: +$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS) + rm -f $@ + $(AR) -rs $@ $(STATIC_LIBOBJECTS) + +$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS) + rm -f $@ + $(AR) -rs $@ $(STATIC_MEMENVOBJECTS) +endif + +$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS) + rm -f $@ + $(AR) -rs $@ $(SHARED_MEMENVOBJECTS) + +$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) + $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS) + +$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) + $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS) + +$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc 
$(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + 
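[Editor's note, not part of the patch] The run of rules this sits in (continued below) links each test directly from its .cc source against $(STATIC_LIBOBJECTS) and the bundled $(TESTHARNESS). As a point of reference, a new test added under this scheme is just another translation unit of the following shape; the file name and fixture are made up, and the only APIs assumed are the harness pieces already used elsewhere in this patch (util/testharness.h, TEST, ASSERT_OK, test::TmpDir, leveldb::test::RunAllTests).

    // example_test.cc -- hypothetical test built by a rule like the ones above.
    #include "leveldb/db.h"
    #include "util/testharness.h"

    namespace leveldb {

    class ExampleTest { };   // the TEST macro keys each case off a fixture class

    TEST(ExampleTest, PutGet) {
      std::string dbname = test::TmpDir() + "/example_test";
      DestroyDB(dbname, Options());                // start from a clean directory
      Options options;
      options.create_if_missing = true;
      DB* db = NULL;
      ASSERT_OK(DB::Open(options, dbname, &db));
      ASSERT_OK(db->Put(WriteOptions(), "k", "v"));
      std::string value;
      ASSERT_OK(db->Get(ReadOptions(), "k", &value));
      ASSERT_EQ("v", value);
      delete db;
      DestroyDB(dbname, Options());
    }

    }  // namespace leveldb

    int main(int argc, char** argv) {
      return leveldb::test::RunAllTests();
    }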
+$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) + $(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) + +$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) + $(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS) + +$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL) + $(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS) + +.PHONY: run-shared +run-shared: $(SHARED_OUTDIR)/db_bench + LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench + +$(SIMULATOR_OUTDIR)/%.o: %.cc + xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@ + +$(DEVICE_OUTDIR)/%.o: %.cc + xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@ + +$(SIMULATOR_OUTDIR)/%.o: %.c + xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@ + +$(DEVICE_OUTDIR)/%.o: %.c + xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@ + +$(STATIC_OUTDIR)/%.o: %.cc $(CXX) $(CXXFLAGS) -c $< -o $@ -.c.o: +$(STATIC_OUTDIR)/%.o: %.c $(CC) $(CFLAGS) -c $< -o $@ -endif + +$(SHARED_OUTDIR)/%.o: %.cc + $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ + +$(SHARED_OUTDIR)/%.o: %.c + $(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ diff --git a/src/leveldb/README b/src/leveldb/README deleted file mode 100644 index 3618adeeed..0000000000 --- a/src/leveldb/README +++ /dev/null @@ -1,51 +0,0 @@ -leveldb: A key-value store -Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) - -The code under this directory implements a system for maintaining a -persistent key/value store. - -See doc/index.html for more explanation. -See doc/impl.html for a brief overview of the implementation. - -The public interface is in include/*.h. Callers should not include or -rely on the details of any other header files in this package. Those -internal APIs may be changed without warning. - -Guide to header files: - -include/db.h - Main interface to the DB: Start here - -include/options.h - Control over the behavior of an entire database, and also - control over the behavior of individual reads and writes. - -include/comparator.h - Abstraction for user-specified comparison function. If you want - just bytewise comparison of keys, you can use the default comparator, - but clients can write their own comparator implementations if they - want custom ordering (e.g. to handle different character - encodings, etc.) - -include/iterator.h - Interface for iterating over data. You can get an iterator - from a DB object. - -include/write_batch.h - Interface for atomically applying multiple updates to a database. - -include/slice.h - A simple module for maintaining a pointer and a length into some - other byte array. - -include/status.h - Status is returned from many of the public interfaces and is used - to report success and various kinds of errors. - -include/env.h - Abstraction of the OS environment. 
A posix implementation of - this interface is in util/env_posix.cc - -include/table.h -include/table_builder.h - Lower-level modules that most clients probably won't use directly diff --git a/src/leveldb/README.md b/src/leveldb/README.md index 480affb5ca..c75b185e0e 100644 --- a/src/leveldb/README.md +++ b/src/leveldb/README.md @@ -1,5 +1,7 @@ **LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.** +[![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb) + Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) # Features @@ -10,9 +12,11 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) * Multiple changes can be made in one atomic batch. * Users can create a transient snapshot to get a consistent view of data. * Forward and backward iteration is supported over the data. - * Data is automatically compressed using the [Snappy compression library](http://code.google.com/p/snappy). + * Data is automatically compressed using the [Snappy compression library](http://google.github.io/snappy/). * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions. - * [Detailed documentation](http://htmlpreview.github.io/?https://github.com/google/leveldb/blob/master/doc/index.html) about how to use the library is included with the source code. + +# Documentation + [LevelDB library documentation](https://rawgit.com/google/leveldb/master/doc/index.html) is online and bundled with the source code. # Limitations @@ -20,6 +24,37 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) * Only a single process (possibly multi-threaded) can access a particular database at a time. * There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library. +# Contributing to the leveldb Project +The leveldb project welcomes contributions. leveldb's primary goal is to be +a reliable and fast key/value store. Changes that are in line with the +features/limitations outlined above, and meet the requirements below, +will be considered. + +Contribution requirements: + +1. **POSIX only**. We _generally_ will only accept changes that are both + compiled, and tested on a POSIX platform - usually Linux. Very small + changes will sometimes be accepted, but consider that more of an + exception than the rule. + +2. **Stable API**. We strive very hard to maintain a stable API. Changes that + require changes for projects using leveldb _might_ be rejected without + sufficient benefit to the project. + +3. **Tests**: All changes must be accompanied by a new (or changed) test, or + a sufficient explanation as to why a new (or changed) test is not required. + +## Submitting a Pull Request +Before any pull request will be accepted the author must first sign a +Contributor License Agreement (CLA) at https://cla.developers.google.com/. + +In order to keep the commit timeline linear +[squash](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits) +your changes down to a single commit and [rebase](https://git-scm.com/docs/git-rebase) +on google/leveldb/master. This keeps the commit timeline linear and more easily sync'ed +with the internal repository at Google. 
More information at GitHub's +[About Git rebase](https://help.github.com/articles/about-git-rebase/) page. + # Performance Here is a performance report (with explanations) from the run of the diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform index a1101c1bda..d7edab1d87 100755 --- a/src/leveldb/build_detect_platform +++ b/src/leveldb/build_detect_platform @@ -175,7 +175,7 @@ DIRS="$PREFIX/db $PREFIX/util $PREFIX/table" set -f # temporarily disable globbing so that our patterns aren't expanded PRUNE_TEST="-name *test*.cc -prune" PRUNE_BENCH="-name *_bench.cc -prune" -PRUNE_TOOL="-name leveldb_main.cc -prune" +PRUNE_TOOL="-name leveldbutil.cc -prune" PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "` set +f # re-enable globbing diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc index 96afc68913..37a484d25f 100644 --- a/src/leveldb/db/corruption_test.cc +++ b/src/leveldb/db/corruption_test.cc @@ -36,7 +36,7 @@ class CorruptionTest { tiny_cache_ = NewLRUCache(100); options_.env = &env_; options_.block_cache = tiny_cache_; - dbname_ = test::TmpDir() + "/db_test"; + dbname_ = test::TmpDir() + "/corruption_test"; DestroyDB(dbname_, options_); db_ = NULL; diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/db/db_bench.cc index 705a170aae..7a0f5e08cd 100644 --- a/src/leveldb/db/db_bench.cc +++ b/src/leveldb/db/db_bench.cc @@ -33,6 +33,7 @@ // readmissing -- read N missing keys in random order // readhot -- read N times in random order from 1% section of DB // seekrandom -- N random seeks +// open -- cost of opening a DB // crc32c -- repeated crc32c of 4K of data // acquireload -- load N*1000 times // Meta operations: @@ -99,6 +100,9 @@ static int FLAGS_bloom_bits = -1; // benchmark will fail. static bool FLAGS_use_existing_db = false; +// If true, reuse existing log/MANIFEST files when re-opening a database. +static bool FLAGS_reuse_logs = false; + // Use the db with the following name. 
static const char* FLAGS_db = NULL; @@ -138,6 +142,7 @@ class RandomGenerator { } }; +#if defined(__linux) static Slice TrimSpace(Slice s) { size_t start = 0; while (start < s.size() && isspace(s[start])) { @@ -149,6 +154,7 @@ static Slice TrimSpace(Slice s) { } return Slice(s.data() + start, limit - start); } +#endif static void AppendWithSpace(std::string* str, Slice msg) { if (msg.empty()) return; @@ -442,7 +448,11 @@ class Benchmark { bool fresh_db = false; int num_threads = FLAGS_threads; - if (name == Slice("fillseq")) { + if (name == Slice("open")) { + method = &Benchmark::OpenBench; + num_ /= 10000; + if (num_ < 1) num_ = 1; + } else if (name == Slice("fillseq")) { fresh_db = true; method = &Benchmark::WriteSeq; } else if (name == Slice("fillbatch")) { @@ -695,6 +705,7 @@ class Benchmark { options.write_buffer_size = FLAGS_write_buffer_size; options.max_open_files = FLAGS_open_files; options.filter_policy = filter_policy_; + options.reuse_logs = FLAGS_reuse_logs; Status s = DB::Open(options, FLAGS_db, &db_); if (!s.ok()) { fprintf(stderr, "open error: %s\n", s.ToString().c_str()); @@ -702,6 +713,14 @@ class Benchmark { } } + void OpenBench(ThreadState* thread) { + for (int i = 0; i < num_; i++) { + delete db_; + Open(); + thread->stats.FinishedSingleOp(); + } + } + void WriteSeq(ThreadState* thread) { DoWrite(thread, true); } @@ -941,6 +960,9 @@ int main(int argc, char** argv) { } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_use_existing_db = n; + } else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_reuse_logs = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc index 49b95953b4..60f4e66e55 100644 --- a/src/leveldb/db/db_impl.cc +++ b/src/leveldb/db/db_impl.cc @@ -125,7 +125,7 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname) db_lock_(NULL), shutting_down_(NULL), bg_cv_(&mutex_), - mem_(new MemTable(internal_comparator_)), + mem_(NULL), imm_(NULL), logfile_(NULL), logfile_number_(0), @@ -134,7 +134,6 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname) tmp_batch_(new WriteBatch), bg_compaction_scheduled_(false), manual_compaction_(NULL) { - mem_->Ref(); has_imm_.Release_Store(NULL); // Reserve ten files or so for other uses and give the rest to TableCache. @@ -271,7 +270,7 @@ void DBImpl::DeleteObsoleteFiles() { } } -Status DBImpl::Recover(VersionEdit* edit) { +Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) { mutex_.AssertHeld(); // Ignore error from CreateDir since the creation of the DB is @@ -301,66 +300,69 @@ Status DBImpl::Recover(VersionEdit* edit) { } } - s = versions_->Recover(); - if (s.ok()) { - SequenceNumber max_sequence(0); - - // Recover from all newer log files than the ones named in the - // descriptor (new log files may have been added by the previous - // incarnation without registering them in the descriptor). - // - // Note that PrevLogNumber() is no longer used, but we pay - // attention to it in case we are recovering a database - // produced by an older version of leveldb. 
- const uint64_t min_log = versions_->LogNumber(); - const uint64_t prev_log = versions_->PrevLogNumber(); - std::vector<std::string> filenames; - s = env_->GetChildren(dbname_, &filenames); + s = versions_->Recover(save_manifest); + if (!s.ok()) { + return s; + } + SequenceNumber max_sequence(0); + + // Recover from all newer log files than the ones named in the + // descriptor (new log files may have been added by the previous + // incarnation without registering them in the descriptor). + // + // Note that PrevLogNumber() is no longer used, but we pay + // attention to it in case we are recovering a database + // produced by an older version of leveldb. + const uint64_t min_log = versions_->LogNumber(); + const uint64_t prev_log = versions_->PrevLogNumber(); + std::vector<std::string> filenames; + s = env_->GetChildren(dbname_, &filenames); + if (!s.ok()) { + return s; + } + std::set<uint64_t> expected; + versions_->AddLiveFiles(&expected); + uint64_t number; + FileType type; + std::vector<uint64_t> logs; + for (size_t i = 0; i < filenames.size(); i++) { + if (ParseFileName(filenames[i], &number, &type)) { + expected.erase(number); + if (type == kLogFile && ((number >= min_log) || (number == prev_log))) + logs.push_back(number); + } + } + if (!expected.empty()) { + char buf[50]; + snprintf(buf, sizeof(buf), "%d missing files; e.g.", + static_cast<int>(expected.size())); + return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin()))); + } + + // Recover in the order in which the logs were generated + std::sort(logs.begin(), logs.end()); + for (size_t i = 0; i < logs.size(); i++) { + s = RecoverLogFile(logs[i], (i == logs.size() - 1), save_manifest, edit, + &max_sequence); if (!s.ok()) { return s; } - std::set<uint64_t> expected; - versions_->AddLiveFiles(&expected); - uint64_t number; - FileType type; - std::vector<uint64_t> logs; - for (size_t i = 0; i < filenames.size(); i++) { - if (ParseFileName(filenames[i], &number, &type)) { - expected.erase(number); - if (type == kLogFile && ((number >= min_log) || (number == prev_log))) - logs.push_back(number); - } - } - if (!expected.empty()) { - char buf[50]; - snprintf(buf, sizeof(buf), "%d missing files; e.g.", - static_cast<int>(expected.size())); - return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin()))); - } - - // Recover in the order in which the logs were generated - std::sort(logs.begin(), logs.end()); - for (size_t i = 0; i < logs.size(); i++) { - s = RecoverLogFile(logs[i], edit, &max_sequence); - // The previous incarnation may not have written any MANIFEST - // records after allocating this log number. So we manually - // update the file number allocation counter in VersionSet. - versions_->MarkFileNumberUsed(logs[i]); - } + // The previous incarnation may not have written any MANIFEST + // records after allocating this log number. So we manually + // update the file number allocation counter in VersionSet. 
+ versions_->MarkFileNumberUsed(logs[i]); + } - if (s.ok()) { - if (versions_->LastSequence() < max_sequence) { - versions_->SetLastSequence(max_sequence); - } - } + if (versions_->LastSequence() < max_sequence) { + versions_->SetLastSequence(max_sequence); } - return s; + return Status::OK(); } -Status DBImpl::RecoverLogFile(uint64_t log_number, - VersionEdit* edit, +Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, + bool* save_manifest, VersionEdit* edit, SequenceNumber* max_sequence) { struct LogReporter : public log::Reader::Reporter { Env* env; @@ -405,6 +407,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, std::string scratch; Slice record; WriteBatch batch; + int compactions = 0; MemTable* mem = NULL; while (reader.ReadRecord(&record, &scratch) && status.ok()) { @@ -432,25 +435,52 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, } if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) { + compactions++; + *save_manifest = true; status = WriteLevel0Table(mem, edit, NULL); + mem->Unref(); + mem = NULL; if (!status.ok()) { // Reflect errors immediately so that conditions like full // file-systems cause the DB::Open() to fail. break; } - mem->Unref(); - mem = NULL; } } - if (status.ok() && mem != NULL) { - status = WriteLevel0Table(mem, edit, NULL); - // Reflect errors immediately so that conditions like full - // file-systems cause the DB::Open() to fail. + delete file; + + // See if we should keep reusing the last log file. + if (status.ok() && options_.reuse_logs && last_log && compactions == 0) { + assert(logfile_ == NULL); + assert(log_ == NULL); + assert(mem_ == NULL); + uint64_t lfile_size; + if (env_->GetFileSize(fname, &lfile_size).ok() && + env_->NewAppendableFile(fname, &logfile_).ok()) { + Log(options_.info_log, "Reusing old log %s \n", fname.c_str()); + log_ = new log::Writer(logfile_, lfile_size); + logfile_number_ = log_number; + if (mem != NULL) { + mem_ = mem; + mem = NULL; + } else { + // mem can be NULL if lognum exists but was empty. + mem_ = new MemTable(internal_comparator_); + mem_->Ref(); + } + } + } + + if (mem != NULL) { + // mem did not get reused; compact it. 
+ if (status.ok()) { + *save_manifest = true; + status = WriteLevel0Table(mem, edit, NULL); + } + mem->Unref(); } - if (mem != NULL) mem->Unref(); - delete file; return status; } @@ -821,8 +851,9 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, delete iter; if (s.ok()) { Log(options_.info_log, - "Generated table #%llu: %lld keys, %lld bytes", + "Generated table #%llu@%d: %lld keys, %lld bytes", (unsigned long long) output_number, + compact->compaction->level(), (unsigned long long) current_entries, (unsigned long long) current_bytes); } @@ -1395,6 +1426,19 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) { } else if (in == "sstables") { *value = versions_->current()->DebugString(); return true; + } else if (in == "approximate-memory-usage") { + size_t total_usage = options_.block_cache->TotalCharge(); + if (mem_) { + total_usage += mem_->ApproximateMemoryUsage(); + } + if (imm_) { + total_usage += imm_->ApproximateMemoryUsage(); + } + char buf[50]; + snprintf(buf, sizeof(buf), "%llu", + static_cast<unsigned long long>(total_usage)); + value->append(buf); + return true; } return false; @@ -1449,8 +1493,11 @@ Status DB::Open(const Options& options, const std::string& dbname, DBImpl* impl = new DBImpl(options, dbname); impl->mutex_.Lock(); VersionEdit edit; - Status s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists - if (s.ok()) { + // Recover handles create_if_missing, error_if_exists + bool save_manifest = false; + Status s = impl->Recover(&edit, &save_manifest); + if (s.ok() && impl->mem_ == NULL) { + // Create new log and a corresponding memtable. uint64_t new_log_number = impl->versions_->NewFileNumber(); WritableFile* lfile; s = options.env->NewWritableFile(LogFileName(dbname, new_log_number), @@ -1460,15 +1507,22 @@ Status DB::Open(const Options& options, const std::string& dbname, impl->logfile_ = lfile; impl->logfile_number_ = new_log_number; impl->log_ = new log::Writer(lfile); - s = impl->versions_->LogAndApply(&edit, &impl->mutex_); - } - if (s.ok()) { - impl->DeleteObsoleteFiles(); - impl->MaybeScheduleCompaction(); + impl->mem_ = new MemTable(impl->internal_comparator_); + impl->mem_->Ref(); } } + if (s.ok() && save_manifest) { + edit.SetPrevLogNumber(0); // No older logs needed after recovery. + edit.SetLogNumber(impl->logfile_number_); + s = impl->versions_->LogAndApply(&edit, &impl->mutex_); + } + if (s.ok()) { + impl->DeleteObsoleteFiles(); + impl->MaybeScheduleCompaction(); + } impl->mutex_.Unlock(); if (s.ok()) { + assert(impl->mem_ != NULL); *dbptr = impl; } else { delete impl; diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h index cfc998164a..8ff323e728 100644 --- a/src/leveldb/db/db_impl.h +++ b/src/leveldb/db/db_impl.h @@ -78,7 +78,8 @@ class DBImpl : public DB { // Recover the descriptor from persistent storage. May do a significant // amount of work to recover recently logged updates. Any changes to // be made to the descriptor are added to *edit. - Status Recover(VersionEdit* edit) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + Status Recover(VersionEdit* edit, bool* save_manifest) + EXCLUSIVE_LOCKS_REQUIRED(mutex_); void MaybeIgnoreError(Status* s) const; @@ -90,9 +91,8 @@ class DBImpl : public DB { // Errors are recorded in bg_error_. 
void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_); - Status RecoverLogFile(uint64_t log_number, - VersionEdit* edit, - SequenceNumber* max_sequence) + Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest, + VersionEdit* edit, SequenceNumber* max_sequence) EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base) diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc index 0fed9137d5..a0b08bc19c 100644 --- a/src/leveldb/db/db_test.cc +++ b/src/leveldb/db/db_test.cc @@ -193,6 +193,7 @@ class DBTest { // Sequence of option configurations to try enum OptionConfig { kDefault, + kReuse, kFilter, kUncompressed, kEnd @@ -237,7 +238,11 @@ class DBTest { // Return the current option configuration. Options CurrentOptions() { Options options; + options.reuse_logs = false; switch (option_config_) { + case kReuse: + options.reuse_logs = true; + break; case kFilter: options.filter_policy = filter_policy_; break; @@ -558,6 +563,17 @@ TEST(DBTest, GetFromVersions) { } while (ChangeOptions()); } +TEST(DBTest, GetMemUsage) { + do { + ASSERT_OK(Put("foo", "v1")); + std::string val; + ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val)); + int mem_usage = atoi(val.c_str()); + ASSERT_GT(mem_usage, 0); + ASSERT_LT(mem_usage, 5*1024*1024); + } while (ChangeOptions()); +} + TEST(DBTest, GetSnapshot) { do { // Try with both a short key and a long key @@ -1080,6 +1096,14 @@ TEST(DBTest, ApproximateSizes) { // 0 because GetApproximateSizes() does not account for memtable space ASSERT_TRUE(Between(Size("", Key(50)), 0, 0)); + if (options.reuse_logs) { + // Recovery will reuse memtable, and GetApproximateSizes() does not + // account for memtable usage; + Reopen(&options); + ASSERT_TRUE(Between(Size("", Key(50)), 0, 0)); + continue; + } + // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { Reopen(&options); @@ -1123,6 +1147,11 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000))); ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000))); + if (options.reuse_logs) { + // Need to force a memtable compaction since recovery does not do so. + ASSERT_OK(dbfull()->TEST_CompactMemTable()); + } + // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { Reopen(&options); @@ -2084,7 +2113,8 @@ void BM_LogAndApply(int iters, int num_base_files) { InternalKeyComparator cmp(BytewiseComparator()); Options options; VersionSet vset(dbname, &options, NULL, &cmp); - ASSERT_OK(vset.Recover()); + bool save_manifest; + ASSERT_OK(vset.Recover(&save_manifest)); VersionEdit vbase; uint64_t fnum = 1; for (int i = 0; i < num_base_files; i++) { diff --git a/src/leveldb/db/fault_injection_test.cc b/src/leveldb/db/fault_injection_test.cc new file mode 100644 index 0000000000..875dfe81ee --- /dev/null +++ b/src/leveldb/db/fault_injection_test.cc @@ -0,0 +1,554 @@ +// Copyright 2014 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// This test uses a custom Env to keep track of the state of a filesystem as of +// the last "sync". It then checks for data loss errors by purposely dropping +// file data (or entire files) not protected by a "sync". 
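[Editor's note, not part of the patch] The header comment above describes the new test's approach: wrap the real Env, record what has actually been synced, then deliberately drop the rest. The wrapper is attached to the database through Options::env, as the test does further down with options_.env = env_. A minimal sketch of that pattern, assuming only the stock EnvWrapper forwarding class from leveldb/env.h; the CountingEnv name and the /tmp path are invented for illustration.

    // counting_env_demo.cc -- hypothetical, shows how a custom Env is plugged in.
    #include "leveldb/db.h"
    #include "leveldb/env.h"

    namespace {

    // EnvWrapper forwards every call to a target Env; overriding a single
    // method is enough to observe (or perturb) the DB's file operations.
    class CountingEnv : public leveldb::EnvWrapper {
     public:
      CountingEnv() : leveldb::EnvWrapper(leveldb::Env::Default()), writable_files_(0) {}
      virtual leveldb::Status NewWritableFile(const std::string& fname,
                                              leveldb::WritableFile** result) {
        ++writable_files_;                         // record the event, then delegate
        return target()->NewWritableFile(fname, result);
      }
      int writable_files_;
    };

    }  // namespace

    int main() {
      CountingEnv env;
      leveldb::Options options;
      options.create_if_missing = true;
      options.env = &env;                          // route all file I/O through the wrapper
      leveldb::DB* db = NULL;
      leveldb::Status s = leveldb::DB::Open(options, "/tmp/counting_env_demo", &db);
      delete db;
      return s.ok() ? 0 : 1;
    }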
+ +#include "leveldb/db.h" + +#include <map> +#include <set> +#include "db/db_impl.h" +#include "db/filename.h" +#include "db/log_format.h" +#include "db/version_set.h" +#include "leveldb/cache.h" +#include "leveldb/env.h" +#include "leveldb/table.h" +#include "leveldb/write_batch.h" +#include "util/logging.h" +#include "util/mutexlock.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace leveldb { + +static const int kValueSize = 1000; +static const int kMaxNumValues = 2000; +static const size_t kNumIterations = 3; + +class FaultInjectionTestEnv; + +namespace { + +// Assume a filename, and not a directory name like "/foo/bar/" +static std::string GetDirName(const std::string filename) { + size_t found = filename.find_last_of("/\\"); + if (found == std::string::npos) { + return ""; + } else { + return filename.substr(0, found); + } +} + +Status SyncDir(const std::string& dir) { + // As this is a test it isn't required to *actually* sync this directory. + return Status::OK(); +} + +// A basic file truncation function suitable for this test. +Status Truncate(const std::string& filename, uint64_t length) { + leveldb::Env* env = leveldb::Env::Default(); + + SequentialFile* orig_file; + Status s = env->NewSequentialFile(filename, &orig_file); + if (!s.ok()) + return s; + + char* scratch = new char[length]; + leveldb::Slice result; + s = orig_file->Read(length, &result, scratch); + delete orig_file; + if (s.ok()) { + std::string tmp_name = GetDirName(filename) + "/truncate.tmp"; + WritableFile* tmp_file; + s = env->NewWritableFile(tmp_name, &tmp_file); + if (s.ok()) { + s = tmp_file->Append(result); + delete tmp_file; + if (s.ok()) { + s = env->RenameFile(tmp_name, filename); + } else { + env->DeleteFile(tmp_name); + } + } + } + + delete[] scratch; + + return s; +} + +struct FileState { + std::string filename_; + ssize_t pos_; + ssize_t pos_at_last_sync_; + ssize_t pos_at_last_flush_; + + FileState(const std::string& filename) + : filename_(filename), + pos_(-1), + pos_at_last_sync_(-1), + pos_at_last_flush_(-1) { } + + FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {} + + bool IsFullySynced() const { return pos_ <= 0 || pos_ == pos_at_last_sync_; } + + Status DropUnsyncedData() const; +}; + +} // anonymous namespace + +// A wrapper around WritableFile which informs another Env whenever this file +// is written to or sync'ed. 
+class TestWritableFile : public WritableFile { + public: + TestWritableFile(const FileState& state, + WritableFile* f, + FaultInjectionTestEnv* env); + virtual ~TestWritableFile(); + virtual Status Append(const Slice& data); + virtual Status Close(); + virtual Status Flush(); + virtual Status Sync(); + + private: + FileState state_; + WritableFile* target_; + bool writable_file_opened_; + FaultInjectionTestEnv* env_; + + Status SyncParent(); +}; + +class FaultInjectionTestEnv : public EnvWrapper { + public: + FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {} + virtual ~FaultInjectionTestEnv() { } + virtual Status NewWritableFile(const std::string& fname, + WritableFile** result); + virtual Status NewAppendableFile(const std::string& fname, + WritableFile** result); + virtual Status DeleteFile(const std::string& f); + virtual Status RenameFile(const std::string& s, const std::string& t); + + void WritableFileClosed(const FileState& state); + Status DropUnsyncedFileData(); + Status DeleteFilesCreatedAfterLastDirSync(); + void DirWasSynced(); + bool IsFileCreatedSinceLastDirSync(const std::string& filename); + void ResetState(); + void UntrackFile(const std::string& f); + // Setting the filesystem to inactive is the test equivalent to simulating a + // system reset. Setting to inactive will freeze our saved filesystem state so + // that it will stop being recorded. It can then be reset back to the state at + // the time of the reset. + bool IsFilesystemActive() const { return filesystem_active_; } + void SetFilesystemActive(bool active) { filesystem_active_ = active; } + + private: + port::Mutex mutex_; + std::map<std::string, FileState> db_file_state_; + std::set<std::string> new_files_since_last_dir_sync_; + bool filesystem_active_; // Record flushes, syncs, writes +}; + +TestWritableFile::TestWritableFile(const FileState& state, + WritableFile* f, + FaultInjectionTestEnv* env) + : state_(state), + target_(f), + writable_file_opened_(true), + env_(env) { + assert(f != NULL); +} + +TestWritableFile::~TestWritableFile() { + if (writable_file_opened_) { + Close(); + } + delete target_; +} + +Status TestWritableFile::Append(const Slice& data) { + Status s = target_->Append(data); + if (s.ok() && env_->IsFilesystemActive()) { + state_.pos_ += data.size(); + } + return s; +} + +Status TestWritableFile::Close() { + writable_file_opened_ = false; + Status s = target_->Close(); + if (s.ok()) { + env_->WritableFileClosed(state_); + } + return s; +} + +Status TestWritableFile::Flush() { + Status s = target_->Flush(); + if (s.ok() && env_->IsFilesystemActive()) { + state_.pos_at_last_flush_ = state_.pos_; + } + return s; +} + +Status TestWritableFile::SyncParent() { + Status s = SyncDir(GetDirName(state_.filename_)); + if (s.ok()) { + env_->DirWasSynced(); + } + return s; +} + +Status TestWritableFile::Sync() { + if (!env_->IsFilesystemActive()) { + return Status::OK(); + } + // Ensure new files referred to by the manifest are in the filesystem. 
+ Status s = target_->Sync(); + if (s.ok()) { + state_.pos_at_last_sync_ = state_.pos_; + } + if (env_->IsFileCreatedSinceLastDirSync(state_.filename_)) { + Status ps = SyncParent(); + if (s.ok() && !ps.ok()) { + s = ps; + } + } + return s; +} + +Status FaultInjectionTestEnv::NewWritableFile(const std::string& fname, + WritableFile** result) { + WritableFile* actual_writable_file; + Status s = target()->NewWritableFile(fname, &actual_writable_file); + if (s.ok()) { + FileState state(fname); + state.pos_ = 0; + *result = new TestWritableFile(state, actual_writable_file, this); + // NewWritableFile doesn't append to files, so if the same file is + // opened again then it will be truncated - so forget our saved + // state. + UntrackFile(fname); + MutexLock l(&mutex_); + new_files_since_last_dir_sync_.insert(fname); + } + return s; +} + +Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname, + WritableFile** result) { + WritableFile* actual_writable_file; + Status s = target()->NewAppendableFile(fname, &actual_writable_file); + if (s.ok()) { + FileState state(fname); + state.pos_ = 0; + { + MutexLock l(&mutex_); + if (db_file_state_.count(fname) == 0) { + new_files_since_last_dir_sync_.insert(fname); + } else { + state = db_file_state_[fname]; + } + } + *result = new TestWritableFile(state, actual_writable_file, this); + } + return s; +} + +Status FaultInjectionTestEnv::DropUnsyncedFileData() { + Status s; + MutexLock l(&mutex_); + for (std::map<std::string, FileState>::const_iterator it = + db_file_state_.begin(); + s.ok() && it != db_file_state_.end(); ++it) { + const FileState& state = it->second; + if (!state.IsFullySynced()) { + s = state.DropUnsyncedData(); + } + } + return s; +} + +void FaultInjectionTestEnv::DirWasSynced() { + MutexLock l(&mutex_); + new_files_since_last_dir_sync_.clear(); +} + +bool FaultInjectionTestEnv::IsFileCreatedSinceLastDirSync( + const std::string& filename) { + MutexLock l(&mutex_); + return new_files_since_last_dir_sync_.find(filename) != + new_files_since_last_dir_sync_.end(); +} + +void FaultInjectionTestEnv::UntrackFile(const std::string& f) { + MutexLock l(&mutex_); + db_file_state_.erase(f); + new_files_since_last_dir_sync_.erase(f); +} + +Status FaultInjectionTestEnv::DeleteFile(const std::string& f) { + Status s = EnvWrapper::DeleteFile(f); + ASSERT_OK(s); + if (s.ok()) { + UntrackFile(f); + } + return s; +} + +Status FaultInjectionTestEnv::RenameFile(const std::string& s, + const std::string& t) { + Status ret = EnvWrapper::RenameFile(s, t); + + if (ret.ok()) { + MutexLock l(&mutex_); + if (db_file_state_.find(s) != db_file_state_.end()) { + db_file_state_[t] = db_file_state_[s]; + db_file_state_.erase(s); + } + + if (new_files_since_last_dir_sync_.erase(s) != 0) { + assert(new_files_since_last_dir_sync_.find(t) == + new_files_since_last_dir_sync_.end()); + new_files_since_last_dir_sync_.insert(t); + } + } + + return ret; +} + +void FaultInjectionTestEnv::ResetState() { + // Since we are not destroying the database, the existing files + // should keep their recorded synced/flushed state. Therefore + // we do not reset db_file_state_ and new_files_since_last_dir_sync_. 
+ MutexLock l(&mutex_); + SetFilesystemActive(true); +} + +Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() { + // Because DeleteFile access this container make a copy to avoid deadlock + mutex_.Lock(); + std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(), + new_files_since_last_dir_sync_.end()); + mutex_.Unlock(); + Status s; + std::set<std::string>::const_iterator it; + for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) { + s = DeleteFile(*it); + } + return s; +} + +void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) { + MutexLock l(&mutex_); + db_file_state_[state.filename_] = state; +} + +Status FileState::DropUnsyncedData() const { + ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_; + return Truncate(filename_, sync_pos); +} + +class FaultInjectionTest { + public: + enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR }; + enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES }; + + FaultInjectionTestEnv* env_; + std::string dbname_; + Cache* tiny_cache_; + Options options_; + DB* db_; + + FaultInjectionTest() + : env_(new FaultInjectionTestEnv), + tiny_cache_(NewLRUCache(100)), + db_(NULL) { + dbname_ = test::TmpDir() + "/fault_test"; + DestroyDB(dbname_, Options()); // Destroy any db from earlier run + options_.reuse_logs = true; + options_.env = env_; + options_.paranoid_checks = true; + options_.block_cache = tiny_cache_; + options_.create_if_missing = true; + } + + ~FaultInjectionTest() { + CloseDB(); + DestroyDB(dbname_, Options()); + delete tiny_cache_; + delete env_; + } + + void ReuseLogs(bool reuse) { + options_.reuse_logs = reuse; + } + + void Build(int start_idx, int num_vals) { + std::string key_space, value_space; + WriteBatch batch; + for (int i = start_idx; i < start_idx + num_vals; i++) { + Slice key = Key(i, &key_space); + batch.Clear(); + batch.Put(key, Value(i, &value_space)); + WriteOptions options; + ASSERT_OK(db_->Write(options, &batch)); + } + } + + Status ReadValue(int i, std::string* val) const { + std::string key_space, value_space; + Slice key = Key(i, &key_space); + Value(i, &value_space); + ReadOptions options; + return db_->Get(options, key, val); + } + + Status Verify(int start_idx, int num_vals, + ExpectedVerifResult expected) const { + std::string val; + std::string value_space; + Status s; + for (int i = start_idx; i < start_idx + num_vals && s.ok(); i++) { + Value(i, &value_space); + s = ReadValue(i, &val); + if (expected == VAL_EXPECT_NO_ERROR) { + if (s.ok()) { + ASSERT_EQ(value_space, val); + } + } else if (s.ok()) { + fprintf(stderr, "Expected an error at %d, but was OK\n", i); + s = Status::IOError(dbname_, "Expected value error:"); + } else { + s = Status::OK(); // An expected error + } + } + return s; + } + + // Return the ith key + Slice Key(int i, std::string* storage) const { + char buf[100]; + snprintf(buf, sizeof(buf), "%016d", i); + storage->assign(buf, strlen(buf)); + return Slice(*storage); + } + + // Return the value to associate with the specified key + Slice Value(int k, std::string* storage) const { + Random r(k); + return test::RandomString(&r, kValueSize, storage); + } + + Status OpenDB() { + delete db_; + db_ = NULL; + env_->ResetState(); + return DB::Open(options_, dbname_, &db_); + } + + void CloseDB() { + delete db_; + db_ = NULL; + } + + void DeleteAllData() { + Iterator* iter = db_->NewIterator(ReadOptions()); + WriteOptions options; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + 
ASSERT_OK(db_->Delete(WriteOptions(), iter->key())); + } + + delete iter; + } + + void ResetDBState(ResetMethod reset_method) { + switch (reset_method) { + case RESET_DROP_UNSYNCED_DATA: + ASSERT_OK(env_->DropUnsyncedFileData()); + break; + case RESET_DELETE_UNSYNCED_FILES: + ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync()); + break; + default: + assert(false); + } + } + + void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) { + DeleteAllData(); + Build(0, num_pre_sync); + db_->CompactRange(NULL, NULL); + Build(num_pre_sync, num_post_sync); + } + + void PartialCompactTestReopenWithFault(ResetMethod reset_method, + int num_pre_sync, + int num_post_sync) { + env_->SetFilesystemActive(false); + CloseDB(); + ResetDBState(reset_method); + ASSERT_OK(OpenDB()); + ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR)); + ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR)); + } + + void NoWriteTestPreFault() { + } + + void NoWriteTestReopenWithFault(ResetMethod reset_method) { + CloseDB(); + ResetDBState(reset_method); + ASSERT_OK(OpenDB()); + } + + void DoTest() { + Random rnd(0); + ASSERT_OK(OpenDB()); + for (size_t idx = 0; idx < kNumIterations; idx++) { + int num_pre_sync = rnd.Uniform(kMaxNumValues); + int num_post_sync = rnd.Uniform(kMaxNumValues); + + PartialCompactTestPreFault(num_pre_sync, num_post_sync); + PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, + num_pre_sync, + num_post_sync); + + NoWriteTestPreFault(); + NoWriteTestReopenWithFault(RESET_DROP_UNSYNCED_DATA); + + PartialCompactTestPreFault(num_pre_sync, num_post_sync); + // No new files created so we expect all values since no files will be + // dropped. + PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES, + num_pre_sync + num_post_sync, + 0); + + NoWriteTestPreFault(); + NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES); + } + } +}; + +TEST(FaultInjectionTest, FaultTestNoLogReuse) { + ReuseLogs(false); + DoTest(); +} + +TEST(FaultInjectionTest, FaultTestWithLogReuse) { + ReuseLogs(true); + DoTest(); +} + +} // namespace leveldb + +int main(int argc, char** argv) { + return leveldb::test::RunAllTests(); +} diff --git a/src/leveldb/db/leveldb_main.cc b/src/leveldb/db/leveldbutil.cc index 9f4b7dd70c..9f4b7dd70c 100644 --- a/src/leveldb/db/leveldb_main.cc +++ b/src/leveldb/db/leveldbutil.cc diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc index e44b66c85b..a6d304545d 100644 --- a/src/leveldb/db/log_reader.cc +++ b/src/leveldb/db/log_reader.cc @@ -25,7 +25,8 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum, eof_(false), last_record_offset_(0), end_of_buffer_offset_(0), - initial_offset_(initial_offset) { + initial_offset_(initial_offset), + resyncing_(initial_offset > 0) { } Reader::~Reader() { @@ -72,8 +73,25 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) { Slice fragment; while (true) { - uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size(); const unsigned int record_type = ReadPhysicalRecord(&fragment); + + // ReadPhysicalRecord may have only had an empty trailer remaining in its + // internal buffer. Calculate the offset of the next physical record now + // that it has returned, properly accounting for its header size. 
+ uint64_t physical_record_offset = + end_of_buffer_offset_ - buffer_.size() - kHeaderSize - fragment.size(); + + if (resyncing_) { + if (record_type == kMiddleType) { + continue; + } else if (record_type == kLastType) { + resyncing_ = false; + continue; + } else { + resyncing_ = false; + } + } + switch (record_type) { case kFullType: if (in_fragmented_record) { diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h index 6aff791716..8389d61f8f 100644 --- a/src/leveldb/db/log_reader.h +++ b/src/leveldb/db/log_reader.h @@ -73,6 +73,11 @@ class Reader { // Offset at which to start looking for the first record to return uint64_t const initial_offset_; + // True if we are resynchronizing after a seek (initial_offset_ > 0). In + // particular, a run of kMiddleType and kLastType records can be silently + // skipped in this mode + bool resyncing_; + // Extend record types with the following special values enum { kEof = kMaxRecordType + 1, diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc index dcf0562652..48a5928657 100644 --- a/src/leveldb/db/log_test.cc +++ b/src/leveldb/db/log_test.cc @@ -79,7 +79,7 @@ class LogTest { virtual Status Skip(uint64_t n) { if (n > contents_.size()) { contents_.clear(); - return Status::NotFound("in-memory file skipepd past end"); + return Status::NotFound("in-memory file skipped past end"); } contents_.remove_prefix(n); @@ -104,23 +104,34 @@ class LogTest { StringSource source_; ReportCollector report_; bool reading_; - Writer writer_; - Reader reader_; + Writer* writer_; + Reader* reader_; // Record metadata for testing initial offset functionality static size_t initial_offset_record_sizes_[]; static uint64_t initial_offset_last_record_offsets_[]; + static int num_initial_offset_records_; public: LogTest() : reading_(false), - writer_(&dest_), - reader_(&source_, &report_, true/*checksum*/, - 0/*initial_offset*/) { + writer_(new Writer(&dest_)), + reader_(new Reader(&source_, &report_, true/*checksum*/, + 0/*initial_offset*/)) { + } + + ~LogTest() { + delete writer_; + delete reader_; + } + + void ReopenForAppend() { + delete writer_; + writer_ = new Writer(&dest_, dest_.contents_.size()); } void Write(const std::string& msg) { ASSERT_TRUE(!reading_) << "Write() after starting to read"; - writer_.AddRecord(Slice(msg)); + writer_->AddRecord(Slice(msg)); } size_t WrittenBytes() const { @@ -134,7 +145,7 @@ class LogTest { } std::string scratch; Slice record; - if (reader_.ReadRecord(&record, &scratch)) { + if (reader_->ReadRecord(&record, &scratch)) { return record.ToString(); } else { return "EOF"; @@ -182,13 +193,18 @@ class LogTest { } void WriteInitialOffsetLog() { - for (int i = 0; i < 4; i++) { + for (int i = 0; i < num_initial_offset_records_; i++) { std::string record(initial_offset_record_sizes_[i], static_cast<char>('a' + i)); Write(record); } } + void StartReadingAt(uint64_t initial_offset) { + delete reader_; + reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset); + } + void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) { WriteInitialOffsetLog(); reading_ = true; @@ -208,32 +224,48 @@ class LogTest { source_.contents_ = Slice(dest_.contents_); Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/, initial_offset); - Slice record; - std::string scratch; - ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch)); - ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset], - record.size()); - ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset], 
- offset_reader->LastRecordOffset()); - ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]); + + // Read all records from expected_record_offset through the last one. + ASSERT_LT(expected_record_offset, num_initial_offset_records_); + for (; expected_record_offset < num_initial_offset_records_; + ++expected_record_offset) { + Slice record; + std::string scratch; + ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch)); + ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset], + record.size()); + ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset], + offset_reader->LastRecordOffset()); + ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]); + } delete offset_reader; } - }; size_t LogTest::initial_offset_record_sizes_[] = {10000, // Two sizable records in first block 10000, 2 * log::kBlockSize - 1000, // Span three blocks - 1}; + 1, + 13716, // Consume all but two bytes of block 3. + log::kBlockSize - kHeaderSize, // Consume the entirety of block 4. + }; uint64_t LogTest::initial_offset_last_record_offsets_[] = {0, kHeaderSize + 10000, 2 * (kHeaderSize + 10000), 2 * (kHeaderSize + 10000) + - (2 * log::kBlockSize - 1000) + 3 * kHeaderSize}; + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, + 2 * (kHeaderSize + 10000) + + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize + + kHeaderSize + 1, + 3 * log::kBlockSize, + }; +// LogTest::initial_offset_last_record_offsets_ must be defined before this. +int LogTest::num_initial_offset_records_ = + sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t); TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); @@ -318,6 +350,15 @@ TEST(LogTest, AlignedEof) { ASSERT_EQ("EOF", Read()); } +TEST(LogTest, OpenForAppend) { + Write("hello"); + ReopenForAppend(); + Write("world"); + ASSERT_EQ("hello", Read()); + ASSERT_EQ("world", Read()); + ASSERT_EQ("EOF", Read()); +} + TEST(LogTest, RandomRead) { const int N = 500; Random write_rnd(301); @@ -445,6 +486,22 @@ TEST(LogTest, PartialLastIsIgnored) { ASSERT_EQ(0, DroppedBytes()); } +TEST(LogTest, SkipIntoMultiRecord) { + // Consider a fragmented record: + // first(R1), middle(R1), last(R1), first(R2) + // If initial_offset points to a record after first(R1) but before first(R2) + // incomplete fragment errors are not actual errors, and must be suppressed + // until a new first or full record is encountered. 
+ Write(BigString("foo", 3*kBlockSize)); + Write("correct"); + StartReadingAt(kBlockSize); + + ASSERT_EQ("correct", Read()); + ASSERT_EQ("", ReportMessage()); + ASSERT_EQ(0, DroppedBytes()); + ASSERT_EQ("EOF", Read()); +} + TEST(LogTest, ErrorJoinsRecords) { // Consider two fragmented records: // first(R1) last(R1) first(R2) last(R2) @@ -514,6 +571,10 @@ TEST(LogTest, ReadFourthStart) { 3); } +TEST(LogTest, ReadInitialOffsetIntoBlockPadding) { + CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5); +} + TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); } diff --git a/src/leveldb/db/log_writer.cc b/src/leveldb/db/log_writer.cc index 2da99ac088..74a03270da 100644 --- a/src/leveldb/db/log_writer.cc +++ b/src/leveldb/db/log_writer.cc @@ -12,15 +12,24 @@ namespace leveldb { namespace log { -Writer::Writer(WritableFile* dest) - : dest_(dest), - block_offset_(0) { +static void InitTypeCrc(uint32_t* type_crc) { for (int i = 0; i <= kMaxRecordType; i++) { char t = static_cast<char>(i); - type_crc_[i] = crc32c::Value(&t, 1); + type_crc[i] = crc32c::Value(&t, 1); } } +Writer::Writer(WritableFile* dest) + : dest_(dest), + block_offset_(0) { + InitTypeCrc(type_crc_); +} + +Writer::Writer(WritableFile* dest, uint64_t dest_length) + : dest_(dest), block_offset_(dest_length % kBlockSize) { + InitTypeCrc(type_crc_); +} + Writer::~Writer() { } diff --git a/src/leveldb/db/log_writer.h b/src/leveldb/db/log_writer.h index a3a954d967..9e7cc4705b 100644 --- a/src/leveldb/db/log_writer.h +++ b/src/leveldb/db/log_writer.h @@ -22,6 +22,12 @@ class Writer { // "*dest" must be initially empty. // "*dest" must remain live while this Writer is in use. explicit Writer(WritableFile* dest); + + // Create a writer that will append data to "*dest". + // "*dest" must have initial length "dest_length". + // "*dest" must remain live while this Writer is in use. + Writer(WritableFile* dest, uint64_t dest_length); + ~Writer(); Status AddRecord(const Slice& slice); diff --git a/src/leveldb/db/memtable.h b/src/leveldb/db/memtable.h index 92e90bb099..9f41567cde 100644 --- a/src/leveldb/db/memtable.h +++ b/src/leveldb/db/memtable.h @@ -36,10 +36,7 @@ class MemTable { } // Returns an estimate of the number of bytes of data in use by this - // data structure. - // - // REQUIRES: external synchronization to prevent simultaneous - // operations on the same MemTable. + // data structure. It is safe to call when MemTable is being modified. size_t ApproximateMemoryUsage(); // Return an iterator that yields the contents of the memtable. diff --git a/src/leveldb/db/recovery_test.cc b/src/leveldb/db/recovery_test.cc new file mode 100644 index 0000000000..9596f4288a --- /dev/null +++ b/src/leveldb/db/recovery_test.cc @@ -0,0 +1,324 @@ +// Copyright (c) 2014 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include "db/db_impl.h" +#include "db/filename.h" +#include "db/version_set.h" +#include "db/write_batch_internal.h" +#include "leveldb/db.h" +#include "leveldb/env.h" +#include "leveldb/write_batch.h" +#include "util/logging.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace leveldb { + +class RecoveryTest { + public: + RecoveryTest() : env_(Env::Default()), db_(NULL) { + dbname_ = test::TmpDir() + "/recovery_test"; + DestroyDB(dbname_, Options()); + Open(); + } + + ~RecoveryTest() { + Close(); + DestroyDB(dbname_, Options()); + } + + DBImpl* dbfull() const { return reinterpret_cast<DBImpl*>(db_); } + Env* env() const { return env_; } + + bool CanAppend() { + WritableFile* tmp; + Status s = env_->NewAppendableFile(CurrentFileName(dbname_), &tmp); + delete tmp; + if (s.IsNotSupportedError()) { + return false; + } else { + return true; + } + } + + void Close() { + delete db_; + db_ = NULL; + } + + void Open(Options* options = NULL) { + Close(); + Options opts; + if (options != NULL) { + opts = *options; + } else { + opts.reuse_logs = true; // TODO(sanjay): test both ways + opts.create_if_missing = true; + } + if (opts.env == NULL) { + opts.env = env_; + } + ASSERT_OK(DB::Open(opts, dbname_, &db_)); + ASSERT_EQ(1, NumLogs()); + } + + Status Put(const std::string& k, const std::string& v) { + return db_->Put(WriteOptions(), k, v); + } + + std::string Get(const std::string& k, const Snapshot* snapshot = NULL) { + std::string result; + Status s = db_->Get(ReadOptions(), k, &result); + if (s.IsNotFound()) { + result = "NOT_FOUND"; + } else if (!s.ok()) { + result = s.ToString(); + } + return result; + } + + std::string ManifestFileName() { + std::string current; + ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), ¤t)); + size_t len = current.size(); + if (len > 0 && current[len-1] == '\n') { + current.resize(len - 1); + } + return dbname_ + "/" + current; + } + + std::string LogName(uint64_t number) { + return LogFileName(dbname_, number); + } + + size_t DeleteLogFiles() { + std::vector<uint64_t> logs = GetFiles(kLogFile); + for (size_t i = 0; i < logs.size(); i++) { + ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]); + } + return logs.size(); + } + + uint64_t FirstLogFile() { + return GetFiles(kLogFile)[0]; + } + + std::vector<uint64_t> GetFiles(FileType t) { + std::vector<std::string> filenames; + ASSERT_OK(env_->GetChildren(dbname_, &filenames)); + std::vector<uint64_t> result; + for (size_t i = 0; i < filenames.size(); i++) { + uint64_t number; + FileType type; + if (ParseFileName(filenames[i], &number, &type) && type == t) { + result.push_back(number); + } + } + return result; + } + + int NumLogs() { + return GetFiles(kLogFile).size(); + } + + int NumTables() { + return GetFiles(kTableFile).size(); + } + + uint64_t FileSize(const std::string& fname) { + uint64_t result; + ASSERT_OK(env_->GetFileSize(fname, &result)) << fname; + return result; + } + + void CompactMemTable() { + dbfull()->TEST_CompactMemTable(); + } + + // Directly construct a log file that sets key to val. 
+ void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) { + std::string fname = LogFileName(dbname_, lognum); + WritableFile* file; + ASSERT_OK(env_->NewWritableFile(fname, &file)); + log::Writer writer(file); + WriteBatch batch; + batch.Put(key, val); + WriteBatchInternal::SetSequence(&batch, seq); + ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch))); + ASSERT_OK(file->Flush()); + delete file; + } + + private: + std::string dbname_; + Env* env_; + DB* db_; +}; + +TEST(RecoveryTest, ManifestReused) { + if (!CanAppend()) { + fprintf(stderr, "skipping test because env does not support appending\n"); + return; + } + ASSERT_OK(Put("foo", "bar")); + Close(); + std::string old_manifest = ManifestFileName(); + Open(); + ASSERT_EQ(old_manifest, ManifestFileName()); + ASSERT_EQ("bar", Get("foo")); + Open(); + ASSERT_EQ(old_manifest, ManifestFileName()); + ASSERT_EQ("bar", Get("foo")); +} + +TEST(RecoveryTest, LargeManifestCompacted) { + if (!CanAppend()) { + fprintf(stderr, "skipping test because env does not support appending\n"); + return; + } + ASSERT_OK(Put("foo", "bar")); + Close(); + std::string old_manifest = ManifestFileName(); + + // Pad with zeroes to make manifest file very big. + { + uint64_t len = FileSize(old_manifest); + WritableFile* file; + ASSERT_OK(env()->NewAppendableFile(old_manifest, &file)); + std::string zeroes(3*1048576 - static_cast<size_t>(len), 0); + ASSERT_OK(file->Append(zeroes)); + ASSERT_OK(file->Flush()); + delete file; + } + + Open(); + std::string new_manifest = ManifestFileName(); + ASSERT_NE(old_manifest, new_manifest); + ASSERT_GT(10000, FileSize(new_manifest)); + ASSERT_EQ("bar", Get("foo")); + + Open(); + ASSERT_EQ(new_manifest, ManifestFileName()); + ASSERT_EQ("bar", Get("foo")); +} + +TEST(RecoveryTest, NoLogFiles) { + ASSERT_OK(Put("foo", "bar")); + ASSERT_EQ(1, DeleteLogFiles()); + Open(); + ASSERT_EQ("NOT_FOUND", Get("foo")); + Open(); + ASSERT_EQ("NOT_FOUND", Get("foo")); +} + +TEST(RecoveryTest, LogFileReuse) { + if (!CanAppend()) { + fprintf(stderr, "skipping test because env does not support appending\n"); + return; + } + for (int i = 0; i < 2; i++) { + ASSERT_OK(Put("foo", "bar")); + if (i == 0) { + // Compact to ensure current log is empty + CompactMemTable(); + } + Close(); + ASSERT_EQ(1, NumLogs()); + uint64_t number = FirstLogFile(); + if (i == 0) { + ASSERT_EQ(0, FileSize(LogName(number))); + } else { + ASSERT_LT(0, FileSize(LogName(number))); + } + Open(); + ASSERT_EQ(1, NumLogs()); + ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file"; + ASSERT_EQ("bar", Get("foo")); + Open(); + ASSERT_EQ(1, NumLogs()); + ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file"; + ASSERT_EQ("bar", Get("foo")); + } +} + +TEST(RecoveryTest, MultipleMemTables) { + // Make a large log. + const int kNum = 1000; + for (int i = 0; i < kNum; i++) { + char buf[100]; + snprintf(buf, sizeof(buf), "%050d", i); + ASSERT_OK(Put(buf, buf)); + } + ASSERT_EQ(0, NumTables()); + Close(); + ASSERT_EQ(0, NumTables()); + ASSERT_EQ(1, NumLogs()); + uint64_t old_log_file = FirstLogFile(); + + // Force creation of multiple memtables by reducing the write buffer size. 
+ Options opt; + opt.reuse_logs = true; + opt.write_buffer_size = (kNum*100) / 2; + Open(&opt); + ASSERT_LE(2, NumTables()); + ASSERT_EQ(1, NumLogs()); + ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log"; + for (int i = 0; i < kNum; i++) { + char buf[100]; + snprintf(buf, sizeof(buf), "%050d", i); + ASSERT_EQ(buf, Get(buf)); + } +} + +TEST(RecoveryTest, MultipleLogFiles) { + ASSERT_OK(Put("foo", "bar")); + Close(); + ASSERT_EQ(1, NumLogs()); + + // Make a bunch of uncompacted log files. + uint64_t old_log = FirstLogFile(); + MakeLogFile(old_log+1, 1000, "hello", "world"); + MakeLogFile(old_log+2, 1001, "hi", "there"); + MakeLogFile(old_log+3, 1002, "foo", "bar2"); + + // Recover and check that all log files were processed. + Open(); + ASSERT_LE(1, NumTables()); + ASSERT_EQ(1, NumLogs()); + uint64_t new_log = FirstLogFile(); + ASSERT_LE(old_log+3, new_log); + ASSERT_EQ("bar2", Get("foo")); + ASSERT_EQ("world", Get("hello")); + ASSERT_EQ("there", Get("hi")); + + // Test that previous recovery produced recoverable state. + Open(); + ASSERT_LE(1, NumTables()); + ASSERT_EQ(1, NumLogs()); + if (CanAppend()) { + ASSERT_EQ(new_log, FirstLogFile()); + } + ASSERT_EQ("bar2", Get("foo")); + ASSERT_EQ("world", Get("hello")); + ASSERT_EQ("there", Get("hi")); + + // Check that introducing an older log file does not cause it to be re-read. + Close(); + MakeLogFile(old_log+1, 2000, "hello", "stale write"); + Open(); + ASSERT_LE(1, NumTables()); + ASSERT_EQ(1, NumLogs()); + if (CanAppend()) { + ASSERT_EQ(new_log, FirstLogFile()); + } + ASSERT_EQ("bar2", Get("foo")); + ASSERT_EQ("world", Get("hello")); + ASSERT_EQ("there", Get("hi")); +} + +} // namespace leveldb + +int main(int argc, char** argv) { + return leveldb::test::RunAllTests(); +} diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h index ed8b092203..8bd77764d8 100644 --- a/src/leveldb/db/skiplist.h +++ b/src/leveldb/db/skiplist.h @@ -1,10 +1,10 @@ -#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_ -#define STORAGE_LEVELDB_DB_SKIPLIST_H_ - // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -// + +#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_ +#define STORAGE_LEVELDB_DB_SKIPLIST_H_ + // Thread safety // ------------- // diff --git a/src/leveldb/db/skiplist_test.cc b/src/leveldb/db/skiplist_test.cc index c78f4b4fb1..aee1461e1b 100644 --- a/src/leveldb/db/skiplist_test.cc +++ b/src/leveldb/db/skiplist_test.cc @@ -250,7 +250,7 @@ class ConcurrentTest { // Note that generation 0 is never inserted, so it is ok if // <*,0,*> is missing. 
ASSERT_TRUE((gen(pos) == 0) || - (gen(pos) > initial_state.Get(key(pos))) + (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))) ) << "key: " << key(pos) << "; gen: " << gen(pos) << "; initgen: " diff --git a/src/leveldb/db/snapshot.h b/src/leveldb/db/snapshot.h index e7f8fd2c37..6ed413c42d 100644 --- a/src/leveldb/db/snapshot.h +++ b/src/leveldb/db/snapshot.h @@ -5,6 +5,7 @@ #ifndef STORAGE_LEVELDB_DB_SNAPSHOT_H_ #define STORAGE_LEVELDB_DB_SNAPSHOT_H_ +#include "db/dbformat.h" #include "leveldb/db.h" namespace leveldb { diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc index aa83df55e4..a5e0f77a6a 100644 --- a/src/leveldb/db/version_set.cc +++ b/src/leveldb/db/version_set.cc @@ -893,7 +893,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { return s; } -Status VersionSet::Recover() { +Status VersionSet::Recover(bool *save_manifest) { struct LogReporter : public log::Reader::Reporter { Status* status; virtual void Corruption(size_t bytes, const Status& s) { @@ -1003,11 +1003,49 @@ Status VersionSet::Recover() { last_sequence_ = last_sequence; log_number_ = log_number; prev_log_number_ = prev_log_number; + + // See if we can reuse the existing MANIFEST file. + if (ReuseManifest(dscname, current)) { + // No need to save new manifest + } else { + *save_manifest = true; + } } return s; } +bool VersionSet::ReuseManifest(const std::string& dscname, + const std::string& dscbase) { + if (!options_->reuse_logs) { + return false; + } + FileType manifest_type; + uint64_t manifest_number; + uint64_t manifest_size; + if (!ParseFileName(dscbase, &manifest_number, &manifest_type) || + manifest_type != kDescriptorFile || + !env_->GetFileSize(dscname, &manifest_size).ok() || + // Make new compacted MANIFEST if old one is too big + manifest_size >= kTargetFileSize) { + return false; + } + + assert(descriptor_file_ == NULL); + assert(descriptor_log_ == NULL); + Status r = env_->NewAppendableFile(dscname, &descriptor_file_); + if (!r.ok()) { + Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str()); + assert(descriptor_file_ == NULL); + return false; + } + + Log(options_->info_log, "Reusing MANIFEST %s\n", dscname.c_str()); + descriptor_log_ = new log::Writer(descriptor_file_, manifest_size); + manifest_file_number_ = manifest_number; + return true; +} + void VersionSet::MarkFileNumberUsed(uint64_t number) { if (next_file_number_ <= number) { next_file_number_ = number + 1; diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h index 8dc14b8e01..1dec745673 100644 --- a/src/leveldb/db/version_set.h +++ b/src/leveldb/db/version_set.h @@ -179,7 +179,7 @@ class VersionSet { EXCLUSIVE_LOCKS_REQUIRED(mu); // Recover the last saved descriptor from persistent storage. - Status Recover(); + Status Recover(bool *save_manifest); // Return the current version. 
Version* current() const { return current_; } @@ -274,6 +274,8 @@ class VersionSet { friend class Compaction; friend class Version; + bool ReuseManifest(const std::string& dscname, const std::string& dscbase); + void Finalize(Version* v); void GetRange(const std::vector<FileMetaData*>& inputs, diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h index 310a3c8912..9448ef7b21 100644 --- a/src/leveldb/db/write_batch_internal.h +++ b/src/leveldb/db/write_batch_internal.h @@ -5,6 +5,7 @@ #ifndef STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_ #define STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_ +#include "db/dbformat.h" #include "leveldb/write_batch.h" namespace leveldb { diff --git a/src/leveldb/doc/index.html b/src/leveldb/doc/index.html index 3ed0ed9d9e..2155192581 100644 --- a/src/leveldb/doc/index.html +++ b/src/leveldb/doc/index.html @@ -22,7 +22,7 @@ directory. The following example shows how to open a database, creating it if necessary: <p> <pre> - #include <assert> + #include <cassert> #include "leveldb/db.h" leveldb::DB* db; diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc index 43ef2e0729..9a98884daf 100644 --- a/src/leveldb/helpers/memenv/memenv.cc +++ b/src/leveldb/helpers/memenv/memenv.cc @@ -277,6 +277,19 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } + virtual Status NewAppendableFile(const std::string& fname, + WritableFile** result) { + MutexLock lock(&mutex_); + FileState** sptr = &file_map_[fname]; + FileState* file = *sptr; + if (file == NULL) { + file = new FileState(); + file->Ref(); + } + *result = new WritableFileImpl(file); + return Status::OK(); + } + virtual bool FileExists(const std::string& fname) { MutexLock lock(&mutex_); return file_map_.find(fname) != file_map_.end(); diff --git a/src/leveldb/helpers/memenv/memenv_test.cc b/src/leveldb/helpers/memenv/memenv_test.cc index a44310fed8..5cff77613f 100644 --- a/src/leveldb/helpers/memenv/memenv_test.cc +++ b/src/leveldb/helpers/memenv/memenv_test.cc @@ -40,6 +40,8 @@ TEST(MemEnvTest, Basics) { // Create a file. ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); + ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); + ASSERT_EQ(0, file_size); delete writable_file; // Check that the file exists. @@ -55,9 +57,16 @@ TEST(MemEnvTest, Basics) { ASSERT_OK(writable_file->Append("abc")); delete writable_file; - // Check for expected size. + // Check that append works. + ASSERT_OK(env_->NewAppendableFile("/dir/f", &writable_file)); ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(3, file_size); + ASSERT_OK(writable_file->Append("hello")); + delete writable_file; + + // Check for expected size. + ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); + ASSERT_EQ(8, file_size); // Check that renaming works. ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok()); @@ -65,7 +74,7 @@ TEST(MemEnvTest, Basics) { ASSERT_TRUE(!env_->FileExists("/dir/f")); ASSERT_TRUE(env_->FileExists("/dir/g")); ASSERT_OK(env_->GetFileSize("/dir/g", &file_size)); - ASSERT_EQ(3, file_size); + ASSERT_EQ(8, file_size); // Check that opening non-existent file fails. SequentialFile* seq_file; diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h index 1a201e5e0a..6819d5bc49 100644 --- a/src/leveldb/include/leveldb/cache.h +++ b/src/leveldb/include/leveldb/cache.h @@ -81,6 +81,17 @@ class Cache { // its cache keys. virtual uint64_t NewId() = 0; + // Remove all cache entries that are not actively in use. 
Memory-constrained + // applications may wish to call this method to reduce memory usage. + // Default implementation of Prune() does nothing. Subclasses are strongly + // encouraged to override the default implementation. A future release of + // leveldb may change Prune() to a pure abstract method. + virtual void Prune() {} + + // Return an estimate of the combined charges of all elements stored in the + // cache. + virtual size_t TotalCharge() const = 0; + private: void LRU_Remove(Handle* e); void LRU_Append(Handle* e); diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h index 4c169bf22e..9752cbad51 100644 --- a/src/leveldb/include/leveldb/db.h +++ b/src/leveldb/include/leveldb/db.h @@ -14,7 +14,7 @@ namespace leveldb { // Update Makefile if you change these static const int kMajorVersion = 1; -static const int kMinorVersion = 18; +static const int kMinorVersion = 19; struct Options; struct ReadOptions; @@ -115,6 +115,8 @@ class DB { // about the internal operation of the DB. // "leveldb.sstables" - returns a multi-line string that describes all // of the sstables that make up the db contents. + // "leveldb.approximate-memory-usage" - returns the approximate number of + // bytes of memory in use by the DB. virtual bool GetProperty(const Slice& property, std::string* value) = 0; // For each i in [0,n-1], store in "sizes[i]", the approximate diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h index f709514da6..99b6c21414 100644 --- a/src/leveldb/include/leveldb/env.h +++ b/src/leveldb/include/leveldb/env.h @@ -69,6 +69,21 @@ class Env { virtual Status NewWritableFile(const std::string& fname, WritableFile** result) = 0; + // Create an object that either appends to an existing file, or + // writes to a new file (if the file does not exist to begin with). + // On success, stores a pointer to the new file in *result and + // returns OK. On failure stores NULL in *result and returns + // non-OK. + // + // The returned file will only be accessed by one thread at a time. + // + // May return an IsNotSupportedError error if this Env does + // not allow appending to an existing file. Users of Env (including + // the leveldb implementation) must be prepared to deal with + // an Env that does not support appending. + virtual Status NewAppendableFile(const std::string& fname, + WritableFile** result); + // Returns true iff the named file exists. virtual bool FileExists(const std::string& fname) = 0; @@ -289,6 +304,9 @@ class EnvWrapper : public Env { Status NewWritableFile(const std::string& f, WritableFile** r) { return target_->NewWritableFile(f, r); } + Status NewAppendableFile(const std::string& f, WritableFile** r) { + return target_->NewAppendableFile(f, r); + } bool FileExists(const std::string& f) { return target_->FileExists(f); } Status GetChildren(const std::string& dir, std::vector<std::string>* r) { return target_->GetChildren(dir, r); diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h index 76aced04bd..da631ed9d8 100644 --- a/src/leveldb/include/leveldb/iterator.h +++ b/src/leveldb/include/leveldb/iterator.h @@ -37,7 +37,7 @@ class Iterator { // Valid() after this call iff the source is not empty. virtual void SeekToLast() = 0; - // Position at the first key in the source that at or past target + // Position at the first key in the source that is at or past target. // The iterator is Valid() after this call iff the source contains // an entry that comes at or past target. 
virtual void Seek(const Slice& target) = 0; diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h index 7c9b973454..83a1ef39a4 100644 --- a/src/leveldb/include/leveldb/options.h +++ b/src/leveldb/include/leveldb/options.h @@ -128,6 +128,12 @@ struct Options { // efficiently detect that and will switch to uncompressed mode. CompressionType compression; + // EXPERIMENTAL: If true, append to existing MANIFEST and log files + // when a database is opened. This can significantly speed up open. + // + // Default: currently false, but may become true later. + bool reuse_logs; + // If non-NULL, use the specified filter policy to reduce disk reads. // Many applications will benefit from passing the result of // NewBloomFilterPolicy() here. diff --git a/src/leveldb/include/leveldb/status.h b/src/leveldb/include/leveldb/status.h index 11dbd4b47e..d9575f9753 100644 --- a/src/leveldb/include/leveldb/status.h +++ b/src/leveldb/include/leveldb/status.h @@ -60,6 +60,12 @@ class Status { // Returns true iff the status indicates an IOError. bool IsIOError() const { return code() == kIOError; } + // Returns true iff the status indicates a NotSupportedError. + bool IsNotSupportedError() const { return code() == kNotSupported; } + + // Returns true iff the status indicates an InvalidArgument. + bool IsInvalidArgument() const { return code() == kInvalidArgument; } + // Return a string representation of this status suitable for printing. // Returns the string "OK" for success. std::string ToString() const; diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h index 9bf091f757..1c4c7aafc6 100644 --- a/src/leveldb/port/atomic_pointer.h +++ b/src/leveldb/port/atomic_pointer.h @@ -35,8 +35,12 @@ #define ARCH_CPU_X86_FAMILY 1 #elif defined(__ARMEL__) #define ARCH_CPU_ARM_FAMILY 1 +#elif defined(__aarch64__) +#define ARCH_CPU_ARM64_FAMILY 1 #elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__) #define ARCH_CPU_PPC_FAMILY 1 +#elif defined(__mips__) +#define ARCH_CPU_MIPS_FAMILY 1 #endif namespace leveldb { @@ -92,6 +96,13 @@ inline void MemoryBarrier() { } #define LEVELDB_HAVE_MEMORY_BARRIER +// ARM64 +#elif defined(ARCH_CPU_ARM64_FAMILY) +inline void MemoryBarrier() { + asm volatile("dmb sy" : : : "memory"); +} +#define LEVELDB_HAVE_MEMORY_BARRIER + // PPC #elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { @@ -101,6 +112,13 @@ inline void MemoryBarrier() { } #define LEVELDB_HAVE_MEMORY_BARRIER +// MIPS +#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__) +inline void MemoryBarrier() { + __asm__ __volatile__("sync" : : : "memory"); +} +#define LEVELDB_HAVE_MEMORY_BARRIER + #endif // AtomicPointer built using platform-specific MemoryBarrier() @@ -215,6 +233,7 @@ class AtomicPointer { #undef LEVELDB_HAVE_MEMORY_BARRIER #undef ARCH_CPU_X86_FAMILY #undef ARCH_CPU_ARM_FAMILY +#undef ARCH_CPU_ARM64_FAMILY #undef ARCH_CPU_PPC_FAMILY } // namespace port diff --git a/src/leveldb/port/port_posix.cc b/src/leveldb/port/port_posix.cc index 5ba127a5b9..30e8007ae3 100644 --- a/src/leveldb/port/port_posix.cc +++ b/src/leveldb/port/port_posix.cc @@ -7,7 +7,6 @@ #include <cstdlib> #include <stdio.h> #include <string.h> -#include "util/logging.h" namespace leveldb { namespace port { diff --git a/src/leveldb/table/filter_block.cc b/src/leveldb/table/filter_block.cc index 203e15c8bc..4e78b954f8 100644 --- a/src/leveldb/table/filter_block.cc +++ b/src/leveldb/table/filter_block.cc @@ -68,7 +68,7 @@ void 
FilterBlockBuilder::GenerateFilter() { // Generate filter for current set of keys and append to result_. filter_offsets_.push_back(result_.size()); - policy_->CreateFilter(&tmp_keys_[0], num_keys, &result_); + policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_); tmp_keys_.clear(); keys_.clear(); @@ -97,7 +97,7 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) { if (index < num_) { uint32_t start = DecodeFixed32(offset_ + index*4); uint32_t limit = DecodeFixed32(offset_ + index*4 + 4); - if (start <= limit && limit <= (offset_ - data_)) { + if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) { Slice filter = Slice(data_ + start, limit - start); return policy_->KeyMayMatch(key, filter); } else if (start == limit) { diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc index aa63144c9e..24e4e02445 100644 --- a/src/leveldb/table/format.cc +++ b/src/leveldb/table/format.cc @@ -30,15 +30,14 @@ Status BlockHandle::DecodeFrom(Slice* input) { } void Footer::EncodeTo(std::string* dst) const { -#ifndef NDEBUG const size_t original_size = dst->size(); -#endif metaindex_handle_.EncodeTo(dst); index_handle_.EncodeTo(dst); dst->resize(2 * BlockHandle::kMaxEncodedLength); // Padding PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu)); PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32)); assert(dst->size() == original_size + kEncodedLength); + (void)original_size; // Disable unused variable warning. } Status Footer::DecodeFrom(Slice* input) { diff --git a/src/leveldb/table/iterator_wrapper.h b/src/leveldb/table/iterator_wrapper.h index 9e16b3dbed..f410c3fabe 100644 --- a/src/leveldb/table/iterator_wrapper.h +++ b/src/leveldb/table/iterator_wrapper.h @@ -5,6 +5,9 @@ #ifndef STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_ #define STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_ +#include "leveldb/iterator.h" +#include "leveldb/slice.h" + namespace leveldb { // A internal wrapper class with an interface similar to Iterator that diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc index dff8a82590..decf8082cc 100644 --- a/src/leveldb/table/table.cc +++ b/src/leveldb/table/table.cc @@ -82,7 +82,7 @@ Status Table::Open(const Options& options, *table = new Table(rep); (*table)->ReadMeta(footer); } else { - if (index_block) delete index_block; + delete index_block; } return s; diff --git a/src/leveldb/table/table_test.cc b/src/leveldb/table/table_test.cc index c723bf84cf..abf6e246ff 100644 --- a/src/leveldb/table/table_test.cc +++ b/src/leveldb/table/table_test.cc @@ -853,12 +853,20 @@ TEST(TableTest, ApproximateOffsetOfCompressed) { options.compression = kSnappyCompression; c.Finish(options, &keys, &kvmap); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6000)); + // Expected upper and lower bounds of space used by compressible strings. + static const int kSlop = 1000; // Compressor effectiveness varies. 
+ const int expected = 2500; // 10000 * compression ratio (0.25) + const int min_z = expected - kSlop; + const int max_z = expected + kSlop; + + ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, kSlop)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, kSlop)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, kSlop)); + // Have now emitted a large compressible string, so adjust expected offset. + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), min_z, max_z)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), min_z, max_z)); + // Have now emitted two large compressible strings, so adjust expected offset. + ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 2 * min_z, 2 * max_z)); } } // namespace leveldb diff --git a/src/leveldb/util/arena.cc b/src/leveldb/util/arena.cc index 9367f71492..74078213ee 100644 --- a/src/leveldb/util/arena.cc +++ b/src/leveldb/util/arena.cc @@ -9,8 +9,7 @@ namespace leveldb { static const int kBlockSize = 4096; -Arena::Arena() { - blocks_memory_ = 0; +Arena::Arena() : memory_usage_(0) { alloc_ptr_ = NULL; // First allocation will allocate a block alloc_bytes_remaining_ = 0; } @@ -60,8 +59,9 @@ char* Arena::AllocateAligned(size_t bytes) { char* Arena::AllocateNewBlock(size_t block_bytes) { char* result = new char[block_bytes]; - blocks_memory_ += block_bytes; blocks_.push_back(result); + memory_usage_.NoBarrier_Store( + reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*))); return result; } diff --git a/src/leveldb/util/arena.h b/src/leveldb/util/arena.h index 73bbf1cb9b..48bab33741 100644 --- a/src/leveldb/util/arena.h +++ b/src/leveldb/util/arena.h @@ -9,6 +9,7 @@ #include <assert.h> #include <stddef.h> #include <stdint.h> +#include "port/port.h" namespace leveldb { @@ -24,10 +25,9 @@ class Arena { char* AllocateAligned(size_t bytes); // Returns an estimate of the total memory usage of data allocated - // by the arena (including space allocated but not yet used for user - // allocations). + // by the arena. size_t MemoryUsage() const { - return blocks_memory_ + blocks_.capacity() * sizeof(char*); + return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load()); } private: @@ -41,8 +41,8 @@ class Arena { // Array of new[] allocated memory blocks std::vector<char*> blocks_; - // Bytes of memory in blocks allocated so far - size_t blocks_memory_; + // Total memory usage of the arena. + port::AtomicPointer memory_usage_; // No copying allowed Arena(const Arena&); diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc index a27a2ace28..bf3e4ca6e9 100644 --- a/src/leveldb/util/bloom.cc +++ b/src/leveldb/util/bloom.cc @@ -47,7 +47,7 @@ class BloomFilterPolicy : public FilterPolicy { dst->resize(init_size + bytes, 0); dst->push_back(static_cast<char>(k_)); // Remember # of probes in filter char* array = &(*dst)[init_size]; - for (size_t i = 0; i < n; i++) { + for (int i = 0; i < n; i++) { // Use double-hashing to generate a sequence of hash values. // See analysis in [Kirsch,Mitzenmacher 2006]. 
uint32_t h = BloomHash(keys[i]); diff --git a/src/leveldb/util/bloom_test.cc b/src/leveldb/util/bloom_test.cc index 77fb1b3159..1b87a2be3f 100644 --- a/src/leveldb/util/bloom_test.cc +++ b/src/leveldb/util/bloom_test.cc @@ -46,7 +46,8 @@ class BloomTest { key_slices.push_back(Slice(keys_[i])); } filter_.clear(); - policy_->CreateFilter(&key_slices[0], key_slices.size(), &filter_); + policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()), + &filter_); keys_.clear(); if (kVerbose >= 2) DumpFilter(); } diff --git a/src/leveldb/util/cache.cc b/src/leveldb/util/cache.cc index 8b197bc02a..ce46886171 100644 --- a/src/leveldb/util/cache.cc +++ b/src/leveldb/util/cache.cc @@ -19,6 +19,23 @@ Cache::~Cache() { namespace { // LRU cache implementation +// +// Cache entries have an "in_cache" boolean indicating whether the cache has a +// reference on the entry. The only ways that this can become false without the +// entry being passed to its "deleter" are via Erase(), via Insert() when +// an element with a duplicate key is inserted, or on destruction of the cache. +// +// The cache keeps two linked lists of items in the cache. All items in the +// cache are in one list or the other, and never both. Items still referenced +// by clients but erased from the cache are in neither list. The lists are: +// - in-use: contains the items currently referenced by clients, in no +// particular order. (This list is used for invariant checking. If we +// removed the check, elements that would otherwise be on this list could be +// left as disconnected singleton lists.) +// - LRU: contains the items not currently referenced by clients, in LRU order +// Elements are moved between these lists by the Ref() and Unref() methods, +// when they detect an element in the cache acquiring or losing its only +// external reference. // An entry is a variable length heap-allocated structure. Entries // are kept in a circular doubly linked list ordered by access time. @@ -30,7 +47,8 @@ struct LRUHandle { LRUHandle* prev; size_t charge; // TODO(opt): Only allow uint32_t? size_t key_length; - uint32_t refs; + bool in_cache; // Whether entry is in the cache. + uint32_t refs; // References, including cache reference, if present. uint32_t hash; // Hash of key(); used for fast sharding and comparisons char key_data[1]; // Beginning of key @@ -147,49 +165,77 @@ class LRUCache { Cache::Handle* Lookup(const Slice& key, uint32_t hash); void Release(Cache::Handle* handle); void Erase(const Slice& key, uint32_t hash); + void Prune(); + size_t TotalCharge() const { + MutexLock l(&mutex_); + return usage_; + } private: void LRU_Remove(LRUHandle* e); - void LRU_Append(LRUHandle* e); + void LRU_Append(LRUHandle*list, LRUHandle* e); + void Ref(LRUHandle* e); void Unref(LRUHandle* e); + bool FinishErase(LRUHandle* e); // Initialized before use. size_t capacity_; // mutex_ protects the following state. - port::Mutex mutex_; + mutable port::Mutex mutex_; size_t usage_; // Dummy head of LRU list. // lru.prev is newest entry, lru.next is oldest entry. + // Entries have refs==1 and in_cache==true. LRUHandle lru_; + // Dummy head of in-use list. + // Entries are in use by clients, and have refs >= 2 and in_cache==true. + LRUHandle in_use_; + HandleTable table_; }; LRUCache::LRUCache() : usage_(0) { - // Make empty circular linked list + // Make empty circular linked lists. 
lru_.next = &lru_; lru_.prev = &lru_; + in_use_.next = &in_use_; + in_use_.prev = &in_use_; } LRUCache::~LRUCache() { + assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle for (LRUHandle* e = lru_.next; e != &lru_; ) { LRUHandle* next = e->next; - assert(e->refs == 1); // Error if caller has an unreleased handle + assert(e->in_cache); + e->in_cache = false; + assert(e->refs == 1); // Invariant of lru_ list. Unref(e); e = next; } } +void LRUCache::Ref(LRUHandle* e) { + if (e->refs == 1 && e->in_cache) { // If on lru_ list, move to in_use_ list. + LRU_Remove(e); + LRU_Append(&in_use_, e); + } + e->refs++; +} + void LRUCache::Unref(LRUHandle* e) { assert(e->refs > 0); e->refs--; - if (e->refs <= 0) { - usage_ -= e->charge; + if (e->refs == 0) { // Deallocate. + assert(!e->in_cache); (*e->deleter)(e->key(), e->value); free(e); + } else if (e->in_cache && e->refs == 1) { // No longer in use; move to lru_ list. + LRU_Remove(e); + LRU_Append(&lru_, e); } } @@ -198,10 +244,10 @@ void LRUCache::LRU_Remove(LRUHandle* e) { e->prev->next = e->next; } -void LRUCache::LRU_Append(LRUHandle* e) { - // Make "e" newest entry by inserting just before lru_ - e->next = &lru_; - e->prev = lru_.prev; +void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) { + // Make "e" newest entry by inserting just before *list + e->next = list; + e->prev = list->prev; e->prev->next = e; e->next->prev = e; } @@ -210,9 +256,7 @@ Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) { MutexLock l(&mutex_); LRUHandle* e = table_.Lookup(key, hash); if (e != NULL) { - e->refs++; - LRU_Remove(e); - LRU_Append(e); + Ref(e); } return reinterpret_cast<Cache::Handle*>(e); } @@ -234,34 +278,58 @@ Cache::Handle* LRUCache::Insert( e->charge = charge; e->key_length = key.size(); e->hash = hash; - e->refs = 2; // One from LRUCache, one for the returned handle + e->in_cache = false; + e->refs = 1; // for the returned handle. memcpy(e->key_data, key.data(), key.size()); - LRU_Append(e); - usage_ += charge; - LRUHandle* old = table_.Insert(e); - if (old != NULL) { - LRU_Remove(old); - Unref(old); - } + if (capacity_ > 0) { + e->refs++; // for the cache's reference. + e->in_cache = true; + LRU_Append(&in_use_, e); + usage_ += charge; + FinishErase(table_.Insert(e)); + } // else don't cache. (Tests use capacity_==0 to turn off caching.) while (usage_ > capacity_ && lru_.next != &lru_) { LRUHandle* old = lru_.next; - LRU_Remove(old); - table_.Remove(old->key(), old->hash); - Unref(old); + assert(old->refs == 1); + bool erased = FinishErase(table_.Remove(old->key(), old->hash)); + if (!erased) { // to avoid unused variable when compiled NDEBUG + assert(erased); + } } return reinterpret_cast<Cache::Handle*>(e); } -void LRUCache::Erase(const Slice& key, uint32_t hash) { - MutexLock l(&mutex_); - LRUHandle* e = table_.Remove(key, hash); +// If e != NULL, finish removing *e from the cache; it has already been removed +// from the hash table. Return whether e != NULL. Requires mutex_ held. 
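As an aside on the in_use_/lru_ split introduced here: seen through the public Cache interface, entries with an outstanding handle are never evicted or pruned, and only Release() makes them reclaimable again. A minimal sketch using only calls from include/leveldb/cache.h; the key, value and capacity are made up for illustration and are not part of the patch.

    #include <string>
    #include "leveldb/cache.h"

    static void DeleteString(const leveldb::Slice& /*key*/, void* value) {
      delete static_cast<std::string*>(value);
    }

    void PinnedEntriesSurvivePrune() {
      leveldb::Cache* cache = leveldb::NewLRUCache(1024 /*total charge*/);
      leveldb::Cache::Handle* h =
          cache->Insert(leveldb::Slice("key"), new std::string("v"),
                        1 /*charge*/, &DeleteString);
      cache->Prune();      // "key" is in use (refs >= 2), so it survives.
      cache->Release(h);   // back on the LRU list with refs == 1...
      cache->Prune();      // ...where Prune() (and eviction) may now drop it.
      delete cache;
    }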
+bool LRUCache::FinishErase(LRUHandle* e) { if (e != NULL) { + assert(e->in_cache); LRU_Remove(e); + e->in_cache = false; + usage_ -= e->charge; Unref(e); } + return e != NULL; +} + +void LRUCache::Erase(const Slice& key, uint32_t hash) { + MutexLock l(&mutex_); + FinishErase(table_.Remove(key, hash)); +} + +void LRUCache::Prune() { + MutexLock l(&mutex_); + while (lru_.next != &lru_) { + LRUHandle* e = lru_.next; + assert(e->refs == 1); + bool erased = FinishErase(table_.Remove(e->key(), e->hash)); + if (!erased) { // to avoid unused variable when compiled NDEBUG + assert(erased); + } + } } static const int kNumShardBits = 4; @@ -314,6 +382,18 @@ class ShardedLRUCache : public Cache { MutexLock l(&id_mutex_); return ++(last_id_); } + virtual void Prune() { + for (int s = 0; s < kNumShards; s++) { + shard_[s].Prune(); + } + } + virtual size_t TotalCharge() const { + size_t total = 0; + for (int s = 0; s < kNumShards; s++) { + total += shard_[s].TotalCharge(); + } + return total; + } }; } // end anonymous namespace diff --git a/src/leveldb/util/cache_test.cc b/src/leveldb/util/cache_test.cc index 43716715a8..468f7a6425 100644 --- a/src/leveldb/util/cache_test.cc +++ b/src/leveldb/util/cache_test.cc @@ -59,6 +59,11 @@ class CacheTest { &CacheTest::Deleter)); } + Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) { + return cache_->Insert(EncodeKey(key), EncodeValue(value), charge, + &CacheTest::Deleter); + } + void Erase(int key) { cache_->Erase(EncodeKey(key)); } @@ -135,8 +140,11 @@ TEST(CacheTest, EntriesArePinned) { TEST(CacheTest, EvictionPolicy) { Insert(100, 101); Insert(200, 201); + Insert(300, 301); + Cache::Handle* h = cache_->Lookup(EncodeKey(300)); - // Frequently used entry must be kept around + // Frequently used entry must be kept around, + // as must things that are still in use. for (int i = 0; i < kCacheSize + 100; i++) { Insert(1000+i, 2000+i); ASSERT_EQ(2000+i, Lookup(1000+i)); @@ -144,6 +152,25 @@ TEST(CacheTest, EvictionPolicy) { } ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(-1, Lookup(200)); + ASSERT_EQ(301, Lookup(300)); + cache_->Release(h); +} + +TEST(CacheTest, UseExceedsCacheSize) { + // Overfill the cache, keeping handles on all inserted entries. + std::vector<Cache::Handle*> h; + for (int i = 0; i < kCacheSize + 100; i++) { + h.push_back(InsertAndReturnHandle(1000+i, 2000+i)); + } + + // Check that all the entries can be found in the cache. 
+ for (int i = 0; i < h.size(); i++) { + ASSERT_EQ(2000+i, Lookup(1000+i)); + } + + for (int i = 0; i < h.size(); i++) { + cache_->Release(h[i]); + } } TEST(CacheTest, HeavyEntries) { @@ -179,6 +206,19 @@ TEST(CacheTest, NewId) { ASSERT_NE(a, b); } +TEST(CacheTest, Prune) { + Insert(1, 100); + Insert(2, 200); + + Cache::Handle* handle = cache_->Lookup(EncodeKey(1)); + ASSERT_TRUE(handle); + cache_->Prune(); + cache_->Release(handle); + + ASSERT_EQ(100, Lookup(1)); + ASSERT_EQ(-1, Lookup(2)); +} + } // namespace leveldb int main(int argc, char** argv) { diff --git a/src/leveldb/util/env.cc b/src/leveldb/util/env.cc index c2600e964a..c58a0821ef 100644 --- a/src/leveldb/util/env.cc +++ b/src/leveldb/util/env.cc @@ -9,6 +9,10 @@ namespace leveldb { Env::~Env() { } +Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) { + return Status::NotSupported("NewAppendableFile", fname); +} + SequentialFile::~SequentialFile() { } diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc index ba2667864a..e0fca52f46 100644 --- a/src/leveldb/util/env_posix.cc +++ b/src/leveldb/util/env_posix.cc @@ -351,6 +351,19 @@ class PosixEnv : public Env { return s; } + virtual Status NewAppendableFile(const std::string& fname, + WritableFile** result) { + Status s; + FILE* f = fopen(fname.c_str(), "a"); + if (f == NULL) { + *result = NULL; + s = IOError(fname, errno); + } else { + *result = new PosixWritableFile(fname, f); + } + return s; + } + virtual bool FileExists(const std::string& fname) { return access(fname.c_str(), F_OK) == 0; } diff --git a/src/leveldb/util/env_win.cc b/src/leveldb/util/env_win.cc index e11a96b791..b074b7579e 100644 --- a/src/leveldb/util/env_win.cc +++ b/src/leveldb/util/env_win.cc @@ -106,7 +106,7 @@ private: class Win32WritableFile : public WritableFile { public: - Win32WritableFile(const std::string& fname); + Win32WritableFile(const std::string& fname, bool append); ~Win32WritableFile(); virtual Status Append(const Slice& data); @@ -158,6 +158,8 @@ public: RandomAccessFile** result); virtual Status NewWritableFile(const std::string& fname, WritableFile** result); + virtual Status NewAppendableFile(const std::string& fname, + WritableFile** result); virtual bool FileExists(const std::string& fname); @@ -423,17 +425,23 @@ void Win32RandomAccessFile::_CleanUp() } } -Win32WritableFile::Win32WritableFile(const std::string& fname) +Win32WritableFile::Win32WritableFile(const std::string& fname, bool append) : filename_(fname) { std::wstring path; ToWidePath(fname, path); - DWORD Flag = PathFileExistsW(path.c_str()) ? OPEN_EXISTING : CREATE_ALWAYS; + // NewAppendableFile: append to an existing file, or create a new one + // if none exists - this is OPEN_ALWAYS behavior, with + // FILE_APPEND_DATA to avoid having to manually position the file + // pointer at the end of the file. + // NewWritableFile: create a new file, delete if it exists - this is + // CREATE_ALWAYS behavior. This file is used for writing only so + // use GENERIC_WRITE. _hFile = CreateFileW(path.c_str(), - GENERIC_READ | GENERIC_WRITE, + append ? FILE_APPEND_DATA : GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_DELETE|FILE_SHARE_WRITE, NULL, - Flag, + append ? 
OPEN_ALWAYS : CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); // CreateFileW returns INVALID_HANDLE_VALUE in case of error, always check isEnable() before use @@ -823,7 +831,9 @@ Status Win32Env::NewLogger( const std::string& fname, Logger** result ) { Status sRet; std::string path = fname; - Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path)); + // Logs are opened with write semantics, not with append semantics + // (see PosixEnv::NewLogger) + Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path), false); if(!pMapFile->isEnable()){ delete pMapFile; *result = NULL; @@ -837,7 +847,20 @@ Status Win32Env::NewWritableFile( const std::string& fname, WritableFile** resul { Status sRet; std::string path = fname; - Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path)); + Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), false); + if(!pFile->isEnable()){ + *result = NULL; + sRet = Status::IOError(fname,Win32::GetLastErrSz()); + }else + *result = pFile; + return sRet; +} + +Status Win32Env::NewAppendableFile( const std::string& fname, WritableFile** result ) +{ + Status sRet; + std::string path = fname; + Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), true); if(!pFile->isEnable()){ *result = NULL; sRet = Status::IOError(fname,Win32::GetLastErrSz()); diff --git a/src/leveldb/util/options.cc b/src/leveldb/util/options.cc index 76af5b9302..8b618fb1ae 100644 --- a/src/leveldb/util/options.cc +++ b/src/leveldb/util/options.cc @@ -22,8 +22,8 @@ Options::Options() block_size(4096), block_restart_interval(16), compression(kSnappyCompression), + reuse_logs(false), filter_policy(NULL) { } - } // namespace leveldb diff --git a/src/leveldb/util/testutil.h b/src/leveldb/util/testutil.h index adad3fc1ea..d7e4583702 100644 --- a/src/leveldb/util/testutil.h +++ b/src/leveldb/util/testutil.h @@ -45,6 +45,16 @@ class ErrorEnv : public EnvWrapper { } return target()->NewWritableFile(fname, result); } + + virtual Status NewAppendableFile(const std::string& fname, + WritableFile** result) { + if (writable_file_error_) { + ++num_writable_file_errors_; + *result = NULL; + return Status::IOError(fname, "fake error"); + } + return target()->NewAppendableFile(fname, result); + } }; } // namespace test diff --git a/src/memusage.h b/src/memusage.h index 3810bfad07..2e3c5a9b92 100644 --- a/src/memusage.h +++ b/src/memusage.h @@ -1,4 +1,4 @@ -// Copyright (c) 2015 The Bitcoin developers +// Copyright (c) 2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
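Taken together, the Env::NewAppendableFile hook and the new Options::reuse_logs flag above are what the recovery tests exercise. A minimal usage sketch follows; the database path is a placeholder and nothing in it is copied from the patch. Enabling reuse is a one-line opt-in, and on an Env whose NewAppendableFile still returns NotSupported (the new base-class default in util/env.cc) leveldb simply falls back to writing fresh log and MANIFEST files, as VersionSet::ReuseManifest and RecoveryTest::CanAppend show.

    #include <stdio.h>

    #include "leveldb/db.h"
    #include "leveldb/options.h"

    int main() {
      leveldb::Options options;
      options.create_if_missing = true;
      options.reuse_logs = true;  // experimental; the default stays false (util/options.cc)

      leveldb::DB* db = NULL;
      leveldb::Status s = leveldb::DB::Open(options, "/tmp/reuse_demo", &db);
      if (!s.ok()) {
        fprintf(stderr, "open failed: %s\n", s.ToString().c_str());
        return 1;
      }
      // Subsequent opens of this database may now append to the existing log
      // and MANIFEST instead of rewriting them, when the Env supports it.
      delete db;
      return 0;
    }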
diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp index 31332526a9..882717ac56 100644 --- a/src/merkleblock.cpp +++ b/src/merkleblock.cpp @@ -23,8 +23,8 @@ CMerkleBlock::CMerkleBlock(const CBlock& block, CBloomFilter& filter) for (unsigned int i = 0; i < block.vtx.size(); i++) { - const uint256& hash = block.vtx[i].GetHash(); - if (filter.IsRelevantAndUpdate(block.vtx[i])) + const uint256& hash = block.vtx[i]->GetHash(); + if (filter.IsRelevantAndUpdate(*block.vtx[i])) { vMatch.push_back(true); vMatchedTxn.push_back(make_pair(i, hash)); @@ -49,7 +49,7 @@ CMerkleBlock::CMerkleBlock(const CBlock& block, const std::set<uint256>& txids) for (unsigned int i = 0; i < block.vtx.size(); i++) { - const uint256& hash = block.vtx[i].GetHash(); + const uint256& hash = block.vtx[i]->GetHash(); if (txids.count(hash)) vMatch.push_back(true); else diff --git a/src/merkleblock.h b/src/merkleblock.h index 835cbcce55..17c33194a9 100644 --- a/src/merkleblock.h +++ b/src/merkleblock.h @@ -85,7 +85,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(nTransactions); READWRITE(vHash); std::vector<unsigned char> vBytes; @@ -148,7 +148,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(header); READWRITE(txn); } diff --git a/src/miner.cpp b/src/miner.cpp index ebf2f21ffd..e80e8a2656 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -13,7 +13,7 @@ #include "consensus/merkle.h" #include "consensus/validation.h" #include "hash.h" -#include "main.h" +#include "validation.h" #include "net.h" #include "policy/policy.h" #include "pow.h" @@ -134,7 +134,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc pblock = &pblocktemplate->block; // pointer for convenience // Add dummy coinbase tx as first transaction - pblock->vtx.push_back(CTransaction()); + pblock->vtx.emplace_back(); pblocktemplate->vTxFees.push_back(-1); // updated at end pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end @@ -169,7 +169,6 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc nLastBlockTx = nBlockTx; nLastBlockSize = nBlockSize; nLastBlockWeight = nBlockWeight; - LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOpsCost); // Create coinbase transaction. 
CMutableTransaction coinbaseTx; @@ -179,16 +178,19 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn; coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus()); coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0; - pblock->vtx[0] = coinbaseTx; + pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx)); pblocktemplate->vchCoinbaseCommitment = GenerateCoinbaseCommitment(*pblock, pindexPrev, chainparams.GetConsensus()); pblocktemplate->vTxFees[0] = -nFees; + uint64_t nSerializeSize = GetSerializeSize(*pblock, SER_NETWORK, PROTOCOL_VERSION); + LogPrintf("CreateNewBlock(): total size: %u block weight: %u txs: %u fees: %ld sigops %d\n", nSerializeSize, GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost); + // Fill in header pblock->hashPrevBlock = pindexPrev->GetBlockHash(); UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev); pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus()); pblock->nNonce = 0; - pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(pblock->vtx[0]); + pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]); CValidationState state; if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) { @@ -310,7 +312,7 @@ bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter) void BlockAssembler::AddToBlock(CTxMemPool::txiter iter) { - pblock->vtx.push_back(iter->GetTx()); + pblock->vtx.emplace_back(iter->GetSharedTx()); pblocktemplate->vTxFees.push_back(iter->GetFee()); pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost()); if (fNeedSizeAccounting) { @@ -599,10 +601,10 @@ void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned } ++nExtraNonce; unsigned int nHeight = pindexPrev->nHeight+1; // Height first in coinbase required for block.version=2 - CMutableTransaction txCoinbase(pblock->vtx[0]); + CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.vin[0].scriptSig = (CScript() << nHeight << CScriptNum(nExtraNonce)) + COINBASE_FLAGS; assert(txCoinbase.vin[0].scriptSig.size() <= 100); - pblock->vtx[0] = txCoinbase; + pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); } diff --git a/src/net.cpp b/src/net.cpp index 1bca168d1d..27b200b3f0 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -64,6 +64,7 @@ const static std::string NET_MESSAGE_COMMAND_OTHER = "*other*"; static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8] +static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[0:8] // // Global state variables // @@ -73,7 +74,6 @@ bool fRelayTxes = true; CCriticalSection cs_mapLocalHost; std::map<CNetAddr, LocalServiceInfo> mapLocalHost; static bool vfLimited[NET_MAX] = {}; -static CNode* pnodeLocalHost = NULL; std::string strSubVersion; limitedmap<uint256, int64_t> mapAlreadyAskedFor(MAX_INV_SZ); @@ -389,18 +389,18 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo addrman.Attempt(addrConnect, fCountFailure); // Add node - CNode* pnode = new CNode(GetNewNodeId(), nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), pszDest ? 
pszDest : "", false); - GetNodeSignals().InitializeNode(pnode->GetId(), pnode); + NodeId id = GetNewNodeId(); + uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); + CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, pszDest ? pszDest : "", false); + pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices); + pnode->nTimeConnected = GetTime(); pnode->AddRef(); - + GetNodeSignals().InitializeNode(pnode, *this); { LOCK(cs_vNodes); vNodes.push_back(pnode); } - pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices); - pnode->nTimeConnected = GetTime(); - return pnode; } else if (!proxyConnectionFailed) { // If connecting to the node failed, and failure is not caused by a problem connecting to @@ -446,23 +446,6 @@ void CNode::CloseSocketDisconnect() vRecvMsg.clear(); } -void CNode::PushVersion() -{ - int64_t nTime = (fInbound ? GetAdjustedTime() : GetTime()); - CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices)); - CAddress addrMe = CAddress(CService(), nLocalServices); - if (fLogIPs) - LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nMyStartingHeight, addrMe.ToString(), addrYou.ToString(), id); - else - LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nMyStartingHeight, addrMe.ToString(), id); - PushMessage(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalServices, nTime, addrYou, addrMe, - nLocalHostNonce, strSubVersion, nMyStartingHeight, ::fRelayTxes); -} - - - - - void CConnman::ClearBanned() { { @@ -758,12 +741,21 @@ int CNetMessage::readData(const char *pch, unsigned int nBytes) vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } + hasher.Write((const unsigned char*)pch, nCopy); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; return nCopy; } +const uint256& CNetMessage::GetMessageHash() const +{ + assert(complete()); + if (data_hash.IsNull()) + hasher.Finalize(data_hash.begin()); + return data_hash; +} + @@ -775,13 +767,13 @@ int CNetMessage::readData(const char *pch, unsigned int nBytes) // requires LOCK(cs_vSend) size_t SocketSendData(CNode *pnode) { - std::deque<CSerializeData>::iterator it = pnode->vSendMsg.begin(); + auto it = pnode->vSendMsg.begin(); size_t nSentSize = 0; while (it != pnode->vSendMsg.end()) { - const CSerializeData &data = *it; + const auto &data = *it; assert(data.size() > pnode->nSendOffset); - int nBytes = send(pnode->hSocket, &data[pnode->nSendOffset], data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); + int nBytes = send(pnode->hSocket, reinterpret_cast<const char*>(data.data()) + pnode->nSendOffset, data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); if (nBytes > 0) { pnode->nLastSend = GetTime(); pnode->nSendBytes += nBytes; @@ -825,7 +817,7 @@ struct NodeEvictionCandidate int64_t nMinPingUsecTime; int64_t nLastBlockTime; int64_t nLastTXTime; - bool fNetworkNode; + bool fRelevantServices; bool fRelayTxes; bool fBloomFilter; CAddress addr; @@ -850,7 +842,7 @@ static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvict { // There is a fall-through here because it is common for a node to have many peers which have not yet relayed a block. 
if (a.nLastBlockTime != b.nLastBlockTime) return a.nLastBlockTime < b.nLastBlockTime; - if (a.fNetworkNode != b.fNetworkNode) return b.fNetworkNode; + if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices; return a.nTimeConnected > b.nTimeConnected; } @@ -885,7 +877,8 @@ bool CConnman::AttemptToEvictConnection() if (node->fDisconnect) continue; NodeEvictionCandidate candidate = {node->id, node->nTimeConnected, node->nMinPingUsecTime, - node->nLastBlockTime, node->nLastTXTime, node->fNetworkNode, + node->nLastBlockTime, node->nLastTXTime, + (node->nServices & nRelevantServices) == nRelevantServices, node->fRelayTxes, node->pfilter != NULL, node->addr, node->nKeyedNetGroup}; vEvictionCandidates.push_back(candidate); } @@ -970,7 +963,6 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { CAddress addr; int nInbound = 0; int nMaxInbound = nMaxConnections - (nMaxOutbound + nMaxFeeler); - assert(nMaxInbound > 0); if (hSocket != INVALID_SOCKET) if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr)) @@ -992,6 +984,12 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { return; } + if (!fNetworkActive) { + LogPrintf("connection from %s dropped: not accepting new connections\n", addr.ToString()); + CloseSocket(hSocket); + return; + } + if (!IsSelectableSocket(hSocket)) { LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString()); @@ -1025,10 +1023,13 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { } } - CNode* pnode = new CNode(GetNewNodeId(), nLocalServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), "", true); - GetNodeSignals().InitializeNode(pnode->GetId(), pnode); + NodeId id = GetNewNodeId(); + uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); + + CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, "", true); pnode->AddRef(); pnode->fWhitelisted = whitelisted; + GetNodeSignals().InitializeNode(pnode, *this); LogPrint("net", "connection from %s accepted\n", addr.ToString()); @@ -1053,7 +1054,7 @@ void CConnman::ThreadSocketHandler() BOOST_FOREACH(CNode* pnode, vNodesCopy) { if (pnode->fDisconnect || - (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->nSendSize == 0 && pnode->ssSend.empty())) + (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->nSendSize == 0)) { // remove from vNodes vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end()); @@ -1065,8 +1066,7 @@ void CConnman::ThreadSocketHandler() pnode->CloseSocketDisconnect(); // hold in disconnected pool until all refs are released - if (pnode->fNetworkNode || pnode->fInbound) - pnode->Release(); + pnode->Release(); vNodesDisconnected.push_back(pnode); } } @@ -1101,8 +1101,13 @@ void CConnman::ThreadSocketHandler() } } } - if(vNodes.size() != nPrevNodeCount) { - nPrevNodeCount = vNodes.size(); + size_t vNodesSize; + { + LOCK(cs_vNodes); + vNodesSize = vNodes.size(); + } + if(vNodesSize != nPrevNodeCount) { + nPrevNodeCount = vNodesSize; if(clientInterface) clientInterface->NotifyNumConnectionsChanged(nPrevNodeCount); } @@ -1157,10 +1162,6 @@ void CConnman::ThreadSocketHandler() { TRY_LOCK(pnode->cs_vSend, lockSend); if (lockSend) { - if (pnode->nOptimisticBytesWritten) { - RecordBytesSent(pnode->nOptimisticBytesWritten); - pnode->nOptimisticBytesWritten = 0; - } if (!pnode->vSendMsg.empty()) { FD_SET(pnode->hSocket, &fdsetSend); continue; @@ -1792,6 +1793,9 @@ bool 
CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai // Initiate outbound network connection // boost::this_thread::interruption_point(); + if (!fNetworkActive) { + return false; + } if (!pszDest) { if (IsLocal(addrConnect) || FindNode((CNetAddr)addrConnect) || IsBanned(addrConnect) || @@ -1807,7 +1811,6 @@ bool CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai return false; if (grantOutbound) grantOutbound->MoveTo(pnode->grantOutbound); - pnode->fNetworkNode = true; if (fOneShot) pnode->fOneShot = true; if (fFeeler) @@ -2033,8 +2036,30 @@ void Discover(boost::thread_group& threadGroup) #endif } +void CConnman::SetNetworkActive(bool active) +{ + if (fDebug) { + LogPrint("net", "SetNetworkActive: %s\n", active); + } + + if (!active) { + fNetworkActive = false; + + LOCK(cs_vNodes); + // Close sockets to all nodes + BOOST_FOREACH(CNode* pnode, vNodes) { + pnode->CloseSocketDisconnect(); + } + } else { + fNetworkActive = true; + } + + uiInterface.NotifyNetworkActiveChanged(fNetworkActive); +} + CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In) : nSeed0(nSeed0In), nSeed1(nSeed1In) { + fNetworkActive = true; setBannedIsDirty = false; fAddressesInitialized = false; nLastNodeId = 0; @@ -2116,13 +2141,6 @@ bool CConnman::Start(boost::thread_group& threadGroup, CScheduler& scheduler, st semOutbound = new CSemaphore(std::min((nMaxOutbound + nMaxFeeler), nMaxConnections)); } - if (pnodeLocalHost == NULL) { - CNetAddr local; - LookupHost("127.0.0.1", local, false); - pnodeLocalHost = new CNode(GetNewNodeId(), nLocalServices, GetBestHeight(), INVALID_SOCKET, CAddress(CService(local, 0), nLocalServices), 0); - GetNodeSignals().InitializeNode(pnodeLocalHost->GetId(), pnodeLocalHost); - } - // // Start threads // @@ -2138,8 +2156,9 @@ bool CConnman::Start(boost::thread_group& threadGroup, CScheduler& scheduler, st // Initiate outbound connections from -addnode threadGroup.create_thread(boost::bind(&TraceThread<boost::function<void()> >, "addcon", boost::function<void()>(boost::bind(&CConnman::ThreadOpenAddedConnections, this)))); - // Initiate outbound connections - threadGroup.create_thread(boost::bind(&TraceThread<boost::function<void()> >, "opencon", boost::function<void()>(boost::bind(&CConnman::ThreadOpenConnections, this)))); + // Initiate outbound connections unless connect=0 + if (!mapArgs.count("-connect") || mapMultiArgs["-connect"].size() != 1 || mapMultiArgs["-connect"][0] != "0") + threadGroup.create_thread(boost::bind(&TraceThread<boost::function<void()> >, "opencon", boost::function<void()>(boost::bind(&CConnman::ThreadOpenConnections, this)))); // Process messages threadGroup.create_thread(boost::bind(&TraceThread<boost::function<void()> >, "msghand", boost::function<void()>(boost::bind(&CConnman::ThreadMessageHandler, this)))); @@ -2199,9 +2218,6 @@ void CConnman::Stop() vhListenSocket.clear(); delete semOutbound; semOutbound = NULL; - if(pnodeLocalHost) - DeleteNode(pnodeLocalHost); - pnodeLocalHost = NULL; } void CConnman::DeleteNode(CNode* pnode) @@ -2471,50 +2487,20 @@ int CConnman::GetBestHeight() const return nBestHeight.load(std::memory_order_acquire); } -void CNode::Fuzz(int nChance) -{ - if (!fSuccessfullyConnected) return; // Don't fuzz initial handshake - if (GetRand(nChance) != 0) return; // Fuzz 1 of every nChance messages - - switch (GetRand(3)) - { - case 0: - // xor a random byte with a random value: - if (!ssSend.empty()) { - CDataStream::size_type pos = GetRand(ssSend.size()); - ssSend[pos] ^= (unsigned 
char)(GetRand(256)); - } - break; - case 1: - // delete a random byte: - if (!ssSend.empty()) { - CDataStream::size_type pos = GetRand(ssSend.size()); - ssSend.erase(ssSend.begin()+pos); - } - break; - case 2: - // insert a random byte at a random position - { - CDataStream::size_type pos = GetRand(ssSend.size()); - char ch = (char)GetRand(256); - ssSend.insert(ssSend.begin()+pos, ch); - } - break; - } - // Chance of more than one change half the time: - // (more changes exponentially less likely): - Fuzz(2); -} - unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } unsigned int CConnman::GetSendBufferSize() const{ return nSendBufferMaxSize; } -CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, const std::string& addrNameIn, bool fInboundIn) : - ssSend(SER_NETWORK, INIT_PROTO_VERSION), +CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const std::string& addrNameIn, bool fInboundIn) : addr(addrIn), + fInbound(fInboundIn), + id(idIn), nKeyedNetGroup(nKeyedNetGroupIn), addrKnown(5000, 0.001), - filterInventoryKnown(50000, 0.000001) + filterInventoryKnown(50000, 0.000001), + nLocalHostNonce(nLocalHostNonceIn), + nLocalServices(nLocalServicesIn), + nMyStartingHeight(nMyStartingHeightIn), + nSendVersion(0) { nServices = NODE_NONE; nServicesExpected = NODE_NONE; @@ -2533,8 +2519,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn fOneShot = false; fClient = false; // set by version message fFeeler = false; - fInbound = fInboundIn; - fNetworkNode = false; fSuccessfullyConnected = false; fDisconnect = false; nRefCount = 0; @@ -2562,12 +2546,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn minFeeFilter = 0; lastSentFeeFilter = 0; nextSendTimeFeeFilter = 0; - id = idIn; - nOptimisticBytesWritten = 0; - nLocalServices = nLocalServicesIn; - - GetRandBytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce)); - nMyStartingHeight = nMyStartingHeightIn; BOOST_FOREACH(const std::string &msg, getAllNetMessageTypes()) mapRecvBytesPerMsgCmd[msg] = 0; @@ -2577,10 +2555,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn LogPrint("net", "Added connection to %s peer=%d\n", addrName, id); else LogPrint("net", "Added connection peer=%d\n", id); - - // Be shy and don't send version until we hear - if (hSocket != INVALID_SOCKET && !fInbound) - PushVersion(); } CNode::~CNode() @@ -2625,65 +2599,42 @@ void CNode::AskFor(const CInv& inv) mapAskFor.insert(std::make_pair(nRequestTime, inv)); } -void CNode::BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend) +void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) { - ENTER_CRITICAL_SECTION(cs_vSend); - assert(ssSend.size() == 0); - ssSend << CMessageHeader(Params().MessageStart(), pszCommand, 0); - LogPrint("net", "sending: %s ", SanitizeString(pszCommand)); -} + size_t nMessageSize = msg.data.size(); + size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE; + LogPrint("net", "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id); -void CNode::AbortMessage() UNLOCK_FUNCTION(cs_vSend) -{ - ssSend.clear(); + std::vector<unsigned char> serializedHeader; + serializedHeader.reserve(CMessageHeader::HEADER_SIZE); + uint256 hash = 
Hash(msg.data.data(), msg.data.data() + nMessageSize); + CMessageHeader hdr(Params().MessageStart(), msg.command.c_str(), nMessageSize); + memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE); - LEAVE_CRITICAL_SECTION(cs_vSend); - - LogPrint("net", "(aborted)\n"); -} - -void CNode::EndMessage(const char* pszCommand) UNLOCK_FUNCTION(cs_vSend) -{ - // The -*messagestest options are intentionally not documented in the help message, - // since they are only used during development to debug the networking code and are - // not intended for end-users. - if (mapArgs.count("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 2)) == 0) - { - LogPrint("net", "dropmessages DROPPING SEND MESSAGE\n"); - AbortMessage(); - return; - } - if (mapArgs.count("-fuzzmessagestest")) - Fuzz(GetArg("-fuzzmessagestest", 10)); + CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr}; - if (ssSend.size() == 0) + size_t nBytesSent = 0; { - LEAVE_CRITICAL_SECTION(cs_vSend); - return; - } - // Set the size - unsigned int nSize = ssSend.size() - CMessageHeader::HEADER_SIZE; - WriteLE32((uint8_t*)&ssSend[CMessageHeader::MESSAGE_SIZE_OFFSET], nSize); - - //log total amount of bytes per command - mapSendBytesPerMsgCmd[std::string(pszCommand)] += nSize + CMessageHeader::HEADER_SIZE; - - // Set the checksum - uint256 hash = Hash(ssSend.begin() + CMessageHeader::HEADER_SIZE, ssSend.end()); - assert(ssSend.size () >= CMessageHeader::CHECKSUM_OFFSET + CMessageHeader::CHECKSUM_SIZE); - memcpy((char*)&ssSend[CMessageHeader::CHECKSUM_OFFSET], hash.begin(), CMessageHeader::CHECKSUM_SIZE); - - LogPrint("net", "(%d bytes) peer=%d\n", nSize, id); + LOCK(pnode->cs_vSend); + if(pnode->hSocket == INVALID_SOCKET) { + return; + } + bool optimisticSend(pnode->vSendMsg.empty()); - std::deque<CSerializeData>::iterator it = vSendMsg.insert(vSendMsg.end(), CSerializeData()); - ssSend.GetAndClear(*it); - nSendSize += (*it).size(); + //log total amount of bytes per command + pnode->mapSendBytesPerMsgCmd[msg.command] += nTotalSize; + pnode->nSendSize += nTotalSize; - // If write queue empty, attempt "optimistic write" - if (it == vSendMsg.begin()) - nOptimisticBytesWritten += SocketSendData(this); + pnode->vSendMsg.push_back(std::move(serializedHeader)); + if (nMessageSize) + pnode->vSendMsg.push_back(std::move(msg.data)); - LEAVE_CRITICAL_SECTION(cs_vSend); + // If write queue empty, attempt "optimistic write" + if (optimisticSend == true) + nBytesSent = SocketSendData(pnode); + } + if (nBytesSent) + RecordBytesSent(nBytesSent); } bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func) @@ -101,6 +101,20 @@ class CTransaction; class CNodeStats; class CClientUIInterface; +struct CSerializedNetMsg +{ + CSerializedNetMsg() = default; + CSerializedNetMsg(CSerializedNetMsg&&) = default; + CSerializedNetMsg& operator=(CSerializedNetMsg&&) = default; + // No copying, only moves. 
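
The send-path rewrite above moves serialization out of CNode: callers hand CConnman a pre-serialized, move-only payload, CConnman::PushMessage computes the header and checksum itself, and an immediate socket write is only attempted when the peer's send queue was empty. A minimal standalone sketch of that queue-plus-optimistic-write pattern follows; SerializedMsg, ToyNode and FlushQueue are invented stand-ins, not the project's classes, and the CSerializedNetMsg declaration from the diff continues directly below the sketch.

    #include <deque>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Toy stand-ins; the real code uses CSerializedNetMsg, CNode and a
    // double-SHA256-based checksum in the message header.
    struct SerializedMsg {
        std::string command;
        std::vector<unsigned char> data;
    };

    struct ToyNode {
        std::deque<std::vector<unsigned char>> sendQueue; // header and payload queued separately
        size_t bytesQueued = 0;
    };

    // Pretend "socket write": report how many bytes would have been flushed.
    static size_t FlushQueue(ToyNode& node)
    {
        size_t sent = 0;
        while (!node.sendQueue.empty()) {
            sent += node.sendQueue.front().size();
            node.sendQueue.pop_front();
        }
        return sent;
    }

    static void PushMessageSketch(ToyNode& node, SerializedMsg&& msg)
    {
        // Build a fake 24-byte header; the real one carries network magic,
        // command, payload length and the first bytes of the payload hash.
        std::vector<unsigned char> header(24, 0x00);
        header[0] = static_cast<unsigned char>(msg.data.size() & 0xff);

        const bool optimisticSend = node.sendQueue.empty();
        node.bytesQueued += header.size() + msg.data.size();
        node.sendQueue.push_back(std::move(header));
        if (!msg.data.empty()) node.sendQueue.push_back(std::move(msg.data));

        // Only try an immediate write when nothing was already queued,
        // mirroring the "optimistic write" in the patch.
        if (optimisticSend) {
            std::cout << "flushed " << FlushQueue(node) << " bytes for " << msg.command << "\n";
        }
    }

    int main()
    {
        ToyNode node;
        PushMessageSketch(node, SerializedMsg{"ping", {1, 2, 3, 4}});
    }
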
+ CSerializedNetMsg(const CSerializedNetMsg& msg) = delete; + CSerializedNetMsg& operator=(const CSerializedNetMsg&) = delete; + + std::vector<unsigned char> data; + std::string command; +}; + + class CConnman { public: @@ -131,11 +145,15 @@ public: bool Start(boost::thread_group& threadGroup, CScheduler& scheduler, std::string& strNodeError, Options options); void Stop(); bool BindListenPort(const CService &bindAddr, std::string& strError, bool fWhitelisted = false); + bool GetNetworkActive() const { return fNetworkActive; }; + void SetNetworkActive(bool active); bool OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = NULL, const char *strDest = NULL, bool fOneShot = false, bool fFeeler = false); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func); + void PushMessage(CNode* pnode, CSerializedNetMsg&& msg); + template<typename Callable> bool ForEachNodeContinueIf(Callable&& func) { @@ -370,6 +388,7 @@ private: unsigned int nReceiveFloodSize; std::vector<ListenSocket> vhListenSocket; + std::atomic<bool> fNetworkActive; banmap_t setBanned; CCriticalSection cs_setBanned; bool setBannedIsDirty; @@ -428,7 +447,7 @@ struct CNodeSignals { boost::signals2::signal<bool (CNode*, CConnman&), CombinerAll> ProcessMessages; boost::signals2::signal<bool (CNode*, CConnman&), CombinerAll> SendMessages; - boost::signals2::signal<void (NodeId, const CNode*)> InitializeNode; + boost::signals2::signal<void (CNode*, CConnman&)> InitializeNode; boost::signals2::signal<void (NodeId, bool&)> FinalizeNode; }; @@ -512,6 +531,9 @@ public: class CNetMessage { +private: + mutable CHash256 hasher; + mutable uint256 data_hash; public: bool in_data; // parsing header (false) or data (true) @@ -539,6 +561,8 @@ public: return (hdr.nMessageSize == nDataPos); } + const uint256& GetMessageHash() const; + void SetVersion(int nVersionIn) { hdrbuf.SetVersion(nVersionIn); @@ -553,17 +577,16 @@ public: /** Information about a peer */ class CNode { + friend class CConnman; public: // socket ServiceFlags nServices; ServiceFlags nServicesExpected; SOCKET hSocket; - CDataStream ssSend; size_t nSendSize; // total size of all vSendMsg entries size_t nSendOffset; // offset inside the first vSendMsg already sent - uint64_t nOptimisticBytesWritten; uint64_t nSendBytes; - std::deque<CSerializeData> vSendMsg; + std::deque<std::vector<unsigned char>> vSendMsg; CCriticalSection cs_vSend; std::deque<CInv> vRecvGetData; @@ -589,10 +612,9 @@ public: bool fFeeler; // If true this node is being used as a short lived feeler. 
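
GetMessageHash above works because CNetMessage now feeds every received chunk into a CHash256 instance inside readData, so the digest is ready the moment the message is complete and no second pass over the payload is needed. The self-contained sketch below shows only that Write-as-bytes-arrive / Finalize-once pattern; FNV-1a and ToyNetMessage are stand-ins invented for the example, not the hash or class used by the patch. The CNode declaration from the diff resumes after the sketch.

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // FNV-1a stands in here for the double-SHA256 (CHash256) used by the patch;
    // only the incremental Write()/Finalize() shape matters for the illustration.
    class StreamingHasher {
    public:
        void Write(const unsigned char* p, size_t n) {
            for (size_t i = 0; i < n; ++i) { state ^= p[i]; state *= 0x100000001b3ULL; }
        }
        uint64_t Finalize() const { return state; }
    private:
        uint64_t state = 0xcbf29ce484222325ULL;
    };

    // Minimal analogue of CNetMessage: bytes are hashed while being appended to
    // the receive buffer, so the hash is available as soon as the message completes.
    struct ToyNetMessage {
        std::vector<unsigned char> recv;
        size_t expected;
        StreamingHasher hasher;

        explicit ToyNetMessage(size_t n) : expected(n) {}
        bool Complete() const { return recv.size() == expected; }

        size_t ReadData(const unsigned char* p, size_t n) {
            size_t take = std::min(n, expected - recv.size());
            hasher.Write(p, take);                 // hash incrementally, like the patch
            recv.insert(recv.end(), p, p + take);
            return take;
        }

        uint64_t GetMessageHash() const {
            assert(Complete());                    // only meaningful for a full message
            return hasher.Finalize();
        }
    };

    int main() {
        const unsigned char chunk1[] = {'h', 'e', 'l'};
        const unsigned char chunk2[] = {'l', 'o'};
        ToyNetMessage msg(5);
        msg.ReadData(chunk1, sizeof(chunk1));
        msg.ReadData(chunk2, sizeof(chunk2));
        std::cout << std::hex << msg.GetMessageHash() << "\n";
    }
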
bool fOneShot; bool fClient; - bool fInbound; - bool fNetworkNode; + const bool fInbound; bool fSuccessfullyConnected; - bool fDisconnect; + std::atomic_bool fDisconnect; // We use fRelayTxes for two purposes - // a) it allows us to not relay tx invs before receiving the peer's version message // b) the peer may tell us in its version message that we should not relay tx invs @@ -603,7 +625,7 @@ public: CCriticalSection cs_filter; CBloomFilter* pfilter; int nRefCount; - NodeId id; + const NodeId id; const uint64_t nKeyedNetGroup; protected: @@ -611,9 +633,6 @@ protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd; - // Basic fuzz-testing - void Fuzz(int nChance); // modifies ssSend - public: uint256 hashContinue; int nStartingHeight; @@ -669,7 +688,7 @@ public: CAmount lastSentFeeFilter; int64_t nextSendTimeFeeFilter; - CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, const std::string &addrNameIn = "", bool fInboundIn = false); + CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const std::string &addrNameIn = "", bool fInboundIn = false); ~CNode(); private: @@ -677,10 +696,11 @@ private: void operator=(const CNode&); - uint64_t nLocalHostNonce; + const uint64_t nLocalHostNonce; // Services offered to this peer - ServiceFlags nLocalServices; - int nMyStartingHeight; + const ServiceFlags nLocalServices; + const int nMyStartingHeight; + int nSendVersion; public: NodeId GetId() const { @@ -691,6 +711,10 @@ public: return nLocalHostNonce; } + int GetMyStartingHeight() const { + return nMyStartingHeight; + } + int GetRefCount() { assert(nRefCount >= 0); @@ -716,6 +740,25 @@ public: BOOST_FOREACH(CNetMessage &msg, vRecvMsg) msg.SetVersion(nVersionIn); } + void SetSendVersion(int nVersionIn) + { + // Send version may only be changed in the version message, and + // only one version message is allowed per session. We can therefore + // treat this value as const and even atomic as long as it's only used + // once the handshake is complete. Any attempt to set this twice is an + // error. + assert(nSendVersion == 0); + nSendVersion = nVersionIn; + } + + int GetSendVersion() const + { + // The send version should always be explicitly set to + // INIT_PROTO_VERSION rather than using this value until the handshake + // is complete. + assert(nSendVersion != 0); + return nSendVersion; + } CNode* AddRef() { @@ -778,193 +821,6 @@ public: void AskFor(const CInv& inv); - // TODO: Document the postcondition of this function. Is cs_vSend locked? - void BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend); - - // TODO: Document the precondition of this function. Is cs_vSend locked? - void AbortMessage() UNLOCK_FUNCTION(cs_vSend); - - // TODO: Document the precondition of this function. Is cs_vSend locked? - void EndMessage(const char* pszCommand) UNLOCK_FUNCTION(cs_vSend); - - void PushVersion(); - - - void PushMessage(const char* pszCommand) - { - try - { - BeginMessage(pszCommand); - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1> - void PushMessage(const char* pszCommand, const T1& a1) - { - try - { - BeginMessage(pszCommand); - ssSend << a1; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - /** Send a message containing a1, serialized with flag flag. 
*/ - template<typename T1> - void PushMessageWithFlag(int flag, const char* pszCommand, const T1& a1) - { - try - { - BeginMessage(pszCommand); - WithOrVersion(&ssSend, flag) << a1; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2, typename T3> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2 << a3; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2, typename T3, typename T4> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2 << a3 << a4; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2, typename T3, typename T4, typename T5> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2 << a3 << a4 << a5; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2 << a3 << a4 << a5 << a6; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7, const T8& a8) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7 << a8; - EndMessage(pszCommand); - } - catch (...) - { - AbortMessage(); - throw; - } - } - - template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> - void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7, const T8& a8, const T9& a9) - { - try - { - BeginMessage(pszCommand); - ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7 << a8 << a9; - EndMessage(pszCommand); - } - catch (...) 
- { - AbortMessage(); - throw; - } - } - void CloseSocketDisconnect(); void copyStats(CNodeStats &stats); diff --git a/src/net_processing.cpp b/src/net_processing.cpp new file mode 100644 index 0000000000..74da788ca8 --- /dev/null +++ b/src/net_processing.cpp @@ -0,0 +1,3029 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "net_processing.h" + +#include "addrman.h" +#include "arith_uint256.h" +#include "blockencodings.h" +#include "chainparams.h" +#include "consensus/validation.h" +#include "hash.h" +#include "init.h" +#include "validation.h" +#include "merkleblock.h" +#include "net.h" +#include "netmessagemaker.h" +#include "netbase.h" +#include "policy/fees.h" +#include "policy/policy.h" +#include "primitives/block.h" +#include "primitives/transaction.h" +#include "random.h" +#include "tinyformat.h" +#include "txmempool.h" +#include "ui_interface.h" +#include "util.h" +#include "utilmoneystr.h" +#include "utilstrencodings.h" +#include "validationinterface.h" + +#include <boost/thread.hpp> + +using namespace std; + +#if defined(NDEBUG) +# error "Bitcoin cannot be compiled without assertions." +#endif + +int64_t nTimeBestReceived = 0; // Used only to inform the wallet of when we last received a block + +struct IteratorComparator +{ + template<typename I> + bool operator()(const I& a, const I& b) + { + return &(*a) < &(*b); + } +}; + +struct COrphanTx { + // When modifying, adapt the copy of this definition in tests/DoS_tests. + CTransaction tx; + NodeId fromPeer; + int64_t nTimeExpire; +}; +map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main); +map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main); +void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + +static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8] + +// Internal stuff +namespace { + /** Number of nodes with fSyncStarted. */ + int nSyncStarted = 0; + + /** + * Sources of received blocks, saved to be able to send them reject + * messages or ban them when processing happens afterwards. Protected by + * cs_main. + * Set mapBlockSource[hash].second to false if the node should not be + * punished if the block is invalid. + */ + map<uint256, std::pair<NodeId, bool>> mapBlockSource; + + /** + * Filter for transactions that were recently rejected by + * AcceptToMemoryPool. These are not rerequested until the chain tip + * changes, at which point the entire filter is reset. Protected by + * cs_main. + * + * Without this filter we'd be re-requesting txs from each of our peers, + * increasing bandwidth consumption considerably. For instance, with 100 + * peers, half of which relay a tx we don't accept, that might be a 50x + * bandwidth increase. A flooding attacker attempting to roll-over the + * filter using minimum-sized, 60byte, transactions might manage to send + * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a + * two minute window to send invs to us. + * + * Decreasing the false positive rate is fairly cheap, so we pick one in a + * million to make it highly unlikely for users to have issues with this + * filter. 
+ * + * Memory used: 1.3 MB + */ + std::unique_ptr<CRollingBloomFilter> recentRejects; + uint256 hashRecentRejectsChainTip; + + /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */ + struct QueuedBlock { + uint256 hash; + CBlockIndex* pindex; //!< Optional. + bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. + std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads + }; + map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight; + + /** Stack of nodes which we have set to announce using compact blocks */ + list<NodeId> lNodesAnnouncingHeaderAndIDs; + + /** Number of preferable block download peers. */ + int nPreferredDownload = 0; + + /** Number of peers from which we're downloading blocks. */ + int nPeersWithValidatedDownloads = 0; + + /** Relay map, protected by cs_main. */ + typedef std::map<uint256, CTransactionRef> MapRelay; + MapRelay mapRelay; + /** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main). */ + std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration; +} // anon namespace + +////////////////////////////////////////////////////////////////////////////// +// +// Registration of network node signals. +// + +namespace { + +struct CBlockReject { + unsigned char chRejectCode; + string strRejectReason; + uint256 hashBlock; +}; + +/** + * Maintain validation-specific state about nodes, protected by cs_main, instead + * by CNode's own locks. This simplifies asynchronous operation, where + * processing of incoming data is done after the ProcessMessage call returns, + * and we're no longer holding the node's locks. + */ +struct CNodeState { + //! The peer's address + const CService address; + //! Whether we have a fully established connection. + bool fCurrentlyConnected; + //! Accumulated misbehaviour score for this peer. + int nMisbehavior; + //! Whether this peer should be disconnected and banned (unless whitelisted). + bool fShouldBan; + //! String name of this peer (debugging/logging purposes). + const std::string name; + //! List of asynchronously-determined block rejections to notify this peer about. + std::vector<CBlockReject> rejects; + //! The best known block we know this peer has announced. + CBlockIndex *pindexBestKnownBlock; + //! The hash of the last unknown block this peer has announced. + uint256 hashLastUnknownBlock; + //! The last full block we both have. + CBlockIndex *pindexLastCommonBlock; + //! The best header we have sent our peer. + CBlockIndex *pindexBestHeaderSent; + //! Length of current-streak of unconnecting headers announcements + int nUnconnectingHeaders; + //! Whether we've started headers synchronization with this peer. + bool fSyncStarted; + //! Since when we're stalling block download progress (in microseconds), or 0. + int64_t nStallingSince; + list<QueuedBlock> vBlocksInFlight; + //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. + int64_t nDownloadingSince; + int nBlocksInFlight; + int nBlocksInFlightValidHeaders; + //! Whether we consider this a preferred download peer. + bool fPreferredDownload; + //! Whether this peer wants invs or headers (when possible) for block announcements. + bool fPreferHeaders; + //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements. 
+ bool fPreferHeaderAndIDs; + /** + * Whether this peer will send us cmpctblocks if we request them. + * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion, + * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send. + */ + bool fProvidesHeaderAndIDs; + //! Whether this peer can give us witnesses + bool fHaveWitness; + //! Whether this peer wants witnesses in cmpctblocks/blocktxns + bool fWantsCmpctWitness; + /** + * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns, + * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns. + */ + bool fSupportsDesiredCmpctVersion; + + CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) { + fCurrentlyConnected = false; + nMisbehavior = 0; + fShouldBan = false; + pindexBestKnownBlock = NULL; + hashLastUnknownBlock.SetNull(); + pindexLastCommonBlock = NULL; + pindexBestHeaderSent = NULL; + nUnconnectingHeaders = 0; + fSyncStarted = false; + nStallingSince = 0; + nDownloadingSince = 0; + nBlocksInFlight = 0; + nBlocksInFlightValidHeaders = 0; + fPreferredDownload = false; + fPreferHeaders = false; + fPreferHeaderAndIDs = false; + fProvidesHeaderAndIDs = false; + fHaveWitness = false; + fWantsCmpctWitness = false; + fSupportsDesiredCmpctVersion = false; + } +}; + +/** Map maintaining per-node state. Requires cs_main. */ +map<NodeId, CNodeState> mapNodeState; + +// Requires cs_main. +CNodeState *State(NodeId pnode) { + map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode); + if (it == mapNodeState.end()) + return NULL; + return &it->second; +} + +void UpdatePreferredDownload(CNode* node, CNodeState* state) +{ + nPreferredDownload -= state->fPreferredDownload; + + // Whether this node should be marked as a preferred download node. + state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient; + + nPreferredDownload += state->fPreferredDownload; +} + +void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime) +{ + ServiceFlags nLocalNodeServices = pnode->GetLocalServices(); + uint64_t nonce = pnode->GetLocalNonce(); + int nNodeStartingHeight = pnode->GetMyStartingHeight(); + NodeId nodeid = pnode->GetId(); + CAddress addr = pnode->addr; + + CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? 
addr : CAddress(CService(), addr.nServices)); + CAddress addrMe = CAddress(CService(), nLocalNodeServices); + + connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe, + nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes)); + + if (fLogIPs) + LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid); + else + LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid); +} + +void InitializeNode(CNode *pnode, CConnman& connman) { + CAddress addr = pnode->addr; + std::string addrName = pnode->addrName; + NodeId nodeid = pnode->GetId(); + { + LOCK(cs_main); + mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName))); + } + if(!pnode->fInbound) + PushNodeVersion(pnode, connman, GetTime()); +} + +void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) { + fUpdateConnectionTime = false; + LOCK(cs_main); + CNodeState *state = State(nodeid); + + if (state->fSyncStarted) + nSyncStarted--; + + if (state->nMisbehavior == 0 && state->fCurrentlyConnected) { + fUpdateConnectionTime = true; + } + + BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) { + mapBlocksInFlight.erase(entry.hash); + } + EraseOrphansFor(nodeid); + nPreferredDownload -= state->fPreferredDownload; + nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); + assert(nPeersWithValidatedDownloads >= 0); + + mapNodeState.erase(nodeid); + + if (mapNodeState.empty()) { + // Do a consistency check after the last peer is removed. + assert(mapBlocksInFlight.empty()); + assert(nPreferredDownload == 0); + assert(nPeersWithValidatedDownloads == 0); + } +} + +// Requires cs_main. +// Returns a bool indicating whether we requested this block. +// Also used if a block was /not/ received and timed out or started with another peer +bool MarkBlockAsReceived(const uint256& hash) { + map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); + if (itInFlight != mapBlocksInFlight.end()) { + CNodeState *state = State(itInFlight->second.first); + state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; + if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { + // Last validated block on the queue was received. + nPeersWithValidatedDownloads--; + } + if (state->vBlocksInFlight.begin() == itInFlight->second.second) { + // First block on the queue was received, update the start download time for the next one + state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros()); + } + state->vBlocksInFlight.erase(itInFlight->second.second); + state->nBlocksInFlight--; + state->nStallingSince = 0; + mapBlocksInFlight.erase(itInFlight); + return true; + } + return false; +} + +// Requires cs_main. 
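
InitializeNode and FinalizeNode above manage the per-peer CNodeState map: an entry is emplaced under cs_main when a connection starts, and on teardown the peer's contributions to the global counters are subtracted before the entry is erased, with consistency asserts once the last peer is gone. The sketch below mirrors that lifecycle with a plain std::map and invented names (PeerState, InitializePeer, FinalizePeer); it assumes single-threaded use, so the cs_main locking is omitted. The MarkBlockAsInFlight comments from the diff continue after it.

    #include <cassert>
    #include <map>
    #include <string>

    // Toy version of the per-peer state registry in net_processing.cpp.
    struct PeerState {
        std::string name;
        int blocksInFlight = 0;
        bool preferredDownload = false;
    };

    static std::map<int, PeerState> g_peers;
    static int g_preferredDownload = 0;   // global aggregate, like nPreferredDownload

    static void InitializePeer(int id, const std::string& name) {
        g_peers.emplace(id, PeerState{name});
    }

    static void FinalizePeer(int id) {
        auto it = g_peers.find(id);
        assert(it != g_peers.end());
        g_preferredDownload -= it->second.preferredDownload; // undo this peer's contribution
        g_peers.erase(it);
        if (g_peers.empty()) {
            // Once the last peer is gone the aggregates must be back at zero,
            // mirroring the consistency asserts in FinalizeNode.
            assert(g_preferredDownload == 0);
        }
    }

    int main() {
        InitializePeer(1, "peer-a");
        InitializePeer(2, "peer-b");
        g_peers[2].preferredDownload = true;
        ++g_preferredDownload;             // peer 2 becomes a preferred download peer
        FinalizePeer(1);
        FinalizePeer(2);
    }
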
+// returns false, still setting pit, if the block was already in flight from the same peer +// pit will only be valid as long as the same cs_main lock is being held +bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) { + CNodeState *state = State(nodeid); + assert(state != NULL); + + // Short-circuit most stuff in case its from the same node + map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); + if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { + *pit = &itInFlight->second.second; + return false; + } + + // Make sure it's not listed somewhere already. + MarkBlockAsReceived(hash); + + list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), + {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)}); + state->nBlocksInFlight++; + state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; + if (state->nBlocksInFlight == 1) { + // We're starting a block download (batch) from this peer. + state->nDownloadingSince = GetTimeMicros(); + } + if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) { + nPeersWithValidatedDownloads++; + } + itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first; + if (pit) + *pit = &itInFlight->second.second; + return true; +} + +/** Check whether the last unknown block a peer advertised is not yet known. */ +void ProcessBlockAvailability(NodeId nodeid) { + CNodeState *state = State(nodeid); + assert(state != NULL); + + if (!state->hashLastUnknownBlock.IsNull()) { + BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock); + if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) { + if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) + state->pindexBestKnownBlock = itOld->second; + state->hashLastUnknownBlock.SetNull(); + } + } +} + +/** Update tracking information about which blocks a peer is assumed to have. */ +void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { + CNodeState *state = State(nodeid); + assert(state != NULL); + + ProcessBlockAvailability(nodeid); + + BlockMap::iterator it = mapBlockIndex.find(hash); + if (it != mapBlockIndex.end() && it->second->nChainWork > 0) { + // An actually better block was announced. + if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) + state->pindexBestKnownBlock = it->second; + } else { + // An unknown block was announced; just assume that the latest one is the best one. + state->hashLastUnknownBlock = hash; + } +} + +void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) { + if (!nodestate->fSupportsDesiredCmpctVersion) { + // Never ask from peers who can't provide witnesses. + return; + } + if (nodestate->fProvidesHeaderAndIDs) { + for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { + if (*it == pfrom->GetId()) { + lNodesAnnouncingHeaderAndIDs.erase(it); + lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); + return; + } + } + bool fAnnounceUsingCMPCTBLOCK = false; + uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 
2 : 1; + if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { + // As per BIP152, we only get 3 of our peers to announce + // blocks using compact encodings. + connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){ + connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); + return true; + }); + lNodesAnnouncingHeaderAndIDs.pop_front(); + } + fAnnounceUsingCMPCTBLOCK = true; + connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); + lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); + } +} + +// Requires cs_main +bool CanDirectFetch(const Consensus::Params &consensusParams) +{ + return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20; +} + +// Requires cs_main +bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex) +{ + if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) + return true; + if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) + return true; + return false; +} + +/** Find the last common ancestor two blocks have. + * Both pa and pb must be non-NULL. */ +CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) { + if (pa->nHeight > pb->nHeight) { + pa = pa->GetAncestor(pb->nHeight); + } else if (pb->nHeight > pa->nHeight) { + pb = pb->GetAncestor(pa->nHeight); + } + + while (pa != pb && pa && pb) { + pa = pa->pprev; + pb = pb->pprev; + } + + // Eventually all chain branches meet at the genesis block. + assert(pa == pb); + return pa; +} + +/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has + * at most count entries. */ +void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) { + if (count == 0) + return; + + vBlocks.reserve(vBlocks.size() + count); + CNodeState *state = State(nodeid); + assert(state != NULL); + + // Make sure pindexBestKnownBlock is up to date, we'll need it. + ProcessBlockAvailability(nodeid); + + if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) { + // This peer has nothing interesting. + return; + } + + if (state->pindexLastCommonBlock == NULL) { + // Bootstrap quickly by guessing a parent of our best tip is the forking point. + // Guessing wrong in either direction is not a problem. + state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())]; + } + + // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor + // of its current tip anymore. Go back enough to fix that. + state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock); + if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) + return; + + std::vector<CBlockIndex*> vToFetch; + CBlockIndex *pindexWalk = state->pindexLastCommonBlock; + // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last + // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to + // download that next block if the window were 1 larger. 
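
MaybeSetPeerAsAnnouncingHeaderAndIDs above caps the number of peers announcing new blocks as compact blocks at three, per BIP152: a peer already in the list is refreshed to the back, and adding a new one first tells the oldest (via SENDCMPCT with the announce flag off) to fall back to normal announcements. The toy sketch below reproduces just that rotation over a std::list<int>; Notify is an invented stand-in for the SENDCMPCT push done through CConnman. The FindNextBlocksToDownload body from the diff continues after the sketch.

    #include <initializer_list>
    #include <iostream>
    #include <list>

    static std::list<int> g_announcers;   // peers currently announcing via compact blocks

    static void Notify(int peer, bool announceViaCmpct) {
        std::cout << "peer " << peer << (announceViaCmpct ? " -> compact" : " -> normal") << "\n";
    }

    static void MaybeSetAnnouncer(int peer) {
        for (auto it = g_announcers.begin(); it != g_announcers.end(); ++it) {
            if (*it == peer) {
                // Already announcing: move it to the back so it is evicted last.
                g_announcers.erase(it);
                g_announcers.push_back(peer);
                return;
            }
        }
        if (g_announcers.size() >= 3) {
            Notify(g_announcers.front(), false);   // ask the oldest peer to stop
            g_announcers.pop_front();
        }
        Notify(peer, true);
        g_announcers.push_back(peer);
    }

    int main() {
        for (int peer : {10, 11, 12, 13, 11}) MaybeSetAnnouncer(peer);
    }
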
+ int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; + int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); + NodeId waitingfor = -1; + while (pindexWalk->nHeight < nMaxHeight) { + // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards + // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive + // as iterating over ~100 CBlockIndex* entries anyway. + int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128)); + vToFetch.resize(nToFetch); + pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch); + vToFetch[nToFetch - 1] = pindexWalk; + for (unsigned int i = nToFetch - 1; i > 0; i--) { + vToFetch[i - 1] = vToFetch[i]->pprev; + } + + // Iterate over those blocks in vToFetch (in forward direction), adding the ones that + // are not yet downloaded and not in flight to vBlocks. In the mean time, update + // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's + // already part of our chain (and therefore don't need it even if pruned). + BOOST_FOREACH(CBlockIndex* pindex, vToFetch) { + if (!pindex->IsValid(BLOCK_VALID_TREE)) { + // We consider the chain that this peer is on invalid. + return; + } + if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) { + // We wouldn't download this block or its descendants from this peer. + return; + } + if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) { + if (pindex->nChainTx) + state->pindexLastCommonBlock = pindex; + } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { + // The block is not already downloaded, and not yet in flight. + if (pindex->nHeight > nWindowEnd) { + // We reached the end of the window. + if (vBlocks.size() == 0 && waitingfor != nodeid) { + // We aren't able to fetch anything, but we would be if the download window was one larger. + nodeStaller = waitingfor; + } + return; + } + vBlocks.push_back(pindex); + if (vBlocks.size() == count) { + return; + } + } else if (waitingfor == -1) { + // This is the first already-in-flight block. + waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first; + } + } + } +} + +} // anon namespace + +bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { + LOCK(cs_main); + CNodeState *state = State(nodeid); + if (state == NULL) + return false; + stats.nMisbehavior = state->nMisbehavior; + stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; + stats.nCommonHeight = state->pindexLastCommonBlock ? 
state->pindexLastCommonBlock->nHeight : -1; + BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) { + if (queue.pindex) + stats.vHeightInFlight.push_back(queue.pindex->nHeight); + } + return true; +} + +void RegisterNodeSignals(CNodeSignals& nodeSignals) +{ + nodeSignals.ProcessMessages.connect(&ProcessMessages); + nodeSignals.SendMessages.connect(&SendMessages); + nodeSignals.InitializeNode.connect(&InitializeNode); + nodeSignals.FinalizeNode.connect(&FinalizeNode); +} + +void UnregisterNodeSignals(CNodeSignals& nodeSignals) +{ + nodeSignals.ProcessMessages.disconnect(&ProcessMessages); + nodeSignals.SendMessages.disconnect(&SendMessages); + nodeSignals.InitializeNode.disconnect(&InitializeNode); + nodeSignals.FinalizeNode.disconnect(&FinalizeNode); +} + +////////////////////////////////////////////////////////////////////////////// +// +// mapOrphanTransactions +// + +bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + uint256 hash = tx.GetHash(); + if (mapOrphanTransactions.count(hash)) + return false; + + // Ignore big transactions, to avoid a + // send-big-orphans memory exhaustion attack. If a peer has a legitimate + // large transaction with a missing parent then we assume + // it will rebroadcast it later, after the parent transaction(s) + // have been mined or received. + // 100 orphans, each of which is at most 99,999 bytes big is + // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case): + unsigned int sz = GetTransactionWeight(tx); + if (sz >= MAX_STANDARD_TX_WEIGHT) + { + LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString()); + return false; + } + + auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME}); + assert(ret.second); + BOOST_FOREACH(const CTxIn& txin, tx.vin) { + mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); + } + + LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(), + mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); + return true; +} + +int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash); + if (it == mapOrphanTransactions.end()) + return 0; + BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin) + { + auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout); + if (itPrev == mapOrphanTransactionsByPrev.end()) + continue; + itPrev->second.erase(it); + if (itPrev->second.empty()) + mapOrphanTransactionsByPrev.erase(itPrev); + } + mapOrphanTransactions.erase(it); + return 1; +} + +void EraseOrphansFor(NodeId peer) +{ + int nErased = 0; + map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin(); + while (iter != mapOrphanTransactions.end()) + { + map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid + if (maybeErase->second.fromPeer == peer) + { + nErased += EraseOrphanTx(maybeErase->second.tx.GetHash()); + } + } + if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer); +} + + +unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + unsigned int nEvicted = 0; + static int64_t nNextSweep; + int64_t nNow = GetTime(); + if (nNextSweep <= nNow) { + // Sweep out expired orphan pool entries: + int nErased = 0; + int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; + map<uint256, 
COrphanTx>::iterator iter = mapOrphanTransactions.begin(); + while (iter != mapOrphanTransactions.end()) + { + map<uint256, COrphanTx>::iterator maybeErase = iter++; + if (maybeErase->second.nTimeExpire <= nNow) { + nErased += EraseOrphanTx(maybeErase->second.tx.GetHash()); + } else { + nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); + } + } + // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan. + nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; + if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased); + } + while (mapOrphanTransactions.size() > nMaxOrphans) + { + // Evict a random orphan: + uint256 randomhash = GetRandHash(); + map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash); + if (it == mapOrphanTransactions.end()) + it = mapOrphanTransactions.begin(); + EraseOrphanTx(it->first); + ++nEvicted; + } + return nEvicted; +} + +// Requires cs_main. +void Misbehaving(NodeId pnode, int howmuch) +{ + if (howmuch == 0) + return; + + CNodeState *state = State(pnode); + if (state == NULL) + return; + + state->nMisbehavior += howmuch; + int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD); + if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) + { + LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior); + state->fShouldBan = true; + } else + LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior); +} + + + + + + + + +////////////////////////////////////////////////////////////////////////////// +// +// blockchain -> download logic notification +// + +PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) { + // Initialize global variables that cannot be constructed at startup. + recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); +} + +void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIndex* pindex, int nPosInBlock) { + if (nPosInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK) + return; + + LOCK(cs_main); + + std::vector<uint256> vOrphanErase; + // Which orphan pool entries must we evict? + for (size_t j = 0; j < tx.vin.size(); j++) { + auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout); + if (itByPrev == mapOrphanTransactionsByPrev.end()) continue; + for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { + const CTransaction& orphanTx = (*mi)->second.tx; + const uint256& orphanHash = orphanTx.GetHash(); + vOrphanErase.push_back(orphanHash); + } + } + + // Erase orphan transactions include or precluded by this block + if (vOrphanErase.size()) { + int nErased = 0; + BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) { + nErased += EraseOrphanTx(orphanHash); + } + LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased); + } +} + +void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { + const int nNewHeight = pindexNew->nHeight; + connman->SetBestHeight(nNewHeight); + + if (!fInitialDownload) { + // Find the hashes of all blocks that weren't previously in the best chain. 
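
LimitOrphanTxSize above bounds the orphan pool in two ways: expired entries are swept in batches (the next sweep is scheduled a fixed interval after the earliest remaining expiry, so the linear scan is amortized), and if the pool is still too large a random hash is drawn and the first map entry at or after it is evicted, wrapping to the beginning when needed. The sketch below isolates that random-eviction trick with plain uint64_t keys; the names and types are invented for the example. The UpdatedBlockTip body from the diff resumes after it.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <random>

    static std::map<uint64_t, int> g_orphans;   // keys stand in for txids

    static unsigned int LimitOrphans(size_t maxOrphans, std::mt19937_64& rng) {
        unsigned int evicted = 0;
        while (g_orphans.size() > maxOrphans) {
            // Jump to the first entry at or after a random key; wrap around if
            // the draw lands past the last entry.
            auto it = g_orphans.lower_bound(rng());
            if (it == g_orphans.end()) it = g_orphans.begin();
            g_orphans.erase(it);
            ++evicted;
        }
        return evicted;
    }

    int main() {
        std::mt19937_64 rng(42);
        for (int i = 0; i < 100; ++i) g_orphans.emplace(rng(), 0);
        std::cout << "evicted " << LimitOrphans(25, rng) << " orphans\n";
    }
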
+ std::vector<uint256> vHashes; + const CBlockIndex *pindexToAnnounce = pindexNew; + while (pindexToAnnounce != pindexFork) { + vHashes.push_back(pindexToAnnounce->GetBlockHash()); + pindexToAnnounce = pindexToAnnounce->pprev; + if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { + // Limit announcements in case of a huge reorganization. + // Rely on the peer's synchronization mechanism in that case. + break; + } + } + // Relay inventory, but don't relay old inventory during initial block download. + connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) { + if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) { + BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) { + pnode->PushBlockHash(hash); + } + } + }); + } + + nTimeBestReceived = GetTime(); +} + +void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) { + LOCK(cs_main); + + const uint256 hash(block.GetHash()); + std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash); + + int nDoS = 0; + if (state.IsInvalid(nDoS)) { + if (it != mapBlockSource.end() && State(it->second.first)) { + assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes + CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash}; + State(it->second.first)->rejects.push_back(reject); + if (nDoS > 0 && it->second.second) + Misbehaving(it->second.first, nDoS); + } + } + if (it != mapBlockSource.end()) + mapBlockSource.erase(it); +} + +////////////////////////////////////////////////////////////////////////////// +// +// Messages +// + + +bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + switch (inv.type) + { + case MSG_TX: + case MSG_WITNESS_TX: + { + assert(recentRejects); + if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip) + { + // If the chain tip has changed previously rejected transactions + // might be now valid, e.g. due to a nLockTime'd tx becoming valid, + // or a double-spend. Reset the rejects filter and give those + // txs a second chance. + hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash(); + recentRejects->reset(); + } + + // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude + // requesting or processing some txs which have already been included in a block + return recentRejects->contains(inv.hash) || + mempool.exists(inv.hash) || + mapOrphanTransactions.count(inv.hash) || + pcoinsTip->HaveCoinsInCache(inv.hash); + } + case MSG_BLOCK: + case MSG_WITNESS_BLOCK: + return mapBlockIndex.count(inv.hash); + } + // Don't know what it is, just say we already got one + return true; +} + +static void RelayTransaction(const CTransaction& tx, CConnman& connman) +{ + CInv inv(MSG_TX, tx.GetHash()); + connman.ForEachNode([&inv](CNode* pnode) + { + pnode->PushInventory(inv); + }); +} + +static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman) +{ + unsigned int nRelayNodes = fReachable ? 
2 : 1; // limited relaying of addresses outside our network(s) + + // Relay to a limited number of other nodes + // Use deterministic randomness to send to the same nodes for 24 hours + // at a time so the addrKnowns of the chosen nodes prevent repeats + uint64_t hashAddr = addr.GetHash(); + const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60)); + FastRandomContext insecure_rand; + + std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}}; + assert(nRelayNodes <= best.size()); + + auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) { + if (pnode->nVersion >= CADDR_TIME_VERSION) { + uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize(); + for (unsigned int i = 0; i < nRelayNodes; i++) { + if (hashKey > best[i].first) { + std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); + best[i] = std::make_pair(hashKey, pnode); + break; + } + } + } + }; + + auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] { + for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { + best[i].second->PushAddress(addr, insecure_rand); + } + }; + + connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); +} + +void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman) +{ + std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin(); + unsigned int nMaxSendBufferSize = connman.GetSendBufferSize(); + vector<CInv> vNotFound; + CNetMsgMaker msgMaker(pfrom->GetSendVersion()); + LOCK(cs_main); + + while (it != pfrom->vRecvGetData.end()) { + // Don't bother if send buffer is too full to respond anyway + if (pfrom->nSendSize >= nMaxSendBufferSize) + break; + + const CInv &inv = *it; + { + boost::this_thread::interruption_point(); + it++; + + if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) + { + bool send = false; + BlockMap::iterator mi = mapBlockIndex.find(inv.hash); + if (mi != mapBlockIndex.end()) + { + if (chainActive.Contains(mi->second)) { + send = true; + } else { + static const int nOneMonth = 30 * 24 * 60 * 60; + // To prevent fingerprinting attacks, only send blocks outside of the active + // chain if they are valid, and no more than a month older (both in time, and in + // best equivalent proof of work) than the best header chain we know about. 
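
RelayAddress above relays a new address to at most one or two peers, chosen deterministically: the address hash plus a 24-hour time bucket is fed into a keyed SipHash per peer, and the peers with the highest resulting values win, so the same peers are picked for a whole day and their addrKnown filters suppress repeats. The sketch below shows only that selection; std::hash-based mixing is an invented stand-in for CSipHasher, and peers are plain ints. The block-serving branch of ProcessGetData from the diff continues after the sketch.

    #include <array>
    #include <cstdint>
    #include <ctime>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Stand-in for the keyed SipHash used by the patch; only the selection logic matters.
    static uint64_t Mix(uint64_t a, uint64_t b) {
        return std::hash<uint64_t>{}(a ^ (b * 0x9e3779b97f4a7c15ULL));
    }

    static std::vector<int> PickRelayPeers(const std::string& addr, const std::vector<int>& peers,
                                           unsigned int nRelayNodes, int64_t now) {
        const uint64_t addrHash = std::hash<std::string>{}(addr);
        const uint64_t dayBucket = (now + addrHash) / (24 * 60 * 60);  // same bucket for 24 hours
        std::array<std::pair<uint64_t, int>, 2> best{{{0, -1}, {0, -1}}};

        for (int peer : peers) {
            uint64_t key = Mix(Mix(addrHash, dayBucket), static_cast<uint64_t>(peer));
            for (unsigned int i = 0; i < nRelayNodes; ++i) {
                if (key > best[i].first) {
                    // Shift weaker candidates down, then slot this peer in.
                    for (unsigned int j = nRelayNodes - 1; j > i; --j) best[j] = best[j - 1];
                    best[i] = {key, peer};
                    break;
                }
            }
        }
        std::vector<int> out;
        for (unsigned int i = 0; i < nRelayNodes && best[i].second != -1; ++i) out.push_back(best[i].second);
        return out;
    }

    int main() {
        std::vector<int> peers{1, 2, 3, 4, 5};
        for (int p : PickRelayPeers("203.0.113.7:8333", peers, 2, std::time(nullptr))) {
            std::cout << "relay via peer " << p << "\n";
        }
    }
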
+ send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) && + (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) && + (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth); + if (!send) { + LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId()); + } + } + } + // disconnect node in case we have reached the outbound limit for serving historical blocks + // never disconnect whitelisted nodes + static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical + if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted) + { + LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId()); + + //disconnect node + pfrom->fDisconnect = true; + send = false; + } + // Pruned nodes may have deleted the block, so check whether + // it's available before trying to send. + if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) + { + // Send block from disk + CBlock block; + if (!ReadBlockFromDisk(block, (*mi).second, consensusParams)) + assert(!"cannot load block from disk"); + if (inv.type == MSG_BLOCK) + connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block)); + else if (inv.type == MSG_WITNESS_BLOCK) + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, block)); + else if (inv.type == MSG_FILTERED_BLOCK) + { + bool sendMerkleBlock = false; + CMerkleBlock merkleBlock; + { + LOCK(pfrom->cs_filter); + if (pfrom->pfilter) { + sendMerkleBlock = true; + merkleBlock = CMerkleBlock(block, *pfrom->pfilter); + } + } + if (sendMerkleBlock) { + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); + // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see + // This avoids hurting performance by pointlessly requiring a round-trip + // Note that there is currently no way for a node to request any single transactions we didn't send here - + // they must either disconnect and retry or request the full block. + // Thus, the protocol spec specified allows for us to provide duplicate txn here, + // however we MUST always provide at least what the remote peer needs + typedef std::pair<unsigned int, uint256> PairType; + BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn) + connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *block.vtx[pair.first])); + } + // else + // no response + } + else if (inv.type == MSG_CMPCT_BLOCK) + { + // If a peer is asking for old blocks, we're almost guaranteed + // they won't have a useful mempool to match against a compact block, + // and we don't feel like constructing the object for them, so + // instead we respond with the full, non-compact block. + bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness; + int nSendFlags = fPeerWantsWitness ? 
0 : SERIALIZE_TRANSACTION_NO_WITNESS; + if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) { + CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness); + connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); + } else + connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, block)); + } + + // Trigger the peer node to send a getblocks request for the next batch of inventory + if (inv.hash == pfrom->hashContinue) + { + // Bypass PushInventory, this must send even if redundant, + // and we want it right after the last block so they don't + // wait for other stuff first. + vector<CInv> vInv; + vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash())); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv)); + pfrom->hashContinue.SetNull(); + } + } + } + else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX) + { + // Send stream from relay memory + bool push = false; + auto mi = mapRelay.find(inv.hash); + int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); + if (mi != mapRelay.end()) { + connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second)); + push = true; + } else if (pfrom->timeLastMempoolReq) { + auto txinfo = mempool.info(inv.hash); + // To protect privacy, do not answer getdata using the mempool when + // that TX couldn't have been INVed in reply to a MEMPOOL request. + if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) { + connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx)); + push = true; + } + } + if (!push) { + vNotFound.push_back(inv); + } + } + + // Track requests for our stuff. + GetMainSignals().Inventory(inv.hash); + + if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) + break; + } + } + + pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it); + + if (!vNotFound.empty()) { + // Let the peer know that we didn't find what it asked for, so it doesn't + // have to wait around forever. Currently only SPV clients actually care + // about this message: it's needed when they are recursively walking the + // dependencies of relevant unconfirmed transactions. SPV clients want to + // do that because they want to know about (and store and rebroadcast and + // risk analyze) the dependencies of transactions relevant to them, without + // having to download the entire memory pool. 
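Editor's aside (not part of this diff): the MSG_TX branch above only serves a transaction out of the mempool when it was already in the pool at the time of the peer's last "mempool" request, so a getdata can never reveal more than a MEMPOOL reply would have. A minimal restatement of that rule, with plain integer timestamps standing in for the CNode and mempool entry fields:

#include <cstdint>

// Returns true if answering the getdata from the mempool leaks nothing new.
// timeLastMempoolReq == 0 means the peer never issued a "mempool" request,
// so nothing may be disclosed this way; otherwise the tx must have entered
// the pool no later than the time that request was answered.
bool CanServeTxFromMempool(int64_t txEntryTime, int64_t timeLastMempoolReq)
{
    return timeLastMempoolReq != 0 && txEntryTime <= timeLastMempoolReq;
}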
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); + } +} + +uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) { + uint32_t nFetchFlags = 0; + if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) { + nFetchFlags |= MSG_WITNESS_FLAG; + } + return nFetchFlags; +} + +bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman) +{ + unsigned int nMaxSendBufferSize = connman.GetSendBufferSize(); + + LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id); + if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0) + { + LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n"); + return true; + } + + + if (!(pfrom->GetLocalServices() & NODE_BLOOM) && + (strCommand == NetMsgType::FILTERLOAD || + strCommand == NetMsgType::FILTERADD)) + { + if (pfrom->nVersion >= NO_BLOOM_VERSION) { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 100); + return false; + } else { + pfrom->fDisconnect = true; + return false; + } + } + + + if (strCommand == NetMsgType::VERSION) + { + // Each connection can only send one version message + if (pfrom->nVersion != 0) + { + connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"))); + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 1); + return false; + } + + int64_t nTime; + CAddress addrMe; + CAddress addrFrom; + uint64_t nNonce = 1; + uint64_t nServiceInt; + vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe; + pfrom->nServices = ServiceFlags(nServiceInt); + if (!pfrom->fInbound) + { + connman.SetServices(pfrom->addr, pfrom->nServices); + } + if (pfrom->nServicesExpected & ~pfrom->nServices) + { + LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected); + connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD, + strprintf("Expected to offer services %08x", pfrom->nServicesExpected))); + pfrom->fDisconnect = true; + return false; + } + + if (pfrom->nVersion < MIN_PEER_PROTO_VERSION) + { + // disconnect from peers older than this proto version + LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion); + connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE, + strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION))); + pfrom->fDisconnect = true; + return false; + } + + if (pfrom->nVersion == 10300) + pfrom->nVersion = 300; + if (!vRecv.empty()) + vRecv >> addrFrom >> nNonce; + if (!vRecv.empty()) { + vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH); + pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer); + } + if (!vRecv.empty()) { + vRecv >> pfrom->nStartingHeight; + } + { + LOCK(pfrom->cs_filter); + if (!vRecv.empty()) + vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message + else + pfrom->fRelayTxes = true; + } + + // Disconnect if we connected to ourself + if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce)) + { + LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString()); + pfrom->fDisconnect = true; + return true; + } + + pfrom->addrLocal = addrMe; + if 
(pfrom->fInbound && addrMe.IsRoutable()) + { + SeenLocal(addrMe); + } + + // Be shy and don't send version until we hear + if (pfrom->fInbound) + PushNodeVersion(pfrom, connman, GetAdjustedTime()); + + pfrom->fClient = !(pfrom->nServices & NODE_NETWORK); + + if((pfrom->nServices & NODE_WITNESS)) + { + LOCK(cs_main); + State(pfrom->GetId())->fHaveWitness = true; + } + + // Potentially mark this peer as a preferred download peer. + { + LOCK(cs_main); + UpdatePreferredDownload(pfrom, State(pfrom->GetId())); + } + + // Change version + connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK)); + int nSendVersion = std::min(pfrom->nVersion, PROTOCOL_VERSION); + pfrom->SetSendVersion(nSendVersion); + + if (!pfrom->fInbound) + { + // Advertise our address + if (fListen && !IsInitialBlockDownload()) + { + CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices()); + FastRandomContext insecure_rand; + if (addr.IsRoutable()) + { + LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString()); + pfrom->PushAddress(addr, insecure_rand); + } else if (IsPeerAddrLocalGood(pfrom)) { + addr.SetIP(pfrom->addrLocal); + LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString()); + pfrom->PushAddress(addr, insecure_rand); + } + } + + // Get recent addresses + if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000) + { + connman.PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR)); + pfrom->fGetAddr = true; + } + connman.MarkAddressGood(pfrom->addr); + } + + pfrom->fSuccessfullyConnected = true; + + string remoteAddr; + if (fLogIPs) + remoteAddr = ", peeraddr=" + pfrom->addr.ToString(); + + LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n", + pfrom->cleanSubVer, pfrom->nVersion, + pfrom->nStartingHeight, addrMe.ToString(), pfrom->id, + remoteAddr); + + int64_t nTimeOffset = nTime - GetTime(); + pfrom->nTimeOffset = nTimeOffset; + AddTimeData(pfrom->addr, nTimeOffset); + + // Feeler connections exist only to verify if address is online. + if (pfrom->fFeeler) { + assert(pfrom->fInbound == false); + pfrom->fDisconnect = true; + } + return true; + } + + + else if (pfrom->nVersion == 0) + { + // Must have a version message before anything else + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 1); + return false; + } + + // At this point, the outgoing message serialization version can't change. + CNetMsgMaker msgMaker(pfrom->GetSendVersion()); + + if (strCommand == NetMsgType::VERACK) + { + pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION)); + + if (!pfrom->fInbound) { + // Mark this node as currently connected, so we update its timestamp later. + LOCK(cs_main); + State(pfrom->GetId())->fCurrentlyConnected = true; + } + + if (pfrom->nVersion >= SENDHEADERS_VERSION) { + // Tell our peer we prefer to receive headers rather than inv's + // We send this to non-NODE NETWORK peers as well, because even + // non-NODE NETWORK peers can announce blocks (such as pruning + // nodes) + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); + } + if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) { + // Tell our peer we are willing to provide version 1 or 2 cmpctblocks + // However, we do not request new block announcements using + // cmpctblock messages. 
+ // We send this to non-NODE NETWORK peers as well, because + // they may wish to request compact blocks from us + bool fAnnounceUsingCMPCTBLOCK = false; + uint64_t nCMPCTBLOCKVersion = 2; + if (pfrom->GetLocalServices() & NODE_WITNESS) + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); + nCMPCTBLOCKVersion = 1; + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); + } + } + + + else if (strCommand == NetMsgType::ADDR) + { + vector<CAddress> vAddr; + vRecv >> vAddr; + + // Don't want addr from older versions unless seeding + if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000) + return true; + if (vAddr.size() > 1000) + { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 20); + return error("message addr size() = %u", vAddr.size()); + } + + // Store the new addresses + vector<CAddress> vAddrOk; + int64_t nNow = GetAdjustedTime(); + int64_t nSince = nNow - 10 * 60; + BOOST_FOREACH(CAddress& addr, vAddr) + { + boost::this_thread::interruption_point(); + + if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) + continue; + + if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) + addr.nTime = nNow - 5 * 24 * 60 * 60; + pfrom->AddAddressKnown(addr); + bool fReachable = IsReachable(addr); + if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) + { + // Relay to a limited number of other nodes + RelayAddress(addr, fReachable, connman); + } + // Do not store addresses outside our network + if (fReachable) + vAddrOk.push_back(addr); + } + connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60); + if (vAddr.size() < 1000) + pfrom->fGetAddr = false; + if (pfrom->fOneShot) + pfrom->fDisconnect = true; + } + + else if (strCommand == NetMsgType::SENDHEADERS) + { + LOCK(cs_main); + State(pfrom->GetId())->fPreferHeaders = true; + } + + else if (strCommand == NetMsgType::SENDCMPCT) + { + bool fAnnounceUsingCMPCTBLOCK = false; + uint64_t nCMPCTBLOCKVersion = 0; + vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion; + if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) { + LOCK(cs_main); + // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness) + if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) { + State(pfrom->GetId())->fProvidesHeaderAndIDs = true; + State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2; + } + if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces + State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK; + if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) { + if (pfrom->GetLocalServices() & NODE_WITNESS) + State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2); + else + State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1); + } + } + } + + + else if (strCommand == NetMsgType::INV) + { + vector<CInv> vInv; + vRecv >> vInv; + if (vInv.size() > MAX_INV_SZ) + { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 20); + return error("message inv size() = %u", vInv.size()); + } + + bool fBlocksOnly = !fRelayTxes; + + // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true + if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) + fBlocksOnly = false; + + LOCK(cs_main); + + uint32_t 
nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()); + + std::vector<CInv> vToFetch; + + for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) + { + CInv &inv = vInv[nInv]; + + boost::this_thread::interruption_point(); + + bool fAlreadyHave = AlreadyHave(inv); + LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id); + + if (inv.type == MSG_TX) { + inv.type |= nFetchFlags; + } + + if (inv.type == MSG_BLOCK) { + UpdateBlockAvailability(pfrom->GetId(), inv.hash); + if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { + // We used to request the full block here, but since headers-announcements are now the + // primary method of announcement on the network, and since, in the case that a node + // fell back to inv we probably have a reorg which we should get the headers for first, + // we now only provide a getheaders response here. When we receive the headers, we will + // then ask for the blocks we need. + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash)); + LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id); + } + } + else + { + pfrom->AddInventoryKnown(inv); + if (fBlocksOnly) + LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id); + else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) + pfrom->AskFor(inv); + } + + // Track requests for our stuff + GetMainSignals().Inventory(inv.hash); + + if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) { + Misbehaving(pfrom->GetId(), 50); + return error("send buffer size() = %u", pfrom->nSendSize); + } + } + + if (!vToFetch.empty()) + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vToFetch)); + } + + + else if (strCommand == NetMsgType::GETDATA) + { + vector<CInv> vInv; + vRecv >> vInv; + if (vInv.size() > MAX_INV_SZ) + { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 20); + return error("message getdata size() = %u", vInv.size()); + } + + if (fDebug || (vInv.size() != 1)) + LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id); + + if ((fDebug && vInv.size() > 0) || (vInv.size() == 1)) + LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id); + + pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end()); + ProcessGetData(pfrom, chainparams.GetConsensus(), connman); + } + + + else if (strCommand == NetMsgType::GETBLOCKS) + { + CBlockLocator locator; + uint256 hashStop; + vRecv >> locator >> hashStop; + + LOCK(cs_main); + + // Find the last block the caller has in the main chain + CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator); + + // Send the rest of the chain + if (pindex) + pindex = chainActive.Next(pindex); + int nLimit = 500; + LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id); + for (; pindex; pindex = chainActive.Next(pindex)) + { + if (pindex->GetBlockHash() == hashStop) + { + LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + break; + } + // If pruning, don't inv blocks unless we have on disk and are likely to still have + // for some reasonable time window (1 hour) that block relay might require. 
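Editor's aside (not part of this diff): the pruning cutoff computed just below the comment above works out, with mainnet-style parameters, to a depth of a few hundred blocks. This sketch assumes MIN_BLOCKS_TO_KEEP = 288 and a 10-minute block spacing, which match the defaults this code is written against, but they are restated here only for illustration.

#include <cstdio>

int main()
{
    const int MIN_BLOCKS_TO_KEEP = 288;   // assumed prune floor
    const int nPowTargetSpacing  = 600;   // assumed seconds per block (mainnet)
    // Keep one hour's worth of headroom so block relay still works.
    const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / nPowTargetSpacing;

    const int nTipHeight = 500000;        // hypothetical tip height
    const int nHeight    = 499700;        // hypothetical block being considered

    const bool stop = nHeight <= nTipHeight - nPrunedBlocksLikelyToHave;
    std::printf("cutoff depth = %d blocks, stop inv'ing = %s\n",
                nPrunedBlocksLikelyToHave, stop ? "yes" : "no");
    return 0;
}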
+ const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing; + if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave)) + { + LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + break; + } + pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); + if (--nLimit <= 0) + { + // When this block is requested, we'll send an inv that'll + // trigger the peer to getblocks the next batch of inventory. + LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + pfrom->hashContinue = pindex->GetBlockHash(); + break; + } + } + } + + + else if (strCommand == NetMsgType::GETBLOCKTXN) + { + BlockTransactionsRequest req; + vRecv >> req; + + LOCK(cs_main); + + BlockMap::iterator it = mapBlockIndex.find(req.blockhash); + if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) { + LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id); + return true; + } + + if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) { + // If an older block is requested (should never happen in practice, + // but can happen in tests) send a block response instead of a + // blocktxn response. Sending a full block response instead of a + // small blocktxn response is preferable in the case where a peer + // might maliciously send lots of getblocktxn requests to trigger + // expensive disk reads, because it will require the peer to + // actually receive all the data read from disk over the network. + LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH); + CInv inv; + inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK; + inv.hash = req.blockhash; + pfrom->vRecvGetData.push_back(inv); + ProcessGetData(pfrom, chainparams.GetConsensus(), connman); + return true; + } + + CBlock block; + bool ret = ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()); + assert(ret); + + BlockTransactions resp(req); + for (size_t i = 0; i < req.indexes.size(); i++) { + if (req.indexes[i] >= block.vtx.size()) { + Misbehaving(pfrom->GetId(), 100); + LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id); + return true; + } + resp.txn[i] = block.vtx[req.indexes[i]]; + } + int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 
0 : SERIALIZE_TRANSACTION_NO_WITNESS; + connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp)); + } + + + else if (strCommand == NetMsgType::GETHEADERS) + { + CBlockLocator locator; + uint256 hashStop; + vRecv >> locator >> hashStop; + + LOCK(cs_main); + if (IsInitialBlockDownload() && !pfrom->fWhitelisted) { + LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id); + return true; + } + + CNodeState *nodestate = State(pfrom->GetId()); + CBlockIndex* pindex = NULL; + if (locator.IsNull()) + { + // If locator is null, return the hashStop block + BlockMap::iterator mi = mapBlockIndex.find(hashStop); + if (mi == mapBlockIndex.end()) + return true; + pindex = (*mi).second; + } + else + { + // Find the last block the caller has in the main chain + pindex = FindForkInGlobalIndex(chainActive, locator); + if (pindex) + pindex = chainActive.Next(pindex); + } + + // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end + vector<CBlock> vHeaders; + int nLimit = MAX_HEADERS_RESULTS; + LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id); + for (; pindex; pindex = chainActive.Next(pindex)) + { + vHeaders.push_back(pindex->GetBlockHeader()); + if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) + break; + } + // pindex can be NULL either if we sent chainActive.Tip() OR + // if our peer has chainActive.Tip() (and thus we are sending an empty + // headers message). In both cases it's safe to update + // pindexBestHeaderSent to be our tip. + nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip(); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); + } + + + else if (strCommand == NetMsgType::TX) + { + // Stop processing the transaction early if + // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off + if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))) + { + LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id); + return true; + } + + deque<COutPoint> vWorkQueue; + vector<uint256> vEraseQueue; + CTransaction tx(deserialize, vRecv); + + CInv inv(MSG_TX, tx.GetHash()); + pfrom->AddInventoryKnown(inv); + + LOCK(cs_main); + + bool fMissingInputs = false; + CValidationState state; + + pfrom->setAskFor.erase(inv.hash); + mapAlreadyAskedFor.erase(inv.hash); + + if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) { + mempool.check(pcoinsTip); + RelayTransaction(tx, connman); + for (unsigned int i = 0; i < tx.vout.size(); i++) { + vWorkQueue.emplace_back(inv.hash, i); + } + + pfrom->nLastTXTime = GetTime(); + + LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n", + pfrom->id, + tx.GetHash().ToString(), + mempool.size(), mempool.DynamicMemoryUsage() / 1000); + + // Recursively process any orphan transactions that depended on this one + set<NodeId> setMisbehaving; + while (!vWorkQueue.empty()) { + auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front()); + vWorkQueue.pop_front(); + if (itByPrev == mapOrphanTransactionsByPrev.end()) + continue; + for (auto mi = itByPrev->second.begin(); + mi != itByPrev->second.end(); + ++mi) + { + const CTransaction& orphanTx = (*mi)->second.tx; + const uint256& orphanHash = orphanTx.GetHash(); + NodeId fromPeer = (*mi)->second.fromPeer; + bool 
fMissingInputs2 = false; + // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan + // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get + // anyone relaying LegitTxX banned) + CValidationState stateDummy; + + + if (setMisbehaving.count(fromPeer)) + continue; + if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) { + LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString()); + RelayTransaction(orphanTx, connman); + for (unsigned int i = 0; i < orphanTx.vout.size(); i++) { + vWorkQueue.emplace_back(orphanHash, i); + } + vEraseQueue.push_back(orphanHash); + } + else if (!fMissingInputs2) + { + int nDos = 0; + if (stateDummy.IsInvalid(nDos) && nDos > 0) + { + // Punish peer that gave us an invalid orphan tx + Misbehaving(fromPeer, nDos); + setMisbehaving.insert(fromPeer); + LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString()); + } + // Has inputs but not accepted to mempool + // Probably non-standard or insufficient fee/priority + LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString()); + vEraseQueue.push_back(orphanHash); + if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) { + // Do not use rejection cache for witness transactions or + // witness-stripped transactions, as they can have been malleated. + // See https://github.com/bitcoin/bitcoin/issues/8279 for details. + assert(recentRejects); + recentRejects->insert(orphanHash); + } + } + mempool.check(pcoinsTip); + } + } + + BOOST_FOREACH(uint256 hash, vEraseQueue) + EraseOrphanTx(hash); + } + else if (fMissingInputs) + { + bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected + BOOST_FOREACH(const CTxIn& txin, tx.vin) { + if (recentRejects->contains(txin.prevout.hash)) { + fRejectedParents = true; + break; + } + } + if (!fRejectedParents) { + uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()); + BOOST_FOREACH(const CTxIn& txin, tx.vin) { + CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash); + pfrom->AddInventoryKnown(_inv); + if (!AlreadyHave(_inv)) pfrom->AskFor(_inv); + } + AddOrphanTx(tx, pfrom->GetId()); + + // DoS prevention: do not allow mapOrphanTransactions to grow unbounded + unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); + unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); + if (nEvicted > 0) + LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted); + } else { + LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); + } + } else { + if (tx.wit.IsNull() && !state.CorruptionPossible()) { + // Do not use rejection cache for witness transactions or + // witness-stripped transactions, as they can have been malleated. + // See https://github.com/bitcoin/bitcoin/issues/8279 for details. + assert(recentRejects); + recentRejects->insert(tx.GetHash()); + } + + if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { + // Always relay transactions received from whitelisted peers, even + // if they were already in the mempool or rejected from it due + // to policy, allowing the node to function as a gateway for + // nodes hidden behind it. + // + // Never relay transactions that we would assign a non-zero DoS + // score for, as we expect peers to do the same with us in that + // case. 
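Editor's aside (not part of this diff): the whitelist force-relay decision that follows the comment above boils down to "relay unless we would have assigned the transaction a DoS score". A compact restatement with a stand-in for CValidationState::IsInvalid(nDoS); the struct and function names here are invented for the sketch.

struct ValidationResultStub {
    bool invalid = false;
    int dosScore = 0;
    // Mirrors CValidationState::IsInvalid(int&): reports the DoS score and
    // whether the state is invalid.
    bool IsInvalid(int& nDoSOut) const { nDoSOut = dosScore; return invalid; }
};

bool ShouldForceRelay(const ValidationResultStub& state, bool whitelisted, bool forceRelayEnabled)
{
    if (!whitelisted || !forceRelayEnabled)
        return false;
    int nDoS = 0;
    // Relay when the tx is not invalid at all, or invalid only for
    // non-punishable (zero-score) reasons such as policy rejections.
    return !state.IsInvalid(nDoS) || nDoS == 0;
}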
+ int nDoS = 0; + if (!state.IsInvalid(nDoS) || nDoS == 0) { + LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id); + RelayTransaction(tx, connman); + } else { + LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state)); + } + } + } + int nDoS = 0; + if (state.IsInvalid(nDoS)) + { + LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(), + pfrom->id, + FormatStateMessage(state)); + if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(), + state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash)); + if (nDoS > 0) { + Misbehaving(pfrom->GetId(), nDoS); + } + } + } + + + else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing + { + CBlockHeaderAndShortTxIDs cmpctblock; + vRecv >> cmpctblock; + + { + LOCK(cs_main); + + if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) { + // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers + if (!IsInitialBlockDownload()) + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); + return true; + } + } + + CBlockIndex *pindex = NULL; + CValidationState state; + if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) { + int nDoS; + if (state.IsInvalid(nDoS)) { + if (nDoS > 0) { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), nDoS); + } + LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id); + return true; + } + } + + LOCK(cs_main); + // If AcceptBlockHeader returned true, it set pindex + assert(pindex); + UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash()); + + std::map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); + bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); + + if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here + return true; + + if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better + pindex->nTx != 0) { // We had this block at some point, but pruned it + if (fAlreadyInFlight) { + // We requested this block for some reason, but our mempool will probably be useless + // so we just grab the block via normal getdata + std::vector<CInv> vInv(1); + vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); + } + return true; + } + + // If we're not close to tip yet, give up and let parallel block fetch work its magic + if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus())) + return true; + + CNodeState *nodestate = State(pfrom->GetId()); + + if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) { + // Don't bother trying to process compact blocks from v1 peers + // after segwit activates. + return true; + } + + // We want to be a bit conservative just to be extra careful about DoS + // possibilities in compact block processing... 
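Editor's aside (not part of this diff): the gate applied right after the comment above decides whether a cmpctblock is worth reconstructing from the local mempool at all. The conditions are restated below as one function; the names, and the folding of the "in flight from this same peer" check into a single boolean, are simplifications for illustration.

bool ShouldReconstructCmpctBlock(int nAnnouncedHeight, int nTipHeight,
                                 bool alreadyInFlight, bool inFlightFromThisPeer,
                                 int nBlocksInFlight, int maxBlocksInTransitPerPeer)
{
    // More than two blocks ahead of our tip: too far out for our mempool to
    // help; fall through to the normal headers / full-block path instead.
    if (nAnnouncedHeight > nTipHeight + 2)
        return false;
    // Already being downloaded: only keep going if this same peer is the one
    // we are downloading it from.
    if (alreadyInFlight)
        return inFlightFromThisPeer;
    // Otherwise only start a new download if we have in-flight budget left.
    return nBlocksInFlight < maxBlocksInTransitPerPeer;
}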
+ if (pindex->nHeight <= chainActive.Height() + 2) { + if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || + (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) { + list<QueuedBlock>::iterator *queuedBlockIt = NULL; + if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) { + if (!(*queuedBlockIt)->partialBlock) + (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool)); + else { + // The block was already in flight using compact blocks from the same peer + LogPrint("net", "Peer sent us compact block we were already syncing!\n"); + return true; + } + } + + PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; + ReadStatus status = partialBlock.InitData(cmpctblock); + if (status == READ_STATUS_INVALID) { + MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist + Misbehaving(pfrom->GetId(), 100); + LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id); + return true; + } else if (status == READ_STATUS_FAILED) { + // Duplicate txindexes, the block is now in-flight, so just request it + std::vector<CInv> vInv(1); + vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); + return true; + } + + if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) { + // We seem to be rather well-synced, so it appears pfrom was the first to provide us + // with this block! Let's get them to announce using compact blocks in the future. + MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman); + } + + BlockTransactionsRequest req; + for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { + if (!partialBlock.IsTxAvailable(i)) + req.indexes.push_back(i); + } + if (req.indexes.empty()) { + // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions) + BlockTransactions txn; + txn.blockhash = cmpctblock.header.GetHash(); + CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION); + blockTxnMsg << txn; + return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman); + } else { + req.blockhash = pindex->GetBlockHash(); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); + } + } + } else { + if (fAlreadyInFlight) { + // We requested this block, but its far into the future, so our + // mempool will probably be useless - request the block normally + std::vector<CInv> vInv(1); + vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); + return true; + } else { + // If this was an announce-cmpctblock, we want the same treatment as a header message + // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions) + std::vector<CBlock> headers; + headers.push_back(cmpctblock.header); + CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION); + vHeadersMsg << headers; + return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman); + } + } + } + + else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing + { + BlockTransactions resp; + vRecv >> resp; + + 
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); + bool fBlockRead = false; + { + LOCK(cs_main); + + map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash); + if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock || + it->second.first != pfrom->GetId()) { + LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id); + return true; + } + + PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock; + ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); + if (status == READ_STATUS_INVALID) { + MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist + Misbehaving(pfrom->GetId(), 100); + LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id); + return true; + } else if (status == READ_STATUS_FAILED) { + // Might have collided, fall back to getdata now :( + std::vector<CInv> invs; + invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash)); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); + } else { + // Block is either okay, or possibly we received + // READ_STATUS_CHECKBLOCK_FAILED. + // Note that CheckBlock can only fail for one of a few reasons: + // 1. bad-proof-of-work (impossible here, because we've already + // accepted the header) + // 2. merkleroot doesn't match the transactions given (already + // caught in FillBlock with READ_STATUS_FAILED, so + // impossible here) + // 3. the block is otherwise invalid (eg invalid coinbase, + // block is too big, too many legacy sigops, etc). + // So if CheckBlock failed, #3 is the only possibility. + // Under BIP 152, we don't DoS-ban unless proof of work is + // invalid (we don't require all the stateless checks to have + // been run). This is handled below, so just treat this as + // though the block was successfully read, and rely on the + // handling in ProcessNewBlock to ensure the block index is + // updated, reject messages go out, etc. + MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer + fBlockRead = true; + // mapBlockSource is only used for sending reject messages and DoS scores, + // so the race between here and cs_main in ProcessNewBlock is fine. + // BIP 152 permits peers to relay compact blocks after validating + // the header only; we should not punish peers if the block turns + // out to be invalid. + mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false)); + } + } // Don't hold cs_main when we call into ProcessNewBlock + if (fBlockRead) { + bool fNewBlock = false; + // Since we requested this block (it was in mapBlocksInFlight), force it to be processed, + // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc) + ProcessNewBlock(chainparams, pblock, true, &fNewBlock); + if (fNewBlock) + pfrom->nLastBlockTime = GetTime(); + } + } + + + else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing + { + std::vector<CBlockHeader> headers; + + // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. 
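Editor's aside (not part of this diff): after the headers are deserialized below, every header in the batch must chain onto the one before it or the peer is penalized for a "non-continuous headers sequence". The same check in isolation, with strings standing in for uint256 hashes:

#include <string>
#include <vector>

struct HeaderStub {
    std::string hash;          // this header's hash
    std::string hashPrevBlock; // hash of the header it claims to build on
};

bool IsContinuous(const std::vector<HeaderStub>& headers)
{
    std::string hashLastBlock; // empty plays the role of a null uint256
    for (const HeaderStub& h : headers) {
        if (!hashLastBlock.empty() && h.hashPrevBlock != hashLastBlock)
            return false;      // would trigger the Misbehaving(..., 20) path below
        hashLastBlock = h.hash;
    }
    return true;
}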
+ unsigned int nCount = ReadCompactSize(vRecv); + if (nCount > MAX_HEADERS_RESULTS) { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 20); + return error("headers message size = %u", nCount); + } + headers.resize(nCount); + for (unsigned int n = 0; n < nCount; n++) { + vRecv >> headers[n]; + ReadCompactSize(vRecv); // ignore tx count; assume it is 0. + } + + if (nCount == 0) { + // Nothing interesting. Stop asking this peers for more headers. + return true; + } + + CBlockIndex *pindexLast = NULL; + { + LOCK(cs_main); + CNodeState *nodestate = State(pfrom->GetId()); + + // If this looks like it could be a block announcement (nCount < + // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that + // don't connect: + // - Send a getheaders message in response to try to connect the chain. + // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that + // don't connect before giving DoS points + // - Once a headers message is received that is valid and does connect, + // nUnconnectingHeaders gets reset back to 0. + if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) { + nodestate->nUnconnectingHeaders++; + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); + LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", + headers[0].GetHash().ToString(), + headers[0].hashPrevBlock.ToString(), + pindexBestHeader->nHeight, + pfrom->id, nodestate->nUnconnectingHeaders); + // Set hashLastUnknownBlock for this peer, so that if we + // eventually get the headers - even from a different peer - + // we can use this peer to download. + UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash()); + + if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { + Misbehaving(pfrom->GetId(), 20); + } + return true; + } + + uint256 hashLastBlock; + for (const CBlockHeader& header : headers) { + if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { + Misbehaving(pfrom->GetId(), 20); + return error("non-continuous headers sequence"); + } + hashLastBlock = header.GetHash(); + } + } + + CValidationState state; + if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) { + int nDoS; + if (state.IsInvalid(nDoS)) { + if (nDoS > 0) { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), nDoS); + } + return error("invalid header received"); + } + } + + { + LOCK(cs_main); + CNodeState *nodestate = State(pfrom->GetId()); + if (nodestate->nUnconnectingHeaders > 0) { + LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders); + } + nodestate->nUnconnectingHeaders = 0; + + assert(pindexLast); + UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); + + if (nCount == MAX_HEADERS_RESULTS) { + // Headers message had its maximum size; the peer may have more headers. + // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue + // from there instead. 
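Editor's aside (not part of this diff): when the received headers end in a block with at least as much work as our tip, the code below walks pprev pointers back toward the active chain, collecting blocks it still needs, and then requests them earliest-first. The sketch keeps only that walk; it drops the in-flight, witness and compact-block refinements, and the structure and names are stand-ins.

#include <algorithm>
#include <vector>

struct IndexStub {
    IndexStub* pprev = nullptr;   // previous block in the header chain
    bool haveData = false;        // plays the role of BLOCK_HAVE_DATA
    bool onActiveChain = false;   // plays the role of chainActive.Contains()
};

std::vector<IndexStub*> BlocksToDirectFetch(IndexStub* pindexLast, size_t maxInTransit)
{
    std::vector<IndexStub*> vToFetch;
    IndexStub* pindexWalk = pindexLast;
    while (pindexWalk && !pindexWalk->onActiveChain && vToFetch.size() <= maxInTransit) {
        if (!pindexWalk->haveData)
            vToFetch.push_back(pindexWalk);        // collected newest to oldest
        pindexWalk = pindexWalk->pprev;
    }
    // If the walk never reached our chain we are looking at a large reorg;
    // bail out and let parallel download handle it, as the code below does.
    if (!pindexWalk || !pindexWalk->onActiveChain)
        return {};
    std::reverse(vToFetch.begin(), vToFetch.end()); // request earliest block first
    if (vToFetch.size() > maxInTransit)
        vToFetch.resize(maxInTransit);
    return vToFetch;
}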
+ LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight); + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256())); + } + + bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); + // If this set of headers is valid and ends in a block with at least as + // much work as our tip, download as much as possible. + if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) { + vector<CBlockIndex *> vToFetch; + CBlockIndex *pindexWalk = pindexLast; + // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. + while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && + !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) && + (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) { + // We don't have this block, and it's not yet in flight. + vToFetch.push_back(pindexWalk); + } + pindexWalk = pindexWalk->pprev; + } + // If pindexWalk still isn't on our main chain, we're looking at a + // very large reorg at a time we think we're close to caught up to + // the main chain -- this shouldn't really happen. Bail out on the + // direct fetch and rely on parallel download instead. + if (!chainActive.Contains(pindexWalk)) { + LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n", + pindexLast->GetBlockHash().ToString(), + pindexLast->nHeight); + } else { + vector<CInv> vGetData; + // Download as much as possible, from earliest to latest. + BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) { + if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + // Can't download any more from this peer + break; + } + uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()); + vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); + MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex); + LogPrint("net", "Requesting block %s from peer=%d\n", + pindex->GetBlockHash().ToString(), pfrom->id); + } + if (vGetData.size() > 1) { + LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n", + pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); + } + if (vGetData.size() > 0) { + if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) { + // We seem to be rather well-synced, so it appears pfrom was the first to provide us + // with this block! Let's get them to announce using compact blocks in the future. + MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman); + // In any case, we want to download using a compact block, not a regular one + vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); + } + connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + } + } + } + } + } + + else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing + { + std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); + vRecv >> *pblock; + + LogPrint("net", "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id); + + // Process all blocks from whitelisted peers, even if not requested, + // unless we're still syncing with the network. 
+ // Such an unrequested block may still be processed, subject to the + // conditions in AcceptBlock(). + bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload(); + const uint256 hash(pblock->GetHash()); + { + LOCK(cs_main); + // Also always process if we requested the block explicitly, as we may + // need it even though it is not a candidate for a new best tip. + forceProcessing |= MarkBlockAsReceived(hash); + // mapBlockSource is only used for sending reject messages and DoS scores, + // so the race between here and cs_main in ProcessNewBlock is fine. + mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true)); + } + bool fNewBlock = false; + ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock); + if (fNewBlock) + pfrom->nLastBlockTime = GetTime(); + } + + + else if (strCommand == NetMsgType::GETADDR) + { + // This asymmetric behavior for inbound and outbound connections was introduced + // to prevent a fingerprinting attack: an attacker can send specific fake addresses + // to users' AddrMan and later request them by sending getaddr messages. + // Making nodes which are behind NAT and can only make outgoing connections ignore + // the getaddr message mitigates the attack. + if (!pfrom->fInbound) { + LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id); + return true; + } + + // Only send one GetAddr response per connection to reduce resource waste + // and discourage addr stamping of INV announcements. + if (pfrom->fSentAddr) { + LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id); + return true; + } + pfrom->fSentAddr = true; + + pfrom->vAddrToSend.clear(); + vector<CAddress> vAddr = connman.GetAddresses(); + FastRandomContext insecure_rand; + BOOST_FOREACH(const CAddress &addr, vAddr) + pfrom->PushAddress(addr, insecure_rand); + } + + + else if (strCommand == NetMsgType::MEMPOOL) + { + if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted) + { + LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId()); + pfrom->fDisconnect = true; + return true; + } + + if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted) + { + LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId()); + pfrom->fDisconnect = true; + return true; + } + + LOCK(pfrom->cs_inventory); + pfrom->fSendMempool = true; + } + + + else if (strCommand == NetMsgType::PING) + { + if (pfrom->nVersion > BIP0031_VERSION) + { + uint64_t nonce = 0; + vRecv >> nonce; + // Echo the message back with the nonce. This allows for two useful features: + // + // 1) A remote node can quickly check if the connection is operational + // 2) Remote nodes can measure the latency of the network thread. If this node + // is overloaded it won't respond to pings quickly and the remote node can + // avoid sending us more work, like chain download requests. + // + // The nonce stops the remote getting confused between different pings: without + // it, if the remote node sends a ping once per second and this node takes 5 + // seconds to respond to each, the 5th ping the remote sends would appear to + // return very quickly. 
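Editor's aside (not part of this diff): the PONG handler below only counts a latency sample when the echoed nonce matches the one outstanding ping, and treats everything else as a logged anomaly. The same state machine, reduced to one function; parameter names are stand-ins for the CNode fields used below, and the short-payload case is omitted.

#include <cstdint>
#include <string>

// Returns an empty string on a clean round trip, otherwise a short problem
// description matching the cases handled in the PONG branch below.
std::string HandlePong(uint64_t nonceReceived, uint64_t& nPingNonceSent,
                       int64_t nPingUsecStart, int64_t nNowUsec, int64_t& pingUsecTimeOut)
{
    if (nPingNonceSent == 0)
        return "Unsolicited pong without ping";
    if (nonceReceived != nPingNonceSent) {
        if (nonceReceived == 0) {
            nPingNonceSent = 0;          // likely a remote bug; cancel this ping
            return "Nonce zero";
        }
        return "Nonce mismatch";         // normal when pings overlap
    }
    pingUsecTimeOut = nNowUsec - nPingUsecStart; // latency sample in microseconds
    nPingNonceSent = 0;                          // ping is no longer outstanding
    return pingUsecTimeOut > 0 ? "" : "Timing mishap";
}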
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce)); + } + } + + + else if (strCommand == NetMsgType::PONG) + { + int64_t pingUsecEnd = nTimeReceived; + uint64_t nonce = 0; + size_t nAvail = vRecv.in_avail(); + bool bPingFinished = false; + std::string sProblem; + + if (nAvail >= sizeof(nonce)) { + vRecv >> nonce; + + // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) + if (pfrom->nPingNonceSent != 0) { + if (nonce == pfrom->nPingNonceSent) { + // Matching pong received, this ping is no longer outstanding + bPingFinished = true; + int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart; + if (pingUsecTime > 0) { + // Successful ping time measurement, replace previous + pfrom->nPingUsecTime = pingUsecTime; + pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime); + } else { + // This should never happen + sProblem = "Timing mishap"; + } + } else { + // Nonce mismatches are normal when pings are overlapping + sProblem = "Nonce mismatch"; + if (nonce == 0) { + // This is most likely a bug in another implementation somewhere; cancel this ping + bPingFinished = true; + sProblem = "Nonce zero"; + } + } + } else { + sProblem = "Unsolicited pong without ping"; + } + } else { + // This is most likely a bug in another implementation somewhere; cancel this ping + bPingFinished = true; + sProblem = "Short payload"; + } + + if (!(sProblem.empty())) { + LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n", + pfrom->id, + sProblem, + pfrom->nPingNonceSent, + nonce, + nAvail); + } + if (bPingFinished) { + pfrom->nPingNonceSent = 0; + } + } + + + else if (strCommand == NetMsgType::FILTERLOAD) + { + CBloomFilter filter; + vRecv >> filter; + + if (!filter.IsWithinSizeConstraints()) + { + // There is no excuse for sending a too-large filter + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 100); + } + else + { + LOCK(pfrom->cs_filter); + delete pfrom->pfilter; + pfrom->pfilter = new CBloomFilter(filter); + pfrom->pfilter->UpdateEmptyFull(); + pfrom->fRelayTxes = true; + } + } + + + else if (strCommand == NetMsgType::FILTERADD) + { + vector<unsigned char> vData; + vRecv >> vData; + + // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object, + // and thus, the maximum size any matched object can have) in a filteradd message + bool bad = false; + if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { + bad = true; + } else { + LOCK(pfrom->cs_filter); + if (pfrom->pfilter) { + pfrom->pfilter->insert(vData); + } else { + bad = true; + } + } + if (bad) { + LOCK(cs_main); + Misbehaving(pfrom->GetId(), 100); + } + } + + + else if (strCommand == NetMsgType::FILTERCLEAR) + { + LOCK(pfrom->cs_filter); + if (pfrom->GetLocalServices() & NODE_BLOOM) { + delete pfrom->pfilter; + pfrom->pfilter = new CBloomFilter(); + } + pfrom->fRelayTxes = true; + } + + + else if (strCommand == NetMsgType::REJECT) + { + if (fDebug) { + try { + string strMsg; unsigned char ccode; string strReason; + vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH); + + ostringstream ss; + ss << strMsg << " code " << itostr(ccode) << ": " << strReason; + + if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) + { + uint256 hash; + vRecv >> hash; + ss << ": hash " << hash.ToString(); + } + LogPrint("net", "Reject %s\n", SanitizeString(ss.str())); + } catch (const std::ios_base::failure&) { + // Avoid feedback loops by preventing reject messages 
from triggering a new reject message. + LogPrint("net", "Unparseable reject message received\n"); + } + } + } + + else if (strCommand == NetMsgType::FEEFILTER) { + CAmount newFeeFilter = 0; + vRecv >> newFeeFilter; + if (MoneyRange(newFeeFilter)) { + { + LOCK(pfrom->cs_feeFilter); + pfrom->minFeeFilter = newFeeFilter; + } + LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id); + } + } + + else if (strCommand == NetMsgType::NOTFOUND) { + // We do not care about the NOTFOUND message, but logging an Unknown Command + // message would be undesirable as we transmit it ourselves. + } + + else { + // Ignore unknown commands for extensibility + LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id); + } + + + + return true; +} + +// requires LOCK(cs_vRecvMsg) +bool ProcessMessages(CNode* pfrom, CConnman& connman) +{ + const CChainParams& chainparams = Params(); + unsigned int nMaxSendBufferSize = connman.GetSendBufferSize(); + //if (fDebug) + // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size()); + + // + // Message format + // (4) message start + // (12) command + // (4) size + // (4) checksum + // (x) data + // + bool fOk = true; + + if (!pfrom->vRecvGetData.empty()) + ProcessGetData(pfrom, chainparams.GetConsensus(), connman); + + // this maintains the order of responses + if (!pfrom->vRecvGetData.empty()) return fOk; + + std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin(); + while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) { + // Don't bother if send buffer is too full to respond anyway + if (pfrom->nSendSize >= nMaxSendBufferSize) + break; + + // get next message + CNetMessage& msg = *it; + + //if (fDebug) + // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__, + // msg.hdr.nMessageSize, msg.vRecv.size(), + // msg.complete() ? 
"Y" : "N"); + + // end, if an incomplete message is found + if (!msg.complete()) + break; + + // at this point, any failure means we can delete the current message + it++; + + // Scan for message start + if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { + LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id); + fOk = false; + break; + } + + // Read header + CMessageHeader& hdr = msg.hdr; + if (!hdr.IsValid(chainparams.MessageStart())) + { + LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id); + continue; + } + string strCommand = hdr.GetCommand(); + + // Message size + unsigned int nMessageSize = hdr.nMessageSize; + + // Checksum + CDataStream& vRecv = msg.vRecv; + const uint256& hash = msg.GetMessageHash(); + if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) + { + LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__, + SanitizeString(strCommand), nMessageSize, + HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE), + HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE)); + continue; + } + + // Process message + bool fRet = false; + try + { + fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman); + boost::this_thread::interruption_point(); + } + catch (const std::ios_base::failure& e) + { + connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"))); + if (strstr(e.what(), "end of data")) + { + // Allow exceptions from under-length message on vRecv + LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); + } + else if (strstr(e.what(), "size too large")) + { + // Allow exceptions from over-long size + LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); + } + else if (strstr(e.what(), "non-canonical ReadCompactSize()")) + { + // Allow exceptions from non-canonical encoding + LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); + } + else + { + PrintExceptionContinue(&e, "ProcessMessages()"); + } + } + catch (const boost::thread_interrupted&) { + throw; + } + catch (const std::exception& e) { + PrintExceptionContinue(&e, "ProcessMessages()"); + } catch (...) { + PrintExceptionContinue(NULL, "ProcessMessages()"); + } + + if (!fRet) + LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id); + + break; + } + + // In case the connection got shut down, its receive buffer was wiped + if (!pfrom->fDisconnect) + pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it); + + return fOk; +} + +class CompareInvMempoolOrder +{ + CTxMemPool *mp; +public: + CompareInvMempoolOrder(CTxMemPool *_mempool) + { + mp = _mempool; + } + + bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b) + { + /* As std::make_heap produces a max-heap, we want the entries with the + * fewest ancestors/highest fee to sort later. 
*/ + return mp->CompareDepthAndScore(*b, *a); + } +}; + +bool SendMessages(CNode* pto, CConnman& connman) +{ + const Consensus::Params& consensusParams = Params().GetConsensus(); + { + // Don't send anything until we get its version message + if (pto->nVersion == 0 || pto->fDisconnect) + return true; + + // If we get here, the outgoing message serialization version is set and can't change. + CNetMsgMaker msgMaker(pto->GetSendVersion()); + + // + // Message: ping + // + bool pingSend = false; + if (pto->fPingQueued) { + // RPC ping request by user + pingSend = true; + } + if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) { + // Ping automatically sent as a latency probe & keepalive. + pingSend = true; + } + if (pingSend) { + uint64_t nonce = 0; + while (nonce == 0) { + GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); + } + pto->fPingQueued = false; + pto->nPingUsecStart = GetTimeMicros(); + if (pto->nVersion > BIP0031_VERSION) { + pto->nPingNonceSent = nonce; + connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); + } else { + // Peer is too old to support ping command with nonce, pong will never arrive. + pto->nPingNonceSent = 0; + connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING)); + } + } + + TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState() + if (!lockMain) + return true; + + CNodeState &state = *State(pto->GetId()); + + BOOST_FOREACH(const CBlockReject& reject, state.rejects) + connman.PushMessage(pto, msgMaker.Make(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock)); + state.rejects.clear(); + + if (state.fShouldBan) { + state.fShouldBan = false; + if (pto->fWhitelisted) + LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString()); + else { + pto->fDisconnect = true; + if (pto->addr.IsLocal()) + LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString()); + else + { + connman.Ban(pto->addr, BanReasonNodeMisbehaving); + } + return true; + } + } + + // Address refresh broadcast + int64_t nNow = GetTimeMicros(); + if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) { + AdvertiseLocal(pto); + pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); + } + + // + // Message: addr + // + if (pto->nNextAddrSend < nNow) { + pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL); + vector<CAddress> vAddr; + vAddr.reserve(pto->vAddrToSend.size()); + BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend) + { + if (!pto->addrKnown.contains(addr.GetKey())) + { + pto->addrKnown.insert(addr.GetKey()); + vAddr.push_back(addr); + // receiver rejects addr messages larger than 1000 + if (vAddr.size() >= 1000) + { + connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); + vAddr.clear(); + } + } + } + pto->vAddrToSend.clear(); + if (!vAddr.empty()) + connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); + // we only send the big addr message once + if (pto->vAddrToSend.capacity() > 40) + pto->vAddrToSend.shrink_to_fit(); + } + + // Start block sync + if (pindexBestHeader == NULL) + pindexBestHeader = chainActive.Tip(); + bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. 
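Editor's aside (not part of this diff): the addr and local-address rebroadcasts above are rescheduled with PoissonNextSend(), i.e. with exponentially distributed gaps around an average interval, which smooths traffic and makes announcement timing harder to correlate across peers. The snippet below is not the project's implementation; it is a sketch of the same idea using std::exponential_distribution.

#include <cstdint>
#include <random>

// Next send time in microseconds: now plus an exponentially distributed
// delay whose mean is avgIntervalSeconds.
int64_t NextSendTimeUsec(int64_t nNowUsec, double avgIntervalSeconds, std::mt19937_64& rng)
{
    std::exponential_distribution<double> dist(1.0 / avgIntervalSeconds);
    return nNowUsec + static_cast<int64_t>(dist(rng) * 1e6);
}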
+ if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { + // Only actively request headers from a single peer, unless we're close to today. + if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { + state.fSyncStarted = true; + nSyncStarted++; + const CBlockIndex *pindexStart = pindexBestHeader; + /* If possible, start at the block preceding the currently + best known header. This ensures that we always get a + non-empty list of headers back as long as the peer + is up-to-date. With a non-empty response, we can initialise + the peer's known best block. This wouldn't be possible + if we requested starting at pindexBestHeader and + got back an empty response. */ + if (pindexStart->pprev) + pindexStart = pindexStart->pprev; + LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight); + connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256())); + } + } + + // Resend wallet transactions that haven't gotten in a block yet + // Except during reindex, importing and IBD, when old wallet + // transactions become unconfirmed and spams other nodes. + if (!fReindex && !fImporting && !IsInitialBlockDownload()) + { + GetMainSignals().Broadcast(nTimeBestReceived, &connman); + } + + // + // Try sending block announcements via headers + // + { + // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our + // list of block hashes we're relaying, and our peer wants + // headers announcements, then find the first header + // not yet known to our peer but would connect, and send. + // If no header would connect, or if we have too many + // blocks, or if the peer doesn't want headers, just + // add all to the inv queue. + LOCK(pto->cs_inventory); + vector<CBlock> vHeaders; + bool fRevertToInv = ((!state.fPreferHeaders && + (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) || + pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE); + CBlockIndex *pBestIndex = NULL; // last header queued for delivery + ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date + + if (!fRevertToInv) { + bool fFoundStartingHeader = false; + // Try to find first header that our peer doesn't have, and + // then send all headers past that one. If we come across any + // headers that aren't on chainActive, give up. + BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) { + BlockMap::iterator mi = mapBlockIndex.find(hash); + assert(mi != mapBlockIndex.end()); + CBlockIndex *pindex = mi->second; + if (chainActive[pindex->nHeight] != pindex) { + // Bail out if we reorged away from this block + fRevertToInv = true; + break; + } + if (pBestIndex != NULL && pindex->pprev != pBestIndex) { + // This means that the list of blocks to announce don't + // connect to each other. + // This shouldn't really be possible to hit during + // regular operation (because reorgs should take us to + // a chain that has some block not on the prior chain, + // which should be caught by the prior check), but one + // way this could happen is by using invalidateblock / + // reconsiderblock repeatedly on the tip, causing it to + // be added multiple times to vBlockHashesToAnnounce. + // Robustly deal with this rare situation by reverting + // to an inv. 
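The initial GETHEADERS above carries a block locator built by chainActive.GetLocator(pindexStart). As a rough, hedged sketch of what such a locator looks like (heights only, step schedule simplified; the real CChain::GetLocator walks CBlockIndex pointers and collects hashes): entries are dense near the tip and exponentially sparser going back, always ending at the genesis block, so a peer on a different chain can still locate a reasonably recent common ancestor.

// Illustrative sketch, not the real locator code.
#include <cstdio>
#include <vector>

std::vector<int> LocatorHeights(int tipHeight)
{
    std::vector<int> heights;
    int step = 1;
    for (int h = tipHeight; h > 0; h -= step) {
        heights.push_back(h);
        if (heights.size() >= 10)
            step *= 2;                 // back off exponentially after the first entries
    }
    heights.push_back(0);              // always anchor the locator at the genesis block
    return heights;
}

int main()
{
    for (int h : LocatorHeights(413567))
        std::printf("%d ", h);
    std::printf("\n");
    return 0;
}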
+ fRevertToInv = true; + break; + } + pBestIndex = pindex; + if (fFoundStartingHeader) { + // add this to the headers message + vHeaders.push_back(pindex->GetBlockHeader()); + } else if (PeerHasHeader(&state, pindex)) { + continue; // keep looking for the first new block + } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) { + // Peer doesn't have this header but they do have the prior one. + // Start sending headers. + fFoundStartingHeader = true; + vHeaders.push_back(pindex->GetBlockHeader()); + } else { + // Peer doesn't have this header or the prior one -- nothing will + // connect, so bail out. + fRevertToInv = true; + break; + } + } + } + if (!fRevertToInv && !vHeaders.empty()) { + if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) { + // We only send up to 1 block as header-and-ids, as otherwise + // probably means we're doing an initial-ish-sync or they're slow + LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__, + vHeaders.front().GetHash().ToString(), pto->id); + //TODO: Shouldn't need to reload block from disk, but requires refactor + CBlock block; + bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams); + assert(ret); + CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness); + int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS; + connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); + state.pindexBestHeaderSent = pBestIndex; + } else if (state.fPreferHeaders) { + if (vHeaders.size() > 1) { + LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, + vHeaders.size(), + vHeaders.front().GetHash().ToString(), + vHeaders.back().GetHash().ToString(), pto->id); + } else { + LogPrint("net", "%s: sending header %s to peer=%d\n", __func__, + vHeaders.front().GetHash().ToString(), pto->id); + } + connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); + state.pindexBestHeaderSent = pBestIndex; + } else + fRevertToInv = true; + } + if (fRevertToInv) { + // If falling back to using an inv, just try to inv the tip. + // The last entry in vBlockHashesToAnnounce was our tip at some point + // in the past. + if (!pto->vBlockHashesToAnnounce.empty()) { + const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); + BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce); + assert(mi != mapBlockIndex.end()); + CBlockIndex *pindex = mi->second; + + // Warn if we're announcing a block that is not on the main chain. + // This should be very rare and could be optimized out. + // Just log for now. + if (chainActive[pindex->nHeight] != pindex) { + LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n", + hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString()); + } + + // If the peer's chain has this block, don't inv it back. 
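The scan above walks vBlockHashesToAnnounce looking for the first header the peer is missing and then queues that header and everything after it, reverting to an inv if nothing would connect. A hypothetical, heavily simplified version of that decision, with strings standing in for CBlockIndex entries, peerKnown standing in for PeerHasHeader, and the reorg and pBestIndex checks omitted (HeadersToSend, announced and parentOfFirst are illustrative names):

// Simplified sketch of the "find the first header the peer lacks" logic above.
// Returning nullopt corresponds to setting fRevertToInv.
#include <cstddef>
#include <optional>
#include <set>
#include <string>
#include <vector>

std::optional<std::vector<std::string>> HeadersToSend(
    const std::vector<std::string>& announced,   // ordered oldest -> newest, connected
    const std::string& parentOfFirst,            // block preceding announced.front()
    const std::set<std::string>& peerKnown)      // headers the peer already has
{
    std::vector<std::string> out;
    bool foundStart = false;
    for (std::size_t i = 0; i < announced.size(); ++i) {
        if (foundStart) { out.push_back(announced[i]); continue; }
        if (peerKnown.count(announced[i])) continue;              // keep looking for the first new block
        const std::string& prev = (i == 0) ? parentOfFirst : announced[i - 1];
        if (!peerKnown.count(prev)) return std::nullopt;          // nothing connects: fall back to inv
        foundStart = true;
        out.push_back(announced[i]);
    }
    return out;
}

int main()
{
    auto r = HeadersToSend({"B", "C", "D"}, "A", {"A", "B"});
    return (r && r->size() == 2) ? 0 : 1;                         // peer has A,B so we would send C and D
}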
+ if (!PeerHasHeader(&state, pindex)) { + pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce)); + LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__, + pto->id, hashToAnnounce.ToString()); + } + } + } + pto->vBlockHashesToAnnounce.clear(); + } + + // + // Message: inventory + // + vector<CInv> vInv; + { + LOCK(pto->cs_inventory); + vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX)); + + // Add blocks + BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) { + vInv.push_back(CInv(MSG_BLOCK, hash)); + if (vInv.size() == MAX_INV_SZ) { + connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); + vInv.clear(); + } + } + pto->vInventoryBlockToSend.clear(); + + // Check whether periodic sends should happen + bool fSendTrickle = pto->fWhitelisted; + if (pto->nNextInvSend < nNow) { + fSendTrickle = true; + // Use half the delay for outbound peers, as there is less privacy concern for them. + pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound); + } + + // Time to send but the peer has requested we not relay transactions. + if (fSendTrickle) { + LOCK(pto->cs_filter); + if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear(); + } + + // Respond to BIP35 mempool requests + if (fSendTrickle && pto->fSendMempool) { + auto vtxinfo = mempool.infoAll(); + pto->fSendMempool = false; + CAmount filterrate = 0; + { + LOCK(pto->cs_feeFilter); + filterrate = pto->minFeeFilter; + } + + LOCK(pto->cs_filter); + + for (const auto& txinfo : vtxinfo) { + const uint256& hash = txinfo.tx->GetHash(); + CInv inv(MSG_TX, hash); + pto->setInventoryTxToSend.erase(hash); + if (filterrate) { + if (txinfo.feeRate.GetFeePerK() < filterrate) + continue; + } + if (pto->pfilter) { + if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; + } + pto->filterInventoryKnown.insert(hash); + vInv.push_back(inv); + if (vInv.size() == MAX_INV_SZ) { + connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); + vInv.clear(); + } + } + pto->timeLastMempoolReq = GetTime(); + } + + // Determine transactions to relay + if (fSendTrickle) { + // Produce a vector with all candidates for sending + vector<std::set<uint256>::iterator> vInvTx; + vInvTx.reserve(pto->setInventoryTxToSend.size()); + for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) { + vInvTx.push_back(it); + } + CAmount filterrate = 0; + { + LOCK(pto->cs_feeFilter); + filterrate = pto->minFeeFilter; + } + // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. + // A heap is used so that not all items need sorting if only a few are being sent. + CompareInvMempoolOrder compareInvMempoolOrder(&mempool); + std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); + // No reason to drain out at many times the network's capacity, + // especially since we have many peers and some will draw much shorter delays. + unsigned int nRelayedTransactions = 0; + LOCK(pto->cs_filter); + while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) { + // Fetch the top element from the heap + std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); + std::set<uint256>::iterator it = vInvTx.back(); + vInvTx.pop_back(); + uint256 hash = *it; + // Remove it from the to-be-sent set + pto->setInventoryTxToSend.erase(it); + // Check if not in the filter already + if (pto->filterInventoryKnown.contains(hash)) { + continue; + } + // Not in the mempool anymore? 
don't bother sending it. + auto txinfo = mempool.info(hash); + if (!txinfo.tx) { + continue; + } + if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) { + continue; + } + if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; + // Send + vInv.push_back(CInv(MSG_TX, hash)); + nRelayedTransactions++; + { + // Expire old relay messages + while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow) + { + mapRelay.erase(vRelayExpiration.front().second); + vRelayExpiration.pop_front(); + } + + auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx))); + if (ret.second) { + vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first)); + } + } + if (vInv.size() == MAX_INV_SZ) { + connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); + vInv.clear(); + } + pto->filterInventoryKnown.insert(hash); + } + } + } + if (!vInv.empty()) + connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); + + // Detect whether we're stalling + nNow = GetTimeMicros(); + if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { + // Stalling only triggers when the block download window cannot move. During normal steady state, + // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection + // should only happen during initial block download. + LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id); + pto->fDisconnect = true; + return true; + } + // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval + // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout. + // We compensate for other peers to prevent killing off peers due to our own downstream link + // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes + // to unreasonably increase our timeout. 
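To make the rule described in the comment above concrete: the timeout applied just below works out to (2 + 0.5 * N) block intervals, where N counts the other peers we are currently downloading validated blocks from. A small worked example, assuming the usual 10 minute block spacing:

// Worked example of the block-download timeout rule; constants follow the comment above.
#include <cstdio>

int main()
{
    const double blockIntervalMinutes = 10.0;   // assumed mainnet nPowTargetSpacing
    for (int otherPeers = 0; otherPeers <= 3; ++otherPeers)
        std::printf("N=%d other downloading peers -> timeout after %.0f minutes\n",
                    otherPeers, blockIntervalMinutes * (2.0 + 0.5 * otherPeers));
    return 0;
}

So a lone downloading peer gets 20 minutes per block, and every additional busy peer buys it another 5 minutes, which is the compensation for a saturated downstream link that the comment mentions.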
+ if (state.vBlocksInFlight.size() > 0) { + QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); + int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); + if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { + LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id); + pto->fDisconnect = true; + return true; + } + } + + // + // Message: getdata (blocks) + // + vector<CInv> vGetData; + if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + vector<CBlockIndex*> vToDownload; + NodeId staller = -1; + FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); + BOOST_FOREACH(CBlockIndex *pindex, vToDownload) { + uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams); + vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); + MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex); + LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), + pindex->nHeight, pto->id); + } + if (state.nBlocksInFlight == 0 && staller != -1) { + if (State(staller)->nStallingSince == 0) { + State(staller)->nStallingSince = nNow; + LogPrint("net", "Stall started peer=%d\n", staller); + } + } + } + + // + // Message: getdata (non-blocks) + // + while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow) + { + const CInv& inv = (*pto->mapAskFor.begin()).second; + if (!AlreadyHave(inv)) + { + if (fDebug) + LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id); + vGetData.push_back(inv); + if (vGetData.size() >= 1000) + { + connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + vGetData.clear(); + } + } else { + //If we're not going to ask, don't expect a response. + pto->setAskFor.erase(inv.hash); + } + pto->mapAskFor.erase(pto->mapAskFor.begin()); + } + if (!vGetData.empty()) + connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + + // + // Message: feefilter + // + // We don't want white listed peers to filter txs to us if we have -whitelistforcerelay + if (pto->nVersion >= FEEFILTER_VERSION && GetBoolArg("-feefilter", DEFAULT_FEEFILTER) && + !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) { + CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK(); + int64_t timeNow = GetTimeMicros(); + if (timeNow > pto->nextSendTimeFeeFilter) { + static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE); + static FeeFilterRounder filterRounder(default_feerate); + CAmount filterToSend = filterRounder.round(currentFilter); + if (filterToSend != pto->lastSentFeeFilter) { + connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend)); + pto->lastSentFeeFilter = filterToSend; + } + pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL); + } + // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY + // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. 
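The feefilter schedule above, like the addr and inv trickles earlier in SendMessages, is driven by PoissonNextSend. The following is not the real implementation, only a sketch of the idea using <random>: draw an exponentially distributed delay around the average interval, so successive send times form a Poisson process and the next send time is hard to predict from the previous one (NextSendMicros and avgIntervalSeconds are illustrative names):

// Illustrative only: exponentially distributed next-send delay with the given mean.
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <random>

int64_t NextSendMicros(int64_t nowMicros, int64_t avgIntervalSeconds)
{
    static std::mt19937_64 rng{std::random_device{}()};
    std::exponential_distribution<double> d(1.0 / double(avgIntervalSeconds));
    return nowMicros + int64_t(d(rng) * 1e6);
}

int main()
{
    const int64_t now = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
    for (int i = 0; i < 3; ++i)
        std::printf("next send in %.1f s\n", (NextSendMicros(now, 30) - now) / 1e6);
    return 0;
}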
+ else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter && + (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) { + pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000; + } + } + } + return true; +} + +class CNetProcessingCleanup +{ +public: + CNetProcessingCleanup() {} + ~CNetProcessingCleanup() { + // orphan transactions + mapOrphanTransactions.clear(); + mapOrphanTransactionsByPrev.clear(); + } +} instance_of_cnetprocessingcleanup; diff --git a/src/net_processing.h b/src/net_processing.h new file mode 100644 index 0000000000..130433cc7c --- /dev/null +++ b/src/net_processing.h @@ -0,0 +1,51 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2015 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_NET_PROCESSING_H +#define BITCOIN_NET_PROCESSING_H + +#include "net.h" +#include "validationinterface.h" + +/** Register with a network node to receive its signals */ +void RegisterNodeSignals(CNodeSignals& nodeSignals); +/** Unregister a network node */ +void UnregisterNodeSignals(CNodeSignals& nodeSignals); + +class PeerLogicValidation : public CValidationInterface { +private: + CConnman* connman; + +public: + PeerLogicValidation(CConnman* connmanIn); + + virtual void SyncTransaction(const CTransaction& tx, const CBlockIndex* pindex, int nPosInBlock); + virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload); + virtual void BlockChecked(const CBlock& block, const CValidationState& state); +}; + +struct CNodeStateStats { + int nMisbehavior; + int nSyncHeight; + int nCommonHeight; + std::vector<int> vHeightInFlight; +}; + +/** Get statistics from node state */ +bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats); +/** Increase a node's misbehavior score. */ +void Misbehaving(NodeId nodeid, int howmuch); + +/** Process protocol messages received from a given node */ +bool ProcessMessages(CNode* pfrom, CConnman& connman); +/** + * Send queued protocol messages to be sent to a give node. + * + * @param[in] pto The node which we are sending messages to. + * @param[in] connman The connection manager for that node. 
+ */ +bool SendMessages(CNode* pto, CConnman& connman); + +#endif // BITCOIN_NET_PROCESSING_H diff --git a/src/netaddress.h b/src/netaddress.h index 9330fe3328..9dffaa57e7 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -85,7 +85,7 @@ class CNetAddr ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(FLATDATA(ip)); } @@ -122,7 +122,7 @@ class CSubNet ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(network); READWRITE(FLATDATA(netmask)); READWRITE(FLATDATA(valid)); @@ -159,7 +159,7 @@ class CService : public CNetAddr ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(FLATDATA(ip)); unsigned short portN = htons(port); READWRITE(FLATDATA(portN)); diff --git a/src/netbase.cpp b/src/netbase.cpp index 9fe34108f5..da94fd4d13 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -94,30 +94,9 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign } } -#ifdef HAVE_GETADDRINFO_A - struct in_addr ipv4_addr; -#ifdef HAVE_INET_PTON - if (inet_pton(AF_INET, pszName, &ipv4_addr) > 0) { - vIP.push_back(CNetAddr(ipv4_addr)); - return true; - } - - struct in6_addr ipv6_addr; - if (inet_pton(AF_INET6, pszName, &ipv6_addr) > 0) { - vIP.push_back(CNetAddr(ipv6_addr)); - return true; - } -#else - ipv4_addr.s_addr = inet_addr(pszName); - if (ipv4_addr.s_addr != INADDR_NONE) { - vIP.push_back(CNetAddr(ipv4_addr)); - return true; - } -#endif -#endif - struct addrinfo aiHint; memset(&aiHint, 0, sizeof(struct addrinfo)); + aiHint.ai_socktype = SOCK_STREAM; aiHint.ai_protocol = IPPROTO_TCP; aiHint.ai_family = AF_UNSPEC; @@ -126,33 +105,8 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign #else aiHint.ai_flags = fAllowLookup ? AI_ADDRCONFIG : AI_NUMERICHOST; #endif - struct addrinfo *aiRes = NULL; -#ifdef HAVE_GETADDRINFO_A - struct gaicb gcb, *query = &gcb; - memset(query, 0, sizeof(struct gaicb)); - gcb.ar_name = pszName; - gcb.ar_request = &aiHint; - int nErr = getaddrinfo_a(GAI_NOWAIT, &query, 1, NULL); - if (nErr) - return false; - - do { - // Should set the timeout limit to a reasonable value to avoid - // generating unnecessary checking call during the polling loop, - // while it can still response to stop request quick enough. - // 2 seconds looks fine in our situation. 
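The netaddress.h hunks above show a signature change that recurs throughout this diff: SerializationOp no longer takes nType and nVersion as parameters, because the stream itself now carries them (compare the CBlockLocator hunk further down, which calls s.GetType() and s.GetVersion()). A toy illustration of the pattern, with ToyStream and ToyObject as made-up stand-ins for the real serialization framework:

// Toy sketch of querying type/version from the stream instead of passing them in.
#include <cstdio>

struct ToyStream {
    int nType;
    int nVersion;
    int GetType() const { return nType; }
    int GetVersion() const { return nVersion; }
};

struct ToyObject {
    template <typename Stream>
    void SerializationOp(Stream& s) const
    {
        // Old signature: SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
        // New signature: the stream carries its own type and version, queried on demand.
        std::printf("serializing for type=%d at version=%d\n", s.GetType(), s.GetVersion());
    }
};

int main()
{
    ToyStream s{1, 70014};   // placeholder values, not the real SER_* constants
    ToyObject{}.SerializationOp(s);
    return 0;
}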
- struct timespec ts = { 2, 0 }; - gai_suspend(&query, 1, &ts); - boost::this_thread::interruption_point(); - - nErr = gai_error(query); - if (0 == nErr) - aiRes = query->ar_result; - } while (nErr == EAI_INPROGRESS); -#else int nErr = getaddrinfo(pszName, NULL, &aiHint, &aiRes); -#endif if (nErr) return false; @@ -338,7 +292,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vSocks5Init.push_back(0x01); // # METHODS vSocks5Init.push_back(0x00); // X'00' NO AUTHENTICATION REQUIRED } - ssize_t ret = send(hSocket, (const char*)begin_ptr(vSocks5Init), vSocks5Init.size(), MSG_NOSIGNAL); + ssize_t ret = send(hSocket, (const char*)vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5Init.size()) { CloseSocket(hSocket); return error("Error sending to proxy"); @@ -363,7 +317,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end()); vAuth.push_back(auth->password.size()); vAuth.insert(vAuth.end(), auth->password.begin(), auth->password.end()); - ret = send(hSocket, (const char*)begin_ptr(vAuth), vAuth.size(), MSG_NOSIGNAL); + ret = send(hSocket, (const char*)vAuth.data(), vAuth.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vAuth.size()) { CloseSocket(hSocket); return error("Error sending authentication to proxy"); @@ -393,7 +347,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vSocks5.insert(vSocks5.end(), strDest.begin(), strDest.end()); vSocks5.push_back((port >> 8) & 0xFF); vSocks5.push_back((port >> 0) & 0xFF); - ret = send(hSocket, (const char*)begin_ptr(vSocks5), vSocks5.size(), MSG_NOSIGNAL); + ret = send(hSocket, (const char*)vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5.size()) { CloseSocket(hSocket); return error("Error sending to proxy"); diff --git a/src/netmessagemaker.h b/src/netmessagemaker.h new file mode 100644 index 0000000000..7167434a19 --- /dev/null +++ b/src/netmessagemaker.h @@ -0,0 +1,36 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_NETMESSAGEMAKER_H +#define BITCOIN_NETMESSAGEMAKER_H + +#include "net.h" +#include "serialize.h" + +class CNetMsgMaker +{ +public: + CNetMsgMaker(int nVersionIn) : nVersion(nVersionIn){} + + template <typename... Args> + CSerializedNetMsg Make(int nFlags, std::string sCommand, Args&&... args) + { + CSerializedNetMsg msg; + msg.command = std::move(sCommand); + CVectorWriter{ SER_NETWORK, nFlags | nVersion, msg.data, 0, std::forward<Args>(args)... }; + return msg; + } + + template <typename... Args> + CSerializedNetMsg Make(std::string sCommand, Args&&... args) + { + return Make(0, std::move(sCommand), std::forward<Args>(args)...); + } + +private: + const int nVersion; +}; + +#endif // BITCOIN_NETMESSAGEMAKER_H diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index c07cd2eff8..9eb831bc17 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2015 The Bitcoin developers +// Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
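Going back to netmessagemaker.h above: CNetMsgMaker::Make perfect-forwards an arbitrary argument list into a CVectorWriter so callers can build a serialized message in a single expression, which is what the PushMessage call sites elsewhere in this diff rely on. A self-contained toy analogue of that variadic pattern (ToyMsgMaker, ToyMsg and SerializeOne are demo names only; the real code serializes through CVectorWriter with the peer's negotiated version and any extra flags):

// Demo-only analogue of the variadic Make() pattern; not the real serialization framework.
#include <cstdint>
#include <initializer_list>
#include <string>
#include <utility>
#include <vector>

struct ToyMsg { std::string command; std::vector<unsigned char> data; };

inline void SerializeOne(std::vector<unsigned char>& out, uint64_t v)
{
    for (int i = 0; i < 8; ++i) out.push_back((v >> (8 * i)) & 0xff);   // little-endian
}
inline void SerializeOne(std::vector<unsigned char>& out, const std::string& s)
{
    out.push_back(static_cast<unsigned char>(s.size()));                // toy length prefix
    out.insert(out.end(), s.begin(), s.end());
}

struct ToyMsgMaker {
    template <typename... Args>
    ToyMsg Make(std::string command, Args&&... args) const
    {
        ToyMsg msg;
        msg.command = std::move(command);
        // Expand the pack left to right, appending each argument to one buffer.
        (void)std::initializer_list<int>{
            (SerializeOne(msg.data, std::forward<Args>(args)), 0)...};
        return msg;
    }
};

int main()
{
    ToyMsg ping = ToyMsgMaker{}.Make("ping", uint64_t{0x1122334455667788});
    return ping.data.size() == 8 ? 0 : 1;
}

In the diff itself the same shape appears as msgMaker.Make(NetMsgType::PING, nonce) handed straight to connman.PushMessage.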
@@ -14,10 +14,9 @@ #include "util.h" void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets, - unsigned int maxConfirms, double _decay, std::string _dataTypeString) + unsigned int maxConfirms, double _decay) { decay = _decay; - dataTypeString = _dataTypeString; for (unsigned int i = 0; i < defaultBuckets.size(); i++) { buckets.push_back(defaultBuckets[i]); bucketMap[defaultBuckets[i]] = i; @@ -87,10 +86,10 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, int maxbucketindex = buckets.size() - 1; - // requireGreater means we are looking for the lowest fee/priority such that all higher - // values pass, so we start at maxbucketindex (highest fee) and look at successively + // requireGreater means we are looking for the lowest feerate such that all higher + // values pass, so we start at maxbucketindex (highest feerate) and look at successively // smaller buckets until we reach failure. Otherwise, we are looking for the highest - // fee/priority such that all lower values fail, and we go in the opposite direction. + // feerate such that all lower values fail, and we go in the opposite direction. unsigned int startbucket = requireGreater ? maxbucketindex : 0; int step = requireGreater ? -1 : 1; @@ -107,7 +106,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, bool foundAnswer = false; unsigned int bins = unconfTxs.size(); - // Start counting from highest(default) or lowest fee/pri transactions + // Start counting from highest(default) or lowest feerate transactions for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) { curFarBucket = bucket; nConf += confAvg[confTarget - 1][bucket]; @@ -145,8 +144,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, double median = -1; double txSum = 0; - // Calculate the "average" fee of the best bucket range that met success conditions - // Find the bucket with the median transaction and then report the average fee from that bucket + // Calculate the "average" feerate of the best bucket range that met success conditions + // Find the bucket with the median transaction and then report the average feerate from that bucket // This is a compromise between finding the median which we can't since we don't save all tx's // and reporting the average which is less accurate unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket; @@ -166,8 +165,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, } } - LogPrint("estimatefee", "%3d: For conf success %s %4.2f need %s %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n", - confTarget, requireGreater ? ">" : "<", successBreakPoint, dataTypeString, + LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n", + confTarget, requireGreater ? ">" : "<", successBreakPoint, requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket], 100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum); @@ -200,10 +199,10 @@ void TxConfirmStats::Read(CAutoFile& filein) filein >> fileBuckets; numBuckets = fileBuckets.size(); if (numBuckets <= 1 || numBuckets > 1000) - throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 fee/pri buckets"); + throw std::runtime_error("Corrupt estimates file. 
Must have between 2 and 1000 feerate buckets"); filein >> fileAvg; if (fileAvg.size() != numBuckets) - throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri average bucket count"); + throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count"); filein >> fileTxCtAvg; if (fileTxCtAvg.size() != numBuckets) throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count"); @@ -213,9 +212,9 @@ void TxConfirmStats::Read(CAutoFile& filein) throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms"); for (unsigned int i = 0; i < maxConfirms; i++) { if (fileConfAvg[i].size() != numBuckets) - throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri conf average bucket count"); + throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count"); } - // Now that we've processed the entire fee estimate data file and not + // Now that we've processed the entire feerate estimate data file and not // thrown any errors, we can copy it to our data structures decay = fileDecay; buckets = fileBuckets; @@ -242,8 +241,8 @@ void TxConfirmStats::Read(CAutoFile& filein) for (unsigned int i = 0; i < buckets.size(); i++) bucketMap[buckets[i]] = i; - LogPrint("estimatefee", "Reading estimates: %u %s buckets counting confirms up to %u blocks\n", - numBuckets, dataTypeString, maxConfirms); + LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n", + numBuckets, maxConfirms); } unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val) @@ -251,7 +250,6 @@ unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val) unsigned int bucketindex = bucketMap.lower_bound(val)->second; unsigned int blockIndex = nBlockHeight % unconfTxs.size(); unconfTxs[blockIndex][bucketindex]++; - LogPrint("estimatefee", "adding to %s", dataTypeString); return bucketindex; } @@ -291,12 +289,10 @@ void CBlockPolicyEstimator::removeTx(uint256 hash) hash.ToString().c_str()); return; } - TxConfirmStats *stats = pos->second.stats; unsigned int entryHeight = pos->second.blockHeight; unsigned int bucketIndex = pos->second.bucketIndex; - if (stats != NULL) - stats->removeTx(entryHeight, nBestSeenHeight, bucketIndex); + feeStats.removeTx(entryHeight, nBestSeenHeight, bucketIndex); mapMemPoolTxs.erase(hash); } @@ -309,45 +305,14 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate& _minRelayFee) vfeelist.push_back(bucketBoundary); } vfeelist.push_back(INF_FEERATE); - feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "FeeRate"); - - minTrackedPriority = AllowFreeThreshold() < MIN_PRIORITY ? 
MIN_PRIORITY : AllowFreeThreshold(); - std::vector<double> vprilist; - for (double bucketBoundary = minTrackedPriority; bucketBoundary <= MAX_PRIORITY; bucketBoundary *= PRI_SPACING) { - vprilist.push_back(bucketBoundary); - } - vprilist.push_back(INF_PRIORITY); - priStats.Initialize(vprilist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "Priority"); - - feeUnlikely = CFeeRate(0); - feeLikely = CFeeRate(INF_FEERATE); - priUnlikely = 0; - priLikely = INF_PRIORITY; -} - -bool CBlockPolicyEstimator::isFeeDataPoint(const CFeeRate &fee, double pri) -{ - if ((pri < minTrackedPriority && fee >= minTrackedFee) || - (pri < priUnlikely && fee > feeLikely)) { - return true; - } - return false; -} - -bool CBlockPolicyEstimator::isPriDataPoint(const CFeeRate &fee, double pri) -{ - if ((fee < minTrackedFee && pri >= minTrackedPriority) || - (fee < feeUnlikely && pri > priLikely)) { - return true; - } - return false; + feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY); } void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate) { unsigned int txHeight = entry.GetHeight(); uint256 hash = entry.GetTx().GetHash(); - if (mapMemPoolTxs[hash].stats != NULL) { + if (mapMemPoolTxs.count(hash)) { LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n", hash.ToString().c_str()); return; @@ -371,30 +336,11 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo return; } - // Fees are stored and reported as BTC-per-kb: + // Feerates are stored and reported as BTC-per-kb: CFeeRate feeRate(entry.GetFee(), entry.GetTxSize()); - // Want the priority of the tx at confirmation. However we don't know - // what that will be and its too hard to continue updating it - // so use starting priority as a proxy - double curPri = entry.GetPriority(txHeight); mapMemPoolTxs[hash].blockHeight = txHeight; - - LogPrint("estimatefee", "Blockpolicy mempool tx %s ", hash.ToString().substr(0,10)); - // Record this as a priority estimate - if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) { - mapMemPoolTxs[hash].stats = &priStats; - mapMemPoolTxs[hash].bucketIndex = priStats.NewTx(txHeight, curPri); - } - // Record this as a fee estimate - else if (isFeeDataPoint(feeRate, curPri)) { - mapMemPoolTxs[hash].stats = &feeStats; - mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK()); - } - else { - LogPrint("estimatefee", "not adding"); - } - LogPrint("estimatefee", "\n"); + mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK()); } void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry) @@ -417,21 +363,10 @@ void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM return; } - // Fees are stored and reported as BTC-per-kb: + // Feerates are stored and reported as BTC-per-kb: CFeeRate feeRate(entry.GetFee(), entry.GetTxSize()); - // Want the priority of the tx at confirmation. 
The priority when it - // entered the mempool could easily be very small and change quickly - double curPri = entry.GetPriority(nBlockHeight); - - // Record this as a priority estimate - if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) { - priStats.Record(blocksToConfirm, curPri); - } - // Record this as a fee estimate - else if (isFeeDataPoint(feeRate, curPri)) { - feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK()); - } + feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK()); } void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight, @@ -452,41 +387,15 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight, if (!fCurrentEstimate) return; - // Update the dynamic cutoffs - // a fee/priority is "likely" the reason your tx was included in a block if >85% of such tx's - // were confirmed in 2 blocks and is "unlikely" if <50% were confirmed in 10 blocks - LogPrint("estimatefee", "Blockpolicy recalculating dynamic cutoffs:\n"); - priLikely = priStats.EstimateMedianVal(2, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBlockHeight); - if (priLikely == -1) - priLikely = INF_PRIORITY; - - double feeLikelyEst = feeStats.EstimateMedianVal(2, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBlockHeight); - if (feeLikelyEst == -1) - feeLikely = CFeeRate(INF_FEERATE); - else - feeLikely = CFeeRate(feeLikelyEst); - - priUnlikely = priStats.EstimateMedianVal(10, SUFFICIENT_PRITXS, UNLIKELY_PCT, false, nBlockHeight); - if (priUnlikely == -1) - priUnlikely = 0; - - double feeUnlikelyEst = feeStats.EstimateMedianVal(10, SUFFICIENT_FEETXS, UNLIKELY_PCT, false, nBlockHeight); - if (feeUnlikelyEst == -1) - feeUnlikely = CFeeRate(0); - else - feeUnlikely = CFeeRate(feeUnlikelyEst); - - // Clear the current block states + // Clear the current block state feeStats.ClearCurrent(nBlockHeight); - priStats.ClearCurrent(nBlockHeight); // Repopulate the current block states for (unsigned int i = 0; i < entries.size(); i++) processBlockTx(nBlockHeight, entries[i]); - // Update all exponential averages with the current block states + // Update all exponential averages with the current block state feeStats.UpdateMovingAverages(); - priStats.UpdateMovingAverages(); LogPrint("estimatefee", "Blockpolicy after updating estimates for %u confirmed entries, new mempool map size %u\n", entries.size(), mapMemPoolTxs.size()); @@ -495,7 +404,8 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight, CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) { // Return failure if trying to analyze a target we're not tracking - if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) + // It's not possible to get reasonable estimates for confTarget of 1 + if (confTarget <= 1 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) return CFeeRate(0); double median = feeStats.EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); @@ -514,6 +424,10 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) return CFeeRate(0); + // It's not possible to get reasonable estimates for confTarget of 1 + if (confTarget == 1) + confTarget = 2; + double median = -1; while (median < 0 && (unsigned int)confTarget <= feeStats.GetMaxConfirms()) { median = feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); @@ -522,7 +436,7 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int 
confTarget, int *answerFoun if (answerFoundAtTarget) *answerFoundAtTarget = confTarget - 1; - // If mempool is limiting txs , return at least the min fee from the mempool + // If mempool is limiting txs , return at least the min feerate from the mempool CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK(); if (minPoolFee > 0 && minPoolFee > median) return CFeeRate(minPoolFee); @@ -535,51 +449,38 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun double CBlockPolicyEstimator::estimatePriority(int confTarget) { - // Return failure if trying to analyze a target we're not tracking - if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms()) - return -1; - - return priStats.EstimateMedianVal(confTarget, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); + return -1; } double CBlockPolicyEstimator::estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool) { if (answerFoundAtTarget) *answerFoundAtTarget = confTarget; - // Return failure if trying to analyze a target we're not tracking - if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms()) - return -1; // If mempool is limiting txs, no priority txs are allowed CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK(); if (minPoolFee > 0) return INF_PRIORITY; - double median = -1; - while (median < 0 && (unsigned int)confTarget <= priStats.GetMaxConfirms()) { - median = priStats.EstimateMedianVal(confTarget++, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); - } - - if (answerFoundAtTarget) - *answerFoundAtTarget = confTarget - 1; - - return median; + return -1; } void CBlockPolicyEstimator::Write(CAutoFile& fileout) { fileout << nBestSeenHeight; feeStats.Write(fileout); - priStats.Write(fileout); } -void CBlockPolicyEstimator::Read(CAutoFile& filein) +void CBlockPolicyEstimator::Read(CAutoFile& filein, int nFileVersion) { int nFileBestSeenHeight; filein >> nFileBestSeenHeight; feeStats.Read(filein); - priStats.Read(filein); nBestSeenHeight = nFileBestSeenHeight; + if (nFileVersion < 139900) { + TxConfirmStats priStats; + priStats.Read(filein); + } } FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee) diff --git a/src/policy/fees.h b/src/policy/fees.h index 2c1ac3b934..ea4c70e616 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2015 The Bitcoin developers +// Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_POLICYESTIMATOR_H @@ -19,60 +19,50 @@ class CTxMemPoolEntry; class CTxMemPool; /** \class CBlockPolicyEstimator - * The BlockPolicyEstimator is used for estimating the fee or priority needed + * The BlockPolicyEstimator is used for estimating the feerate needed * for a transaction to be included in a block within a certain number of * blocks. * * At a high level the algorithm works by grouping transactions into buckets - * based on having similar priorities or fees and then tracking how long it + * based on having similar feerates and then tracking how long it * takes transactions in the various buckets to be mined. 
It operates under - * the assumption that in general transactions of higher fee/priority will be - * included in blocks before transactions of lower fee/priority. So for - * example if you wanted to know what fee you should put on a transaction to + * the assumption that in general transactions of higher feerate will be + * included in blocks before transactions of lower feerate. So for + * example if you wanted to know what feerate you should put on a transaction to * be included in a block within the next 5 blocks, you would start by looking - * at the bucket with the highest fee transactions and verifying that a + * at the bucket with the highest feerate transactions and verifying that a * sufficiently high percentage of them were confirmed within 5 blocks and - * then you would look at the next highest fee bucket, and so on, stopping at - * the last bucket to pass the test. The average fee of transactions in this - * bucket will give you an indication of the lowest fee you can put on a + * then you would look at the next highest feerate bucket, and so on, stopping at + * the last bucket to pass the test. The average feerate of transactions in this + * bucket will give you an indication of the lowest feerate you can put on a * transaction and still have a sufficiently high chance of being confirmed * within your desired 5 blocks. * - * When a transaction enters the mempool or is included within a block we - * decide whether it can be used as a data point for fee estimation, priority - * estimation or neither. If the value of exactly one of those properties was - * below the required minimum it can be used to estimate the other. In - * addition, if a priori our estimation code would indicate that the - * transaction would be much more quickly included in a block because of one - * of the properties compared to the other, we can also decide to use it as - * an estimate for that property. - * - * Here is a brief description of the implementation for fee estimation. - * When a transaction that counts for fee estimation enters the mempool, we + * Here is a brief description of the implementation: + * When a transaction enters the mempool, we * track the height of the block chain at entry. Whenever a block comes in, - * we count the number of transactions in each bucket and the total amount of fee + * we count the number of transactions in each bucket and the total amount of feerate * paid in each bucket. Then we calculate how many blocks Y it took each * transaction to be mined and we track an array of counters in each bucket * for how long it to took transactions to get confirmed from 1 to a max of 25 * and we increment all the counters from Y up to 25. This is because for any * number Z>=Y the transaction was successfully mined within Z blocks. We * want to save a history of this information, so at any time we have a - * counter of the total number of transactions that happened in a given fee + * counter of the total number of transactions that happened in a given feerate * bucket and the total number that were confirmed in each number 1-25 blocks * or less for any bucket. We save this history by keeping an exponentially * decaying moving average of each one of these stats. 
Furthermore we also * keep track of the number unmined (in mempool) transactions in each bucket * and for how many blocks they have been outstanding and use that to increase - * the number of transactions we've seen in that fee bucket when calculating + * the number of transactions we've seen in that feerate bucket when calculating * an estimate for any number of confirmations below the number of blocks * they've been outstanding. */ /** - * We will instantiate two instances of this class, one to track transactions - * that were included in a block due to fee, and one for tx's included due to - * priority. We will lump transactions into a bucket according to their approximate - * fee or priority and then track how long it took for those txs to be included in a block + * We will instantiate an instance of this class to track transactions that were + * included in a block. We will lump transactions into a bucket according to their + * approximate feerate and then track how long it took for those txs to be included in a block * * The tracking of unconfirmed (mempool) transactions is completely independent of the * historical tracking of transactions that have been confirmed in a block. @@ -80,7 +70,7 @@ class CTxMemPool; class TxConfirmStats { private: - //Define the buckets we will group transactions into (both fee buckets and priority buckets) + //Define the buckets we will group transactions into std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive) std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket @@ -97,16 +87,15 @@ private: // and calculate the totals for the current block to update the moving averages std::vector<std::vector<int> > curBlockConf; // curBlockConf[Y][X] - // Sum the total priority/fee of all tx's in each bucket + // Sum the total feerate of all tx's in each bucket // Track the historical moving average of this total over blocks std::vector<double> avg; // and calculate the total for the current block to update the moving average std::vector<double> curBlockVal; // Combine the conf counts with tx counts to calculate the confirmation % for each Y,X - // Combine the total value with the tx counts to calculate the avg fee/priority per bucket + // Combine the total value with the tx counts to calculate the avg feerate per bucket - std::string dataTypeString; double decay; // Mempool counts of outstanding transactions @@ -123,9 +112,8 @@ public: * @param defaultBuckets contains the upper limits for the bucket boundaries * @param maxConfirms max number of confirms to track * @param decay how much to decay the historical moving average per block - * @param dataTypeString for logging purposes */ - void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay, std::string dataTypeString); + void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay); /** Clear the state of the curBlock variables to start counting for the new block */ void ClearCurrent(unsigned int nBlockHeight); @@ -133,7 +121,7 @@ public: /** * Record a new transaction data point in the current block stats * @param blocksToConfirm the number of blocks it took this transaction to confirm - * @param val either the fee or the priority when entered of the transaction + * @param val the feerate of the transaction * @warning blocksToConfirm is 1-based and has to be >= 1 */ void Record(int blocksToConfirm, double val); @@ -150,14 +138,14 @@ public: void 
UpdateMovingAverages(); /** - * Calculate a fee or priority estimate. Find the lowest value bucket (or range of buckets + * Calculate a feerate estimate. Find the lowest value bucket (or range of buckets * to make sure we have enough data points) whose transactions still have sufficient likelihood * of being confirmed within the target number of confirmations * @param confTarget target number of confirmations * @param sufficientTxVal required average number of transactions per block in a bucket range * @param minSuccess the success probability we require - * @param requireGreater return the lowest fee/pri such that all higher values pass minSuccess OR - * return the highest fee/pri such that all lower values fail minSuccess + * @param requireGreater return the lowest feerate such that all higher values pass minSuccess OR + * return the highest feerate such that all lower values fail minSuccess * @param nBlockHeight the current block height */ double EstimateMedianVal(int confTarget, double sufficientTxVal, @@ -184,35 +172,26 @@ static const unsigned int MAX_BLOCK_CONFIRMS = 25; /** Decay of .998 is a half-life of 346 blocks or about 2.4 days */ static const double DEFAULT_DECAY = .998; -/** Require greater than 95% of X fee transactions to be confirmed within Y blocks for X to be big enough */ +/** Require greater than 95% of X feerate transactions to be confirmed within Y blocks for X to be big enough */ static const double MIN_SUCCESS_PCT = .95; -static const double UNLIKELY_PCT = .5; -/** Require an avg of 1 tx in the combined fee bucket per block to have stat significance */ +/** Require an avg of 1 tx in the combined feerate bucket per block to have stat significance */ static const double SUFFICIENT_FEETXS = 1; -/** Require only an avg of 1 tx every 5 blocks in the combined pri bucket (way less pri txs) */ -static const double SUFFICIENT_PRITXS = .2; - -// Minimum and Maximum values for tracking fees and priorities +// Minimum and Maximum values for tracking feerates static const double MIN_FEERATE = 10; static const double MAX_FEERATE = 1e7; static const double INF_FEERATE = MAX_MONEY; -static const double MIN_PRIORITY = 10; -static const double MAX_PRIORITY = 1e16; static const double INF_PRIORITY = 1e9 * MAX_MONEY; -// We have to lump transactions into buckets based on fee or priority, but we want to be able -// to give accurate estimates over a large range of potential fees and priorities +// We have to lump transactions into buckets based on feerate, but we want to be able +// to give accurate estimates over a large range of potential feerates // Therefore it makes sense to exponentially space the buckets /** Spacing of FeeRate buckets */ static const double FEE_SPACING = 1.1; -/** Spacing of Priority buckets */ -static const double PRI_SPACING = 2; - /** - * We want to be able to estimate fees or priorities that are needed on tx's to be included in + * We want to be able to estimate feerates that are needed on tx's to be included in * a certain number of blocks. 
Every time a block is added to the best chain, this class records * stats on the transactions included in that block */ @@ -235,27 +214,26 @@ public: /** Remove a transaction from the mempool tracking stats*/ void removeTx(uint256 hash); - /** Is this transaction likely included in a block because of its fee?*/ - bool isFeeDataPoint(const CFeeRate &fee, double pri); - - /** Is this transaction likely included in a block because of its priority?*/ - bool isPriDataPoint(const CFeeRate &fee, double pri); - - /** Return a fee estimate */ + /** Return a feerate estimate */ CFeeRate estimateFee(int confTarget); - /** Estimate fee rate needed to get be included in a block within + /** Estimate feerate needed to get be included in a block within * confTarget blocks. If no answer can be given at confTarget, return an * estimate at the lowest target where one can be given. */ CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool); - /** Return a priority estimate */ + /** Return a priority estimate. + * DEPRECATED + * Returns -1 + */ double estimatePriority(int confTarget); /** Estimate priority needed to get be included in a block within - * confTarget blocks. If no answer can be given at confTarget, return an - * estimate at the lowest target where one can be given. + * confTarget blocks. + * DEPRECATED + * Returns -1 unless mempool is currently limited then returns INF_PRIORITY + * answerFoundAtTarget is set to confTarget */ double estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool); @@ -263,29 +241,23 @@ public: void Write(CAutoFile& fileout); /** Read estimation data from a file */ - void Read(CAutoFile& filein); + void Read(CAutoFile& filein, int nFileVersion); private: CFeeRate minTrackedFee; //!< Passed to constructor to avoid dependency on main - double minTrackedPriority; //!< Set to AllowFreeThreshold unsigned int nBestSeenHeight; struct TxStatsInfo { - TxConfirmStats *stats; unsigned int blockHeight; unsigned int bucketIndex; - TxStatsInfo() : stats(NULL), blockHeight(0), bucketIndex(0) {} + TxStatsInfo() : blockHeight(0), bucketIndex(0) {} }; // map of txids to information about that transaction std::map<uint256, TxStatsInfo> mapMemPoolTxs; /** Classes to track historical data on transaction confirmations */ - TxConfirmStats feeStats, priStats; - - /** Breakpoints to help determine whether a transaction was confirmed by priority or Fee */ - CFeeRate feeLikely, feeUnlikely; - double priLikely, priUnlikely; + TxConfirmStats feeStats; }; class FeeFilterRounder diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index ae42b2bd74..0c71a079fb 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2016 The Bitcoin developers +// Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -7,7 +7,7 @@ #include "policy/policy.h" -#include "main.h" +#include "validation.h" #include "tinyformat.h" #include "util.h" #include "utilstrencodings.h" @@ -66,7 +66,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnes // Extremely large transactions with lots of inputs can cost the network // almost as much to process as they cost the sender in fees, because // computing signature hashes is O(ninputs*txsize). 
Limiting transactions - // to MAX_STANDARD_TX_SIZE mitigates CPU exhaustion attacks. + // to MAX_STANDARD_TX_WEIGHT mitigates CPU exhaustion attacks. unsigned int sz = GetTransactionWeight(tx); if (sz >= MAX_STANDARD_TX_WEIGHT) { reason = "tx-size"; diff --git a/src/policy/policy.h b/src/policy/policy.h index 814e6c0b6f..764ee27806 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2016 The Bitcoin developers +// Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/policy/rbf.cpp b/src/policy/rbf.cpp index 133cff611d..d9b47e71bb 100644 --- a/src/policy/rbf.cpp +++ b/src/policy/rbf.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2016 The Bitcoin developers +// Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/policy/rbf.h b/src/policy/rbf.h index 5a711dba07..139aec5760 100644 --- a/src/policy/rbf.h +++ b/src/policy/rbf.h @@ -1,4 +1,4 @@ -// Copyright (c) 2016 The Bitcoin developers +// Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/primitives/block.cpp b/src/primitives/block.cpp index 0e6ab4dd71..95bd2211f9 100644 --- a/src/primitives/block.cpp +++ b/src/primitives/block.cpp @@ -27,7 +27,7 @@ std::string CBlock::ToString() const vtx.size()); for (unsigned int i = 0; i < vtx.size(); i++) { - s << " " << vtx[i].ToString() << "\n"; + s << " " << vtx[i]->ToString() << "\n"; } return s.str(); } diff --git a/src/primitives/block.h b/src/primitives/block.h index 72dfed985a..b037fc839c 100644 --- a/src/primitives/block.h +++ b/src/primitives/block.h @@ -36,7 +36,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(this->nVersion); READWRITE(hashPrevBlock); READWRITE(hashMerkleRoot); @@ -73,7 +73,7 @@ class CBlock : public CBlockHeader { public: // network and disk - std::vector<CTransaction> vtx; + std::vector<CTransactionRef> vtx; // memory only mutable bool fChecked; @@ -92,7 +92,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(*(CBlockHeader*)this); READWRITE(vtx); } @@ -137,8 +137,9 @@ struct CBlockLocator ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { - if (!(nType & SER_GETHASH)) + inline void SerializationOp(Stream& s, Operation ser_action) { + int nVersion = s.GetVersion(); + if (!(s.GetType() & SER_GETHASH)) READWRITE(nVersion); READWRITE(vHave); } diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp index 4afbe99fd3..11d7eace55 100644 --- a/src/primitives/transaction.cpp +++ b/src/primitives/transaction.cpp @@ -49,11 +49,6 @@ CTxOut::CTxOut(const CAmount& nValueIn, CScript scriptPubKeyIn) scriptPubKey = scriptPubKeyIn; } -uint256 
CTxOut::GetHash() const -{ - return SerializeHash(*this); -} - std::string CTxOut::ToString() const { return strprintf("CTxOut(nValue=%d.%08d, scriptPubKey=%s)", nValue / COIN, nValue % COIN, HexStr(scriptPubKey).substr(0, 30)); @@ -67,9 +62,9 @@ uint256 CMutableTransaction::GetHash() const return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); } -void CTransaction::UpdateHash() const +uint256 CTransaction::ComputeHash() const { - *const_cast<uint256*>(&hash) = SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); + return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); } uint256 CTransaction::GetWitnessHash() const @@ -77,21 +72,10 @@ uint256 CTransaction::GetWitnessHash() const return SerializeHash(*this, SER_GETHASH, 0); } -CTransaction::CTransaction() : nVersion(CTransaction::CURRENT_VERSION), vin(), vout(), nLockTime(0) { } - -CTransaction::CTransaction(const CMutableTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), wit(tx.wit), nLockTime(tx.nLockTime) { - UpdateHash(); -} - -CTransaction& CTransaction::operator=(const CTransaction &tx) { - *const_cast<int*>(&nVersion) = tx.nVersion; - *const_cast<std::vector<CTxIn>*>(&vin) = tx.vin; - *const_cast<std::vector<CTxOut>*>(&vout) = tx.vout; - *const_cast<CTxWitness*>(&wit) = tx.wit; - *const_cast<unsigned int*>(&nLockTime) = tx.nLockTime; - *const_cast<uint256*>(&hash) = tx.hash; - return *this; -} +/* For backward compatibility, the hash is initialized to 0. TODO: remove the need for this default constructor entirely. */ +CTransaction::CTransaction() : nVersion(CTransaction::CURRENT_VERSION), vin(), vout(), nLockTime(0), hash() {} +CTransaction::CTransaction(const CMutableTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), wit(tx.wit), nLockTime(tx.nLockTime), hash(ComputeHash()) {} +CTransaction::CTransaction(CMutableTransaction &&tx) : nVersion(tx.nVersion), vin(std::move(tx.vin)), vout(std::move(tx.vout)), wit(std::move(tx.wit)), nLockTime(tx.nLockTime), hash(ComputeHash()) {} CAmount CTransaction::GetValueOut() const { diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index 16c2e5c454..66fefafef5 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -28,7 +28,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(hash); READWRITE(n); } @@ -104,7 +104,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(prevout); READWRITE(*(CScriptBase*)(&scriptSig)); READWRITE(nSequence); @@ -144,7 +144,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(nValue); READWRITE(*(CScriptBase*)(&scriptPubKey)); } @@ -160,8 +160,6 @@ public: return (nValue == -1); } - uint256 GetHash() const; - CAmount GetDustThreshold(const CFeeRate &minRelayTxFee) const { // "Dust" is defined in terms of CTransaction::minRelayTxFee, @@ -179,7 +177,7 @@ public: if (scriptPubKey.IsUnspendable()) return 0; - size_t nSize = GetSerializeSize(SER_DISK, 0); + 
size_t nSize = GetSerializeSize(*this, SER_DISK, 0); int witnessversion = 0; std::vector<unsigned char> witnessprogram; @@ -221,7 +219,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(scriptWitness.stack); } @@ -257,7 +255,7 @@ public: } template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) + inline void SerializationOp(Stream& s, Operation ser_action) { for (size_t n = 0; n < vtxinwit.size(); n++) { READWRITE(vtxinwit[n]); @@ -288,73 +286,81 @@ struct CMutableTransaction; * - CTxWitness wit; * - uint32_t nLockTime */ -template<typename Stream, typename Operation, typename TxType> -inline void SerializeTransaction(TxType& tx, Stream& s, Operation ser_action, int nType, int nVersion) { - const bool fAllowWitness = !(nVersion & SERIALIZE_TRANSACTION_NO_WITNESS); +template<typename Stream, typename TxType> +inline void UnserializeTransaction(TxType& tx, Stream& s) { + const bool fAllowWitness = !(s.GetVersion() & SERIALIZE_TRANSACTION_NO_WITNESS); - READWRITE(*const_cast<int32_t*>(&tx.nVersion)); + s >> tx.nVersion; unsigned char flags = 0; - if (ser_action.ForRead()) { - const_cast<std::vector<CTxIn>*>(&tx.vin)->clear(); - const_cast<std::vector<CTxOut>*>(&tx.vout)->clear(); - const_cast<CTxWitness*>(&tx.wit)->SetNull(); - /* Try to read the vin. In case the dummy is there, this will be read as an empty vector. */ - READWRITE(*const_cast<std::vector<CTxIn>*>(&tx.vin)); - if (tx.vin.size() == 0 && fAllowWitness) { - /* We read a dummy or an empty vin. */ - READWRITE(flags); - if (flags != 0) { - READWRITE(*const_cast<std::vector<CTxIn>*>(&tx.vin)); - READWRITE(*const_cast<std::vector<CTxOut>*>(&tx.vout)); - } - } else { - /* We read a non-empty vin. Assume a normal vout follows. */ - READWRITE(*const_cast<std::vector<CTxOut>*>(&tx.vout)); - } - if ((flags & 1) && fAllowWitness) { - /* The witness flag is present, and we support witnesses. */ - flags ^= 1; - const_cast<CTxWitness*>(&tx.wit)->vtxinwit.resize(tx.vin.size()); - READWRITE(tx.wit); - } - if (flags) { - /* Unknown flag in the serialization */ - throw std::ios_base::failure("Unknown transaction optional data"); + tx.vin.clear(); + tx.vout.clear(); + tx.wit.SetNull(); + /* Try to read the vin. In case the dummy is there, this will be read as an empty vector. */ + s >> tx.vin; + if (tx.vin.size() == 0 && fAllowWitness) { + /* We read a dummy or an empty vin. */ + s >> flags; + if (flags != 0) { + s >> tx.vin; + s >> tx.vout; } } else { - // Consistency check - assert(tx.wit.vtxinwit.size() <= tx.vin.size()); - if (fAllowWitness) { - /* Check whether witnesses need to be serialized. */ - if (!tx.wit.IsNull()) { - flags |= 1; - } - } - if (flags) { - /* Use extended format in case witnesses are to be serialized. */ - std::vector<CTxIn> vinDummy; - READWRITE(vinDummy); - READWRITE(flags); + /* We read a non-empty vin. Assume a normal vout follows. */ + s >> tx.vout; + } + if ((flags & 1) && fAllowWitness) { + /* The witness flag is present, and we support witnesses. 
*/ + flags ^= 1; + tx.wit.vtxinwit.resize(tx.vin.size()); + s >> tx.wit; + } + if (flags) { + /* Unknown flag in the serialization */ + throw std::ios_base::failure("Unknown transaction optional data"); + } + s >> tx.nLockTime; +} + +template<typename Stream, typename TxType> +inline void SerializeTransaction(const TxType& tx, Stream& s) { + const bool fAllowWitness = !(s.GetVersion() & SERIALIZE_TRANSACTION_NO_WITNESS); + + s << tx.nVersion; + unsigned char flags = 0; + // Consistency check + assert(tx.wit.vtxinwit.size() <= tx.vin.size()); + if (fAllowWitness) { + /* Check whether witnesses need to be serialized. */ + if (!tx.wit.IsNull()) { + flags |= 1; } - READWRITE(*const_cast<std::vector<CTxIn>*>(&tx.vin)); - READWRITE(*const_cast<std::vector<CTxOut>*>(&tx.vout)); - if (flags & 1) { - const_cast<CTxWitness*>(&tx.wit)->vtxinwit.resize(tx.vin.size()); - READWRITE(tx.wit); + } + if (flags) { + /* Use extended format in case witnesses are to be serialized. */ + std::vector<CTxIn> vinDummy; + s << vinDummy; + s << flags; + } + s << tx.vin; + s << tx.vout; + if (flags & 1) { + for (size_t i = 0; i < tx.vin.size(); i++) { + if (i < tx.wit.vtxinwit.size()) { + s << tx.wit.vtxinwit[i]; + } else { + s << CTxInWitness(); + } } } - READWRITE(*const_cast<uint32_t*>(&tx.nLockTime)); + s << tx.nLockTime; } + /** The basic transaction that is broadcasted on the network and contained in * blocks. A transaction can contain multiple inputs and outputs. */ class CTransaction { -private: - /** Memory only. */ - const uint256 hash; - public: // Default transaction version. static const int32_t CURRENT_VERSION=1; @@ -376,24 +382,30 @@ public: CTxWitness wit; // Not const: can change without invalidating the txid cache const uint32_t nLockTime; +private: + /** Memory only. */ + const uint256 hash; + + uint256 ComputeHash() const; + +public: /** Construct a CTransaction that qualifies as IsNull() */ CTransaction(); /** Convert a CMutableTransaction into a CTransaction. */ CTransaction(const CMutableTransaction &tx); + CTransaction(CMutableTransaction &&tx); - CTransaction& operator=(const CTransaction& tx); - - ADD_SERIALIZE_METHODS; - - template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { - SerializeTransaction(*this, s, ser_action, nType, nVersion); - if (ser_action.ForRead()) { - UpdateHash(); - } + template <typename Stream> + inline void Serialize(Stream& s) const { + SerializeTransaction(*this, s); } + /** This deserializing constructor is provided instead of an Unserialize method. + * Unserialize is not possible, since it would require overwriting const fields. */ + template <typename Stream> + CTransaction(deserialize_type, Stream& s) : CTransaction(CMutableTransaction(deserialize, s)) {} + bool IsNull() const { return vin.empty() && vout.empty(); } @@ -415,7 +427,7 @@ public: // Compute modified tx size for priority calculation (optionally given tx size) unsigned int CalculateModifiedSize(unsigned int nTxSize=0) const; - + /** * Get the total transaction size in bytes, including witness data. * "Total Size" defined in BIP141 and BIP144. @@ -439,8 +451,6 @@ public: } std::string ToString() const; - - void UpdateHash() const; }; /** A mutable version of CTransaction. 
*/ @@ -455,11 +465,20 @@ struct CMutableTransaction CMutableTransaction(); CMutableTransaction(const CTransaction& tx); - ADD_SERIALIZE_METHODS; + template <typename Stream> + inline void Serialize(Stream& s) const { + SerializeTransaction(*this, s); + } - template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { - SerializeTransaction(*this, s, ser_action, nType, nVersion); + + template <typename Stream> + inline void Unserialize(Stream& s) { + UnserializeTransaction(*this, s); + } + + template <typename Stream> + CMutableTransaction(deserialize_type, Stream& s) { + Unserialize(s); } /** Compute the hash of this CMutableTransaction. This is computed on the @@ -468,6 +487,12 @@ struct CMutableTransaction uint256 GetHash() const; }; +typedef std::shared_ptr<const CTransaction> CTransactionRef; +static inline CTransactionRef MakeTransactionRef() { return std::make_shared<const CTransaction>(); } +template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txIn) { return std::make_shared<const CTransaction>(std::forward<Tx>(txIn)); } +static inline CTransactionRef MakeTransactionRef(const CTransactionRef& txIn) { return txIn; } +static inline CTransactionRef MakeTransactionRef(CTransactionRef&& txIn) { return std::move(txIn); } + /** Compute the weight of a transaction, as defined by BIP 141 */ int64_t GetTransactionWeight(const CTransaction &tx); diff --git a/src/protocol.cpp b/src/protocol.cpp index 54ad62b1a2..87d6e06848 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -181,7 +181,11 @@ std::string CInv::GetCommand() const std::string CInv::ToString() const { - return strprintf("%s %s", GetCommand(), hash.ToString()); + try { + return strprintf("%s %s", GetCommand(), hash.ToString()); + } catch(const std::out_of_range &) { + return strprintf("0x%08x %s", type, hash.ToString()); + } } const std::vector<std::string> &getAllNetMessageTypes() diff --git a/src/protocol.h b/src/protocol.h index d19e0d3a5e..dea409c5b7 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -48,7 +48,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(FLATDATA(pchMessageStart)); READWRITE(FLATDATA(pchCommand)); @@ -261,7 +261,7 @@ enum ServiceFlags : uint64_t { // Bitcoin Core nodes used to support this by default, without advertising this bit, // but no longer do as of protocol version 70011 (= NO_BLOOM_VERSION) NODE_BLOOM = (1 << 2), - // Indicates that a node can be asked for blocks and transactions including + // NODE_WITNESS indicates that a node can be asked for blocks and transactions including // witness data. 
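The CTransactionRef typedef and the MakeTransactionRef helpers added to primitives/transaction.h above replace by-value CTransaction storage (for example in CBlock::vtx) with shared ownership of immutable transactions. A minimal usage sketch follows; it assumes the project headers from this tree, the helper name BuildDemoVtx is hypothetical, and it is not part of the diff itself:

    // Hypothetical illustration only: build a transaction mutably, then freeze
    // it into a shared, immutable CTransactionRef as the refactored code does.
    #include "primitives/transaction.h"
    #include <utility>
    #include <vector>

    std::vector<CTransactionRef> BuildDemoVtx()
    {
        CMutableTransaction mtx;        // fill in vin/vout/nLockTime as needed
        mtx.nLockTime = 0;
        // Moving into MakeTransactionRef uses the new CTransaction(CMutableTransaction&&)
        // constructor, so the txid is computed once and the data is not copied again.
        CTransactionRef ref = MakeTransactionRef(std::move(mtx));
        return std::vector<CTransactionRef>{ref};   // e.g. what CBlock::vtx now holds
    }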
NODE_WITNESS = (1 << 3), // NODE_XTHIN means the node supports Xtreme Thinblocks @@ -289,14 +289,15 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) + inline void SerializationOp(Stream& s, Operation ser_action) { if (ser_action.ForRead()) Init(); - if (nType & SER_DISK) + int nVersion = s.GetVersion(); + if (s.GetType() & SER_DISK) READWRITE(nVersion); - if ((nType & SER_DISK) || - (nVersion >= CADDR_TIME_VERSION && !(nType & SER_GETHASH))) + if ((s.GetType() & SER_DISK) || + (nVersion >= CADDR_TIME_VERSION && !(s.GetType() & SER_GETHASH))) READWRITE(nTime); uint64_t nServicesInt = nServices; READWRITE(nServicesInt); @@ -343,7 +344,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(type); READWRITE(hash); diff --git a/src/pubkey.h b/src/pubkey.h index 3a554877f8..9499862210 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -116,19 +116,15 @@ public: } //! Implement serialization, as if this was a byte vector. - unsigned int GetSerializeSize(int nType, int nVersion) const - { - return size() + 1; - } template <typename Stream> - void Serialize(Stream& s, int nType, int nVersion) const + void Serialize(Stream& s) const { unsigned int len = size(); ::WriteCompactSize(s, len); s.write((char*)vch, len); } template <typename Stream> - void Unserialize(Stream& s, int nType, int nVersion) + void Unserialize(Stream& s) { unsigned int len = ::ReadCompactSize(s); if (len <= 65) { @@ -214,12 +210,13 @@ struct CExtPubKey { void Decode(const unsigned char code[BIP32_EXTKEY_SIZE]); bool Derive(CExtPubKey& out, unsigned int nChild) const; - unsigned int GetSerializeSize(int nType, int nVersion) const + void Serialize(CSizeComputer& s) const { - return BIP32_EXTKEY_SIZE+1; //add one byte for the size (compact int) + // Optimized implementation for ::GetSerializeSize that avoids copying. 
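The new CExtPubKey::Serialize(CSizeComputer&) overload above computes the serialized size by seeking instead of formatting any bytes. The standalone sketch below illustrates that idea with a simplified stand-in; the SizeComputer and ToyExtPubKey types are assumptions for illustration, not the real serialize.h machinery:

    #include <cstddef>

    // Toy size-only "stream": counts bytes instead of storing them.
    class SizeComputer {
        std::size_t nSize = 0;
    public:
        void seek(std::size_t n) { nSize += n; }                 // skip ahead, no data produced
        void write(const char*, std::size_t n) { nSize += n; }   // pretend-write
        std::size_t size() const { return nSize; }
    };

    struct ToyExtPubKey {
        static constexpr std::size_t KEY_SIZE = 74;   // mirrors BIP32_EXTKEY_SIZE
        unsigned char code[KEY_SIZE] = {};

        // Generic path: one compact-size byte (74 < 253) followed by the payload.
        template <typename Stream>
        void Serialize(Stream& s) const {
            unsigned char len = KEY_SIZE;
            s.write(reinterpret_cast<const char*>(&len), 1);
            s.write(reinterpret_cast<const char*>(code), KEY_SIZE);
        }
        // Size-only fast path: the exact non-template match wins overload resolution.
        void Serialize(SizeComputer& s) const { s.seek(KEY_SIZE + 1); }
    };

    int main()
    {
        SizeComputer sc;
        ToyExtPubKey key;
        key.Serialize(sc);                 // resolves to the SizeComputer overload
        return sc.size() == 75 ? 0 : 1;    // 1 length byte + 74 payload bytes
    }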
+ s.seek(BIP32_EXTKEY_SIZE + 1); // add one byte for the size (compact int) } template <typename Stream> - void Serialize(Stream& s, int nType, int nVersion) const + void Serialize(Stream& s) const { unsigned int len = BIP32_EXTKEY_SIZE; ::WriteCompactSize(s, len); @@ -228,7 +225,7 @@ struct CExtPubKey { s.write((const char *)&code[0], len); } template <typename Stream> - void Unserialize(Stream& s, int nType, int nVersion) + void Unserialize(Stream& s) { unsigned int len = ::ReadCompactSize(s); unsigned char code[BIP32_EXTKEY_SIZE]; diff --git a/src/qt/addressbookpage.cpp b/src/qt/addressbookpage.cpp index 58cf4dede0..6076837fde 100644 --- a/src/qt/addressbookpage.cpp +++ b/src/qt/addressbookpage.cpp @@ -83,7 +83,7 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, deleteAction = new QAction(ui->deleteAddress->text(), this); // Build context menu - contextMenu = new QMenu(); + contextMenu = new QMenu(this); contextMenu->addAction(copyAddressAction); contextMenu->addAction(copyLabelAction); contextMenu->addAction(editAction); diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp index 6e11e23904..fe53d89ba9 100644 --- a/src/qt/bantablemodel.cpp +++ b/src/qt/bantablemodel.cpp @@ -87,7 +87,7 @@ BanTableModel::BanTableModel(ClientModel *parent) : clientModel(parent) { columns << tr("IP/Netmask") << tr("Banned Until"); - priv = new BanTablePriv(); + priv.reset(new BanTablePriv()); // default to unsorted priv->sortColumn = -1; @@ -95,6 +95,11 @@ BanTableModel::BanTableModel(ClientModel *parent) : refresh(); } +BanTableModel::~BanTableModel() +{ + // Intentionally left empty +} + int BanTableModel::rowCount(const QModelIndex &parent) const { Q_UNUSED(parent); diff --git a/src/qt/bantablemodel.h b/src/qt/bantablemodel.h index fe9600ac0b..3c03d05c05 100644 --- a/src/qt/bantablemodel.h +++ b/src/qt/bantablemodel.h @@ -40,6 +40,7 @@ class BanTableModel : public QAbstractTableModel public: explicit BanTableModel(ClientModel *parent = 0); + ~BanTableModel(); void startAutoRefresh(); void stopAutoRefresh(); @@ -66,7 +67,7 @@ public Q_SLOTS: private: ClientModel *clientModel; QStringList columns; - BanTablePriv *priv; + std::unique_ptr<BanTablePriv> priv; }; #endif // BITCOIN_QT_BANTABLEMODEL_H diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index 9986af4957..4f48e21a22 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -245,6 +245,7 @@ private: #endif int returnValue; const PlatformStyle *platformStyle; + std::unique_ptr<QWidget> shutdownWindow; void startThread(); }; @@ -267,7 +268,22 @@ void BitcoinCore::initialize() try { qDebug() << __func__ << ": Running AppInit2 in thread"; - int rv = AppInit2(threadGroup, scheduler); + if (!AppInitBasicSetup()) + { + Q_EMIT initializeResult(false); + return; + } + if (!AppInitParameterInteraction()) + { + Q_EMIT initializeResult(false); + return; + } + if (!AppInitSanityChecks()) + { + Q_EMIT initializeResult(false); + return; + } + int rv = AppInitMain(threadGroup, scheduler); Q_EMIT initializeResult(rv); } catch (const std::exception& e) { handleRunawayException(&e); @@ -365,9 +381,8 @@ void BitcoinApplication::createWindow(const NetworkStyle *networkStyle) void BitcoinApplication::createSplashScreen(const NetworkStyle *networkStyle) { SplashScreen *splash = new SplashScreen(0, networkStyle); - // We don't hold a direct pointer to the splash screen after creation, so use - // Qt::WA_DeleteOnClose to make sure that the window will be deleted eventually. 
- splash->setAttribute(Qt::WA_DeleteOnClose); + // We don't hold a direct pointer to the splash screen after creation, but the splash + // screen will take care of deleting itself when slotFinish happens. splash->show(); connect(this, SIGNAL(splashFinished(QWidget*)), splash, SLOT(slotFinish(QWidget*))); connect(this, SIGNAL(requestedShutdown()), splash, SLOT(close())); @@ -409,6 +424,11 @@ void BitcoinApplication::requestInitialize() void BitcoinApplication::requestShutdown() { + // Show a simple window indicating shutdown status + // Do this first as some of the steps may take some time below, + // for example the RPC console may still be executing a command. + shutdownWindow.reset(ShutdownWindow::showShutdownWindow(window)); + qDebug() << __func__ << ": Requesting shutdown"; startThread(); window->hide(); @@ -423,9 +443,6 @@ void BitcoinApplication::requestShutdown() delete clientModel; clientModel = 0; - // Show a simple window indicating shutdown status - ShutdownWindow::showShutdownWindow(window); - // Request shutdown from core thread Q_EMIT requestedShutdown(); } @@ -496,7 +513,7 @@ void BitcoinApplication::shutdownResult(int retval) void BitcoinApplication::handleRunawayException(const QString &message) { QMessageBox::critical(0, "Runaway exception", BitcoinGUI::tr("A fatal error occurred. Bitcoin can no longer continue safely and will quit.") + QString("\n\n") + message); - ::exit(1); + ::exit(EXIT_FAILURE); } WId BitcoinApplication::getMainWinId() const @@ -573,13 +590,13 @@ int main(int argc, char *argv[]) { HelpMessageDialog help(NULL, mapArgs.count("-version")); help.showOrPrint(); - return 1; + return EXIT_SUCCESS; } /// 5. Now that settings and translations are available, ask user for data directory // User language is set up: pick a data directory if (!Intro::pickDataDirectory()) - return 0; + return EXIT_SUCCESS; /// 6. Determine availability of data directory and parse bitcoin.conf /// - Do not call GetDataDir(true) before this step finishes @@ -587,14 +604,14 @@ int main(int argc, char *argv[]) { QMessageBox::critical(0, QObject::tr(PACKAGE_NAME), QObject::tr("Error: Specified data directory \"%1\" does not exist.").arg(QString::fromStdString(mapArgs["-datadir"]))); - return 1; + return EXIT_FAILURE; } try { ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME), mapArgs, mapMultiArgs); } catch (const std::exception& e) { QMessageBox::critical(0, QObject::tr(PACKAGE_NAME), QObject::tr("Error: Cannot parse configuration file: %1. Only use key=value syntax.").arg(e.what())); - return false; + return EXIT_FAILURE; } /// 7. Determine network (and switch to network specific options) @@ -608,7 +625,7 @@ int main(int argc, char *argv[]) SelectParams(ChainNameFromCommandLine()); } catch(std::exception &e) { QMessageBox::critical(0, QObject::tr(PACKAGE_NAME), QObject::tr("Error: %1").arg(e.what())); - return 1; + return EXIT_FAILURE; } #ifdef ENABLE_WALLET // Parse URIs on command line -- this can affect Params() @@ -630,7 +647,7 @@ int main(int argc, char *argv[]) // - Do this after creating app and setting up translations, so errors are // translated properly. 
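The hunk above replaces the single AppInit2 call with the staged AppInitBasicSetup / AppInitParameterInteraction / AppInitSanityChecks / AppInitMain sequence. A rough sketch of that calling pattern, assuming the declarations from init.h in this tree; the wrapper name RunNodeStartup is hypothetical and error reporting is omitted:

    #include "init.h"          // AppInitBasicSetup, ..., AppInitMain (this tree)
    #include "scheduler.h"
    #include <boost/thread/thread.hpp>

    static bool RunNodeStartup(boost::thread_group& threadGroup, CScheduler& scheduler)
    {
        if (!AppInitBasicSetup()) return false;            // signal handlers, OS setup
        if (!AppInitParameterInteraction()) return false;  // resolve conflicting options
        if (!AppInitSanityChecks()) return false;          // environment sanity checks
        return AppInitMain(threadGroup, scheduler);        // the heavy startup work
    }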
if (PaymentServer::ipcSendCommandLine()) - exit(0); + exit(EXIT_SUCCESS); // Start up the payment server early, too, so impatient users that click on // bitcoin: links repeatedly have their payment requests routed to this process: diff --git a/src/qt/bitcoin.qrc b/src/qt/bitcoin.qrc index ca5b1fa673..451d391237 100644 --- a/src/qt/bitcoin.qrc +++ b/src/qt/bitcoin.qrc @@ -52,6 +52,7 @@ <file alias="transaction_abandoned">res/icons/transaction_abandoned.png</file> <file alias="hd_enabled">res/icons/hd_enabled.png</file> <file alias="hd_disabled">res/icons/hd_disabled.png</file> + <file alias="network_disabled">res/icons/network_disabled.png</file> </qresource> <qresource prefix="/movies"> <file alias="spinner-000">res/movies/spinner-000.png</file> diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index af767aa6c6..651ff84293 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -46,7 +46,6 @@ #include <QMenuBar> #include <QMessageBox> #include <QMimeData> -#include <QProgressBar> #include <QProgressDialog> #include <QSettings> #include <QShortcut> @@ -86,7 +85,7 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle * unitDisplayControl(0), labelWalletEncryptionIcon(0), labelWalletHDStatusIcon(0), - labelConnectionsIcon(0), + connectionsControl(0), labelBlocksIcon(0), progressBarLabel(0), progressBar(0), @@ -199,8 +198,8 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle * unitDisplayControl = new UnitDisplayStatusBarControl(platformStyle); labelWalletEncryptionIcon = new QLabel(); labelWalletHDStatusIcon = new QLabel(); - labelConnectionsIcon = new QLabel(); - labelBlocksIcon = new QLabel(); + connectionsControl = new GUIUtil::ClickableLabel(); + labelBlocksIcon = new GUIUtil::ClickableLabel(); if(enableWallet) { frameBlocksLayout->addStretch(); @@ -210,7 +209,7 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle * frameBlocksLayout->addWidget(labelWalletHDStatusIcon); } frameBlocksLayout->addStretch(); - frameBlocksLayout->addWidget(labelConnectionsIcon); + frameBlocksLayout->addWidget(connectionsControl); frameBlocksLayout->addStretch(); frameBlocksLayout->addWidget(labelBlocksIcon); frameBlocksLayout->addStretch(); @@ -244,10 +243,15 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle * // Subscribe to notifications from core subscribeToCoreSignals(); + connect(connectionsControl, SIGNAL(clicked(QPoint)), this, SLOT(toggleNetworkActive())); + modalOverlay = new ModalOverlay(this->centralWidget()); #ifdef ENABLE_WALLET - if(enableWallet) + if(enableWallet) { connect(walletFrame, SIGNAL(requestedSyncWarningInfo()), this, SLOT(showModalOverlay())); + connect(labelBlocksIcon, SIGNAL(clicked(QPoint)), this, SLOT(showModalOverlay())); + connect(progressBar, SIGNAL(clicked(QPoint)), this, SLOT(showModalOverlay())); + } #endif } @@ -469,8 +473,9 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel) createTrayIconMenu(); // Keep up to date with client - setNumConnections(_clientModel->getNumConnections()); + updateNetworkState(); connect(_clientModel, SIGNAL(numConnectionsChanged(int)), this, SLOT(setNumConnections(int))); + connect(_clientModel, SIGNAL(networkActiveChanged(bool)), this, SLOT(setNetworkActive(bool))); setNumBlocks(_clientModel->getNumBlocks(), _clientModel->getLastBlockDate(), _clientModel->getVerificationProgress(NULL), false); connect(_clientModel, SIGNAL(numBlocksChanged(int,QDateTime,double,bool)), this, 
SLOT(setNumBlocks(int,QDateTime,double,bool))); @@ -509,6 +514,12 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel) // Disable context menu on tray icon trayIconMenu->clear(); } + // Propagate cleared model to child objects + rpcConsole->setClientModel(nullptr); +#ifdef ENABLE_WALLET + walletFrame->setClientModel(nullptr); +#endif // ENABLE_WALLET + unitDisplayControl->setOptionsModel(nullptr); } } @@ -698,8 +709,9 @@ void BitcoinGUI::gotoVerifyMessageTab(QString addr) } #endif // ENABLE_WALLET -void BitcoinGUI::setNumConnections(int count) +void BitcoinGUI::updateNetworkState() { + int count = clientModel->getNumConnections(); QString icon; switch(count) { @@ -709,21 +721,41 @@ void BitcoinGUI::setNumConnections(int count) case 7: case 8: case 9: icon = ":/icons/connect_3"; break; default: icon = ":/icons/connect_4"; break; } - labelConnectionsIcon->setPixmap(platformStyle->SingleColorIcon(icon).pixmap(STATUSBAR_ICONSIZE,STATUSBAR_ICONSIZE)); - labelConnectionsIcon->setToolTip(tr("%n active connection(s) to Bitcoin network", "", count)); + + QString tooltip; + + if (clientModel->getNetworkActive()) { + tooltip = tr("%n active connection(s) to Bitcoin network", "", count) + QString(".<br>") + tr("Click to disable network activity."); + } else { + tooltip = tr("Network activity disabled.") + QString("<br>") + tr("Click to enable network activity again."); + icon = ":/icons/network_disabled"; + } + + // Don't word-wrap this (fixed-width) tooltip + tooltip = QString("<nobr>") + tooltip + QString("</nobr>"); + connectionsControl->setToolTip(tooltip); + + connectionsControl->setPixmap(platformStyle->SingleColorIcon(icon).pixmap(STATUSBAR_ICONSIZE,STATUSBAR_ICONSIZE)); +} + +void BitcoinGUI::setNumConnections(int count) +{ + updateNetworkState(); +} + +void BitcoinGUI::setNetworkActive(bool networkActive) +{ + updateNetworkState(); } void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool header) { if (modalOverlay) { - if (header) { - /* use clientmodels getHeaderTipHeight and getHeaderTipTime because the NotifyHeaderTip signal does not fire when updating the best header */ - modalOverlay->setKnownBestHeight(clientModel->getHeaderTipHeight(), QDateTime::fromTime_t(clientModel->getHeaderTipTime())); - } - else { + if (header) + modalOverlay->setKnownBestHeight(count, blockDate); + else modalOverlay->tipUpdate(count, blockDate, nVerificationProgress); - } } if (!clientModel) return; @@ -1106,8 +1138,8 @@ void BitcoinGUI::setTrayIconVisible(bool fHideTrayIcon) void BitcoinGUI::showModalOverlay() { - if (modalOverlay) - modalOverlay->showHide(false, true); + if (modalOverlay && (progressBar->isVisible() || modalOverlay->isLayerVisible())) + modalOverlay->toggleVisibility(); } static bool ThreadSafeMessageBox(BitcoinGUI *gui, const std::string& message, const std::string& caption, unsigned int style) @@ -1141,6 +1173,13 @@ void BitcoinGUI::unsubscribeFromCoreSignals() uiInterface.ThreadSafeQuestion.disconnect(boost::bind(ThreadSafeMessageBox, this, _1, _3, _4)); } +void BitcoinGUI::toggleNetworkActive() +{ + if (clientModel) { + clientModel->setNetworkActive(!clientModel->getNetworkActive()); + } +} + UnitDisplayStatusBarControl::UnitDisplayStatusBarControl(const PlatformStyle *platformStyle) : optionsModel(0), menu(0) @@ -1168,7 +1207,7 @@ void UnitDisplayStatusBarControl::mousePressEvent(QMouseEvent *event) /** Creates context menu, its actions, and wires up all the relevant signals for mouse events. 
*/ void UnitDisplayStatusBarControl::createContextMenu() { - menu = new QMenu(); + menu = new QMenu(this); Q_FOREACH(BitcoinUnits::Unit u, BitcoinUnits::availableUnits()) { QAction *menuAction = new QAction(QString(BitcoinUnits::name(u)), this); diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h index 0eaa44b263..59540bfe6b 100644 --- a/src/qt/bitcoingui.h +++ b/src/qt/bitcoingui.h @@ -85,7 +85,7 @@ private: UnitDisplayStatusBarControl *unitDisplayControl; QLabel *labelWalletEncryptionIcon; QLabel *labelWalletHDStatusIcon; - QLabel *labelConnectionsIcon; + QLabel *connectionsControl; QLabel *labelBlocksIcon; QLabel *progressBarLabel; QProgressBar *progressBar; @@ -146,6 +146,9 @@ private: /** Disconnect core signals from GUI client */ void unsubscribeFromCoreSignals(); + /** Update UI with latest network info from model. */ + void updateNetworkState(); + Q_SIGNALS: /** Signal raised when a URI was entered or dragged to the GUI */ void receivedURI(const QString &uri); @@ -153,6 +156,8 @@ Q_SIGNALS: public Q_SLOTS: /** Set number of connections shown in the UI */ void setNumConnections(int count); + /** Set network state shown in the UI */ + void setNetworkActive(bool networkActive); /** Set number of blocks and last block date shown in the UI */ void setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers); @@ -232,6 +237,9 @@ private Q_SLOTS: /** When hideTrayIcon setting is changed in OptionsModel hide or show the icon accordingly. */ void setTrayIconVisible(bool); + /** Toggle networking */ + void toggleNetworkActive(); + void showModalOverlay(); }; diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index 87704c641d..e63a9cc7b8 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -6,11 +6,13 @@ #include "bantablemodel.h" #include "guiconstants.h" +#include "guiutil.h" #include "peertablemodel.h" #include "chainparams.h" #include "checkpoints.h" #include "clientversion.h" +#include "validation.h" #include "net.h" #include "txmempool.h" #include "ui_interface.h" @@ -144,6 +146,11 @@ void ClientModel::updateNumConnections(int numConnections) Q_EMIT numConnectionsChanged(numConnections); } +void ClientModel::updateNetworkActive(bool networkActive) +{ + Q_EMIT networkActiveChanged(networkActive); +} + void ClientModel::updateAlert() { Q_EMIT alertsChanged(getStatusBarWarnings()); @@ -166,6 +173,21 @@ enum BlockSource ClientModel::getBlockSource() const return BLOCK_SOURCE_NONE; } +void ClientModel::setNetworkActive(bool active) +{ + if (g_connman) { + g_connman->SetNetworkActive(active); + } +} + +bool ClientModel::getNetworkActive() const +{ + if (g_connman) { + return g_connman->GetNetworkActive(); + } + return false; +} + QString ClientModel::getStatusBarWarnings() const { return QString::fromStdString(GetWarnings("gui")); @@ -208,7 +230,7 @@ QString ClientModel::formatClientStartupTime() const QString ClientModel::dataDir() const { - return QString::fromStdString(GetDataDir().string()); + return GUIUtil::boostPathToQString(GetDataDir()); } void ClientModel::updateBanlist() @@ -232,6 +254,12 @@ static void NotifyNumConnectionsChanged(ClientModel *clientmodel, int newNumConn Q_ARG(int, newNumConnections)); } +static void NotifyNetworkActiveChanged(ClientModel *clientmodel, bool networkActive) +{ + QMetaObject::invokeMethod(clientmodel, "updateNetworkActive", Qt::QueuedConnection, + Q_ARG(bool, networkActive)); +} + static void NotifyAlertChanged(ClientModel *clientmodel) { qDebug() << "NotifyAlertChanged"; @@ -272,6 +300,7 
@@ void ClientModel::subscribeToCoreSignals() // Connect signals to client uiInterface.ShowProgress.connect(boost::bind(ShowProgress, this, _1, _2)); uiInterface.NotifyNumConnectionsChanged.connect(boost::bind(NotifyNumConnectionsChanged, this, _1)); + uiInterface.NotifyNetworkActiveChanged.connect(boost::bind(NotifyNetworkActiveChanged, this, _1)); uiInterface.NotifyAlertChanged.connect(boost::bind(NotifyAlertChanged, this)); uiInterface.BannedListChanged.connect(boost::bind(BannedListChanged, this)); uiInterface.NotifyBlockTip.connect(boost::bind(BlockTipChanged, this, _1, _2, false)); @@ -283,6 +312,7 @@ void ClientModel::unsubscribeFromCoreSignals() // Disconnect signals from client uiInterface.ShowProgress.disconnect(boost::bind(ShowProgress, this, _1, _2)); uiInterface.NotifyNumConnectionsChanged.disconnect(boost::bind(NotifyNumConnectionsChanged, this, _1)); + uiInterface.NotifyNetworkActiveChanged.disconnect(boost::bind(NotifyNetworkActiveChanged, this, _1)); uiInterface.NotifyAlertChanged.disconnect(boost::bind(NotifyAlertChanged, this)); uiInterface.BannedListChanged.disconnect(boost::bind(BannedListChanged, this)); uiInterface.NotifyBlockTip.disconnect(boost::bind(BlockTipChanged, this, _1, _2, false)); diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h index 3fd8404cbb..a641401425 100644 --- a/src/qt/clientmodel.h +++ b/src/qt/clientmodel.h @@ -68,6 +68,10 @@ public: bool inInitialBlockDownload() const; //! Return true if core is importing blocks enum BlockSource getBlockSource() const; + //! Return true if network activity in core is enabled + bool getNetworkActive() const; + //! Toggle network activity state in core + void setNetworkActive(bool active); //! Return warnings to be displayed in status bar QString getStatusBarWarnings() const; @@ -91,6 +95,7 @@ Q_SIGNALS: void numConnectionsChanged(int count); void numBlocksChanged(int count, const QDateTime& blockDate, double nVerificationProgress, bool header); void mempoolSizeChanged(long count, size_t mempoolSizeInBytes); + void networkActiveChanged(bool networkActive); void alertsChanged(const QString &warnings); void bytesChanged(quint64 totalBytesIn, quint64 totalBytesOut); @@ -103,6 +108,7 @@ Q_SIGNALS: public Q_SLOTS: void updateTimer(); void updateNumConnections(int numConnections); + void updateNetworkActive(bool networkActive); void updateAlert(); void updateBanlist(); }; diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp index 86fd4ebd65..4d34bf95e9 100644 --- a/src/qt/coincontroldialog.cpp +++ b/src/qt/coincontroldialog.cpp @@ -13,9 +13,9 @@ #include "txmempool.h" #include "walletmodel.h" -#include "coincontrol.h" +#include "wallet/coincontrol.h" #include "init.h" -#include "main.h" // For minRelayTxFee +#include "validation.h" // For minRelayTxFee #include "wallet/wallet.h" #include <boost/assign/list_of.hpp> // for 'map_list_of()' @@ -35,6 +35,13 @@ QList<CAmount> CoinControlDialog::payAmounts; CCoinControl* CoinControlDialog::coinControl = new CCoinControl(); bool CoinControlDialog::fSubtractFeeFromAmount = false; +bool CCoinControlWidgetItem::operator<(const QTreeWidgetItem &other) const { + int column = treeWidget()->sortColumn(); + if (column == CoinControlDialog::COLUMN_AMOUNT || column == CoinControlDialog::COLUMN_DATE || column == CoinControlDialog::COLUMN_CONFIRMATIONS) + return data(column, Qt::UserRole).toLongLong() < other.data(column, Qt::UserRole).toLongLong(); + return QTreeWidgetItem::operator<(other); +} + CoinControlDialog::CoinControlDialog(const PlatformStyle 
*_platformStyle, QWidget *parent) : QDialog(parent), ui(new Ui::CoinControlDialog), @@ -52,7 +59,7 @@ CoinControlDialog::CoinControlDialog(const PlatformStyle *_platformStyle, QWidge unlockAction = new QAction(tr("Unlock unspent"), this); // we need to enable/disable this // context menu - contextMenu = new QMenu(); + contextMenu = new QMenu(this); contextMenu->addAction(copyAddressAction); contextMenu->addAction(copyLabelAction); contextMenu->addAction(copyAmountAction); @@ -128,11 +135,9 @@ CoinControlDialog::CoinControlDialog(const PlatformStyle *_platformStyle, QWidge ui->treeWidget->setColumnWidth(COLUMN_CONFIRMATIONS, 110); ui->treeWidget->setColumnHidden(COLUMN_TXHASH, true); // store transaction hash in this column, but don't show it ui->treeWidget->setColumnHidden(COLUMN_VOUT_INDEX, true); // store vout index in this column, but don't show it - ui->treeWidget->setColumnHidden(COLUMN_AMOUNT_INT64, true); // store amount int64 in this column, but don't show it - ui->treeWidget->setColumnHidden(COLUMN_DATE_INT64, true); // store date int64 in this column, but don't show it // default view is sorted by amount desc - sortView(COLUMN_AMOUNT_INT64, Qt::DescendingOrder); + sortView(COLUMN_AMOUNT, Qt::DescendingOrder); // restore list mode and sortorder as a convenience feature QSettings settings; @@ -164,15 +169,6 @@ void CoinControlDialog::setModel(WalletModel *_model) } } -// helper function str_pad -QString CoinControlDialog::strPad(QString s, int nPadLength, QString sPadding) -{ - while (s.length() < nPadLength) - s = sPadding + s; - - return s; -} - // ok button void CoinControlDialog::buttonBoxClicked(QAbstractButton* button) { @@ -338,7 +334,7 @@ void CoinControlDialog::sortView(int column, Qt::SortOrder order) sortColumn = column; sortOrder = order; ui->treeWidget->sortItems(column, order); - ui->treeWidget->header()->setSortIndicator(getMappedColumn(sortColumn), sortOrder); + ui->treeWidget->header()->setSortIndicator(sortColumn, sortOrder); } // treeview: clicked on header @@ -346,12 +342,10 @@ void CoinControlDialog::headerSectionClicked(int logicalIndex) { if (logicalIndex == COLUMN_CHECKBOX) // click on most left column -> do nothing { - ui->treeWidget->header()->setSortIndicator(getMappedColumn(sortColumn), sortOrder); + ui->treeWidget->header()->setSortIndicator(sortColumn, sortOrder); } else { - logicalIndex = getMappedColumn(logicalIndex, false); - if (sortColumn == logicalIndex) sortOrder = ((sortOrder == Qt::AscendingOrder) ? 
Qt::DescendingOrder : Qt::AscendingOrder); else @@ -476,21 +470,21 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog) nQuantity++; // Amount - nAmount += out.tx->vout[out.i].nValue; + nAmount += out.tx->tx->vout[out.i].nValue; // Priority - dPriorityInputs += (double)out.tx->vout[out.i].nValue * (out.nDepth+1); + dPriorityInputs += (double)out.tx->tx->vout[out.i].nValue * (out.nDepth+1); // Bytes CTxDestination address; int witnessversion = 0; std::vector<unsigned char> witnessprogram; - if (out.tx->vout[out.i].scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram)) + if (out.tx->tx->vout[out.i].scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram)) { nBytesInputs += (32 + 4 + 1 + (107 / WITNESS_SCALE_FACTOR) + 4); fWitness = true; } - else if(ExtractDestination(out.tx->vout[out.i].scriptPubKey, address)) + else if(ExtractDestination(out.tx->tx->vout[out.i].scriptPubKey, address)) { CPubKey pubkey; CKeyID *keyid = boost::get<CKeyID>(&address); @@ -658,7 +652,7 @@ void CoinControlDialog::updateView() model->listCoins(mapCoins); BOOST_FOREACH(const PAIRTYPE(QString, std::vector<COutput>)& coins, mapCoins) { - QTreeWidgetItem *itemWalletAddress = new QTreeWidgetItem(); + CCoinControlWidgetItem *itemWalletAddress = new CCoinControlWidgetItem(); itemWalletAddress->setCheckState(COLUMN_CHECKBOX, Qt::Unchecked); QString sWalletAddress = coins.first; QString sWalletLabel = model->getAddressTableModel()->labelForAddress(sWalletAddress); @@ -683,19 +677,19 @@ void CoinControlDialog::updateView() CAmount nSum = 0; int nChildren = 0; BOOST_FOREACH(const COutput& out, coins.second) { - nSum += out.tx->vout[out.i].nValue; + nSum += out.tx->tx->vout[out.i].nValue; nChildren++; - QTreeWidgetItem *itemOutput; - if (treeMode) itemOutput = new QTreeWidgetItem(itemWalletAddress); - else itemOutput = new QTreeWidgetItem(ui->treeWidget); + CCoinControlWidgetItem *itemOutput; + if (treeMode) itemOutput = new CCoinControlWidgetItem(itemWalletAddress); + else itemOutput = new CCoinControlWidgetItem(ui->treeWidget); itemOutput->setFlags(flgCheckbox); itemOutput->setCheckState(COLUMN_CHECKBOX,Qt::Unchecked); // address CTxDestination outputAddress; QString sAddress = ""; - if(ExtractDestination(out.tx->vout[out.i].scriptPubKey, outputAddress)) + if(ExtractDestination(out.tx->tx->vout[out.i].scriptPubKey, outputAddress)) { sAddress = QString::fromStdString(CBitcoinAddress(outputAddress).ToString()); @@ -720,15 +714,16 @@ void CoinControlDialog::updateView() } // amount - itemOutput->setText(COLUMN_AMOUNT, BitcoinUnits::format(nDisplayUnit, out.tx->vout[out.i].nValue)); - itemOutput->setText(COLUMN_AMOUNT_INT64, strPad(QString::number(out.tx->vout[out.i].nValue), 15, " ")); // padding so that sorting works correctly + itemOutput->setText(COLUMN_AMOUNT, BitcoinUnits::format(nDisplayUnit, out.tx->tx->vout[out.i].nValue)); + itemOutput->setData(COLUMN_AMOUNT, Qt::UserRole, QVariant((qlonglong)out.tx->tx->vout[out.i].nValue)); // padding so that sorting works correctly // date itemOutput->setText(COLUMN_DATE, GUIUtil::dateTimeStr(out.tx->GetTxTime())); - itemOutput->setText(COLUMN_DATE_INT64, strPad(QString::number(out.tx->GetTxTime()), 20, " ")); + itemOutput->setData(COLUMN_DATE, Qt::UserRole, QVariant((qlonglong)out.tx->GetTxTime())); // confirmations - itemOutput->setText(COLUMN_CONFIRMATIONS, strPad(QString::number(out.nDepth), 8, " ")); + itemOutput->setText(COLUMN_CONFIRMATIONS, QString::number(out.nDepth)); + itemOutput->setData(COLUMN_CONFIRMATIONS, Qt::UserRole, 
QVariant((qlonglong)out.nDepth)); // transaction hash uint256 txhash = out.tx->GetHash(); @@ -756,7 +751,7 @@ void CoinControlDialog::updateView() { itemWalletAddress->setText(COLUMN_CHECKBOX, "(" + QString::number(nChildren) + ")"); itemWalletAddress->setText(COLUMN_AMOUNT, BitcoinUnits::format(nDisplayUnit, nSum)); - itemWalletAddress->setText(COLUMN_AMOUNT_INT64, strPad(QString::number(nSum), 15, " ")); + itemWalletAddress->setData(COLUMN_AMOUNT, Qt::UserRole, QVariant((qlonglong)nSum)); } } diff --git a/src/qt/coincontroldialog.h b/src/qt/coincontroldialog.h index 7d73421e3a..7c2269b1eb 100644 --- a/src/qt/coincontroldialog.h +++ b/src/qt/coincontroldialog.h @@ -28,6 +28,17 @@ namespace Ui { #define ASYMP_UTF8 "\xE2\x89\x88" +class CCoinControlWidgetItem : public QTreeWidgetItem +{ +public: + CCoinControlWidgetItem(QTreeWidget *parent, int type = Type) : QTreeWidgetItem(parent, type) {} + CCoinControlWidgetItem(int type = Type) : QTreeWidgetItem(type) {} + CCoinControlWidgetItem(QTreeWidgetItem *parent, int type = Type) : QTreeWidgetItem(parent, type) {} + + bool operator<(const QTreeWidgetItem &other) const; +}; + + class CoinControlDialog : public QDialog { Q_OBJECT @@ -59,13 +70,12 @@ private: const PlatformStyle *platformStyle; - QString strPad(QString, int, QString); void sortView(int, Qt::SortOrder); void updateView(); enum { - COLUMN_CHECKBOX, + COLUMN_CHECKBOX = 0, COLUMN_AMOUNT, COLUMN_LABEL, COLUMN_ADDRESS, @@ -73,30 +83,8 @@ private: COLUMN_CONFIRMATIONS, COLUMN_TXHASH, COLUMN_VOUT_INDEX, - COLUMN_AMOUNT_INT64, - COLUMN_DATE_INT64 }; - - // some columns have a hidden column containing the value used for sorting - int getMappedColumn(int column, bool fVisibleColumn = true) - { - if (fVisibleColumn) - { - if (column == COLUMN_AMOUNT_INT64) - return COLUMN_AMOUNT; - else if (column == COLUMN_DATE_INT64) - return COLUMN_DATE; - } - else - { - if (column == COLUMN_AMOUNT) - return COLUMN_AMOUNT_INT64; - else if (column == COLUMN_DATE) - return COLUMN_DATE_INT64; - } - - return column; - } + friend class CCoinControlWidgetItem; private Q_SLOTS: void showMenu(const QPoint &); diff --git a/src/qt/forms/modaloverlay.ui b/src/qt/forms/modaloverlay.ui index 73223735f5..a37672ad53 100644 --- a/src/qt/forms/modaloverlay.ui +++ b/src/qt/forms/modaloverlay.ui @@ -130,7 +130,7 @@ QLabel { color: rgb(40,40,40); }</string> <item> <widget class="QLabel" name="infoText"> <property name="text"> - <string>The displayed information may be out of date. Your wallet automatically synchronizes with the Bitcoin network after a connection is established, but this process has not completed yet. This means that recent transactions will not be visible, and the balance will not be up-to-date until this process has completed.</string> + <string>Recent transactions may not yet be visible, and therefore your wallet's balance might be incorrect. 
This information will be correct once your wallet has finished synchronizing with the bitcoin network, as detailed below.</string> </property> <property name="textFormat"> <enum>Qt::RichText</enum> @@ -149,7 +149,7 @@ QLabel { color: rgb(40,40,40); }</string> </font> </property> <property name="text"> - <string>Spending bitcoins may not be possible during that phase!</string> + <string>Attempting to spend bitcoins that are affected by not-yet-displayed transactions will not be accepted by the network.</string> </property> <property name="textFormat"> <enum>Qt::RichText</enum> @@ -219,7 +219,7 @@ QLabel { color: rgb(40,40,40); }</string> <item row="0" column="1"> <widget class="QLabel" name="numberOfBlocksLeft"> <property name="text"> - <string>unknown...</string> + <string>Unknown...</string> </property> </widget> </item> @@ -245,7 +245,7 @@ QLabel { color: rgb(40,40,40); }</string> </sizepolicy> </property> <property name="text"> - <string>unknown...</string> + <string>Unknown...</string> </property> </widget> </item> diff --git a/src/qt/forms/sendcoinsdialog.ui b/src/qt/forms/sendcoinsdialog.ui index 06e09074d1..0a8afa2e76 100644 --- a/src/qt/forms/sendcoinsdialog.ui +++ b/src/qt/forms/sendcoinsdialog.ui @@ -411,7 +411,7 @@ </property> </widget> </item> - </layout> + </layout> </item> <item> <layout class="QFormLayout" name="formLayoutCoinControl4"> @@ -1031,7 +1031,7 @@ <item> <widget class="QLabel" name="labelSmartFee3"> <property name="text"> - <string>Confirmation time:</string> + <string>Confirmation time target:</string> </property> <property name="margin"> <number>2</number> @@ -1064,7 +1064,7 @@ <number>0</number> </property> <property name="maximum"> - <number>24</number> + <number>23</number> </property> <property name="pageStep"> <number>1</number> @@ -1096,6 +1096,26 @@ </widget> </item> <item> + <spacer name="horizontalSpacer_7"> + <property name="orientation"> + <enum>Qt::Horizontal</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>40</width> + <height>20</height> + </size> + </property> + </spacer> + </item> + <item> + <widget class="QLabel" name="confirmationTargetLabel"> + <property name="text"> + <string>(count)</string> + </property> + </widget> + </item> + <item> <spacer name="horizontalSpacer_3"> <property name="orientation"> <enum>Qt::Horizontal</enum> diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 42dafa1175..8132e4fe0d 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -11,7 +11,7 @@ #include "primitives/transaction.h" #include "init.h" -#include "main.h" // For minRelayTxFee +#include "validation.h" // For minRelayTxFee #include "protocol.h" #include "script/script.h" #include "script/standard.h" @@ -55,6 +55,7 @@ #include <QSettings> #include <QTextDocument> // for Qt::mightBeRichText #include <QThread> +#include <QMouseEvent> #if QT_VERSION < 0x050000 #include <QUrl> @@ -116,7 +117,7 @@ static std::string DummyAddress(const CChainParams &params) std::vector<unsigned char> sourcedata = params.Base58Prefix(CChainParams::PUBKEY_ADDRESS); sourcedata.insert(sourcedata.end(), dummydata, dummydata + sizeof(dummydata)); for(int i=0; i<256; ++i) { // Try every trailing byte - std::string s = EncodeBase58(begin_ptr(sourcedata), end_ptr(sourcedata)); + std::string s = EncodeBase58(sourcedata.data(), sourcedata.data() + sourcedata.size()); if (!CBitcoinAddress(s).IsValid()) return s; sourcedata[sourcedata.size()-1] += 1; @@ -291,17 +292,11 @@ void copyEntryData(QAbstractItemView *view, int column, int role) } } -QVariant
getEntryData(QAbstractItemView *view, int column, int role) +QList<QModelIndex> getEntryData(QAbstractItemView *view, int column) { if(!view || !view->selectionModel()) - return QVariant(); - QModelIndexList selection = view->selectionModel()->selectedRows(column); - - if(!selection.isEmpty()) { - // Return first item - return (selection.at(0).data(role)); - } - return QVariant(); + return QList<QModelIndex>(); + return view->selectionModel()->selectedRows(column); } QString getSaveFileName(QWidget *parent, const QString &caption, const QString &dir, @@ -591,7 +586,8 @@ void TableViewLastColumnResizingFixer::on_geometriesChanged() * Initializes all internal variables and prepares the * the resize modes of the last 2 columns of the table and */ -TableViewLastColumnResizingFixer::TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth) : +TableViewLastColumnResizingFixer::TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth, QObject *parent) : + QObject(parent), tableView(table), lastColumnMinimumWidth(lastColMinimumWidth), allColumnsMinimumWidth(allColsMinimumWidth) @@ -991,4 +987,15 @@ QString formateNiceTimeOffset(qint64 secs) } return timeBehindText; } + +void ClickableLabel::mouseReleaseEvent(QMouseEvent *event) +{ + Q_EMIT clicked(event->pos()); +} + +void ClickableProgressBar::mouseReleaseEvent(QMouseEvent *event) +{ + Q_EMIT clicked(event->pos()); +} + } // namespace GUIUtil diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h index e28f68930f..4ea2aa36e7 100644 --- a/src/qt/guiutil.h +++ b/src/qt/guiutil.h @@ -14,6 +14,7 @@ #include <QProgressBar> #include <QString> #include <QTableView> +#include <QLabel> #include <boost/filesystem.hpp> @@ -67,10 +68,9 @@ namespace GUIUtil /** Return a field of the currently selected entry as a QString. Does nothing if nothing is selected. @param[in] column Data column to extract from the model - @param[in] role Data role to extract from the model @see TransactionView::copyLabel, TransactionView::copyAmount, TransactionView::copyAddress */ - QVariant getEntryData(QAbstractItemView *view, int column, int role); + QList<QModelIndex> getEntryData(QAbstractItemView *view, int column); void setClipboard(const QString& str); @@ -150,7 +150,7 @@ namespace GUIUtil Q_OBJECT public: - TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth); + TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth, QObject *parent); void stretchColumnWidth(int column); private: @@ -202,18 +202,44 @@ namespace GUIUtil QString formateNiceTimeOffset(qint64 secs); + class ClickableLabel : public QLabel + { + Q_OBJECT + + Q_SIGNALS: + /** Emitted when the label is clicked. The relative mouse coordinates of the click are + * passed to the signal. + */ + void clicked(const QPoint& point); + protected: + void mouseReleaseEvent(QMouseEvent *event); + }; + + class ClickableProgressBar : public QProgressBar + { + Q_OBJECT + + Q_SIGNALS: + /** Emitted when the progressbar is clicked. The relative mouse coordinates of the click are + * passed to the signal. 
+ */ + void clicked(const QPoint& point); + protected: + void mouseReleaseEvent(QMouseEvent *event); + }; + #if defined(Q_OS_MAC) && QT_VERSION >= 0x050000 // workaround for Qt OSX Bug: // https://bugreports.qt-project.org/browse/QTBUG-15631 // QProgressBar uses around 10% CPU even when app is in background - class ProgressBar : public QProgressBar + class ProgressBar : public ClickableProgressBar { bool event(QEvent *e) { return (e->type() != QEvent::StyleAnimationUpdate) ? QProgressBar::event(e) : false; } }; #else - typedef QProgressBar ProgressBar; + typedef ClickableProgressBar ProgressBar; #endif } // namespace GUIUtil diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp index ae0d8f5f63..3e8282e63d 100644 --- a/src/qt/modaloverlay.cpp +++ b/src/qt/modaloverlay.cpp @@ -132,10 +132,18 @@ void ModalOverlay::tipUpdate(int count, const QDateTime& blockDate, double nVeri if (estimateNumHeadersLeft < 24 && hasBestHeader) { ui->numberOfBlocksLeft->setText(QString::number(bestHeaderHeight - count)); } else { - ui->expectedTimeLeft->setText(tr("Unknown. Syncing Headers...")); + ui->numberOfBlocksLeft->setText(tr("Unknown. Syncing Headers (%1)...").arg(bestHeaderHeight)); + ui->expectedTimeLeft->setText(tr("Unknown...")); } } +void ModalOverlay::toggleVisibility() +{ + showHide(layerIsVisible, true); + if (!layerIsVisible) + userClosed = true; +} + void ModalOverlay::showHide(bool hide, bool userRequested) { if ( (layerIsVisible && !hide) || (!layerIsVisible && hide) || (!hide && userClosed && !userRequested)) diff --git a/src/qt/modaloverlay.h b/src/qt/modaloverlay.h index 66c0aa78cf..70d37b87ad 100644 --- a/src/qt/modaloverlay.h +++ b/src/qt/modaloverlay.h @@ -25,9 +25,11 @@ public Q_SLOTS: void tipUpdate(int count, const QDateTime& blockDate, double nVerificationProgress); void setKnownBestHeight(int count, const QDateTime& blockDate); + void toggleVisibility(); // will show or hide the modal layer void showHide(bool hide = false, bool userRequested = false); void closeClicked(); + bool isLayerVisible() { return layerIsVisible; } protected: bool eventFilter(QObject * obj, QEvent * ev); diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index 588059d0c5..2d5e9ff686 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -13,7 +13,7 @@ #include "guiutil.h" #include "optionsmodel.h" -#include "main.h" // for DEFAULT_SCRIPTCHECK_THREADS and MAX_SCRIPTCHECK_THREADS +#include "validation.h" // for DEFAULT_SCRIPTCHECK_THREADS and MAX_SCRIPTCHECK_THREADS #include "netbase.h" #include "txdb.h" // for -dbcache defaults diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp index f82e153b67..d48c4d91e7 100644 --- a/src/qt/optionsmodel.cpp +++ b/src/qt/optionsmodel.cpp @@ -13,7 +13,7 @@ #include "amount.h" #include "init.h" -#include "main.h" // For DEFAULT_SCRIPTCHECK_THREADS +#include "validation.h" // For DEFAULT_SCRIPTCHECK_THREADS #include "net.h" #include "netbase.h" #include "txdb.h" // for -dbcache defaults diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h index b23b5f2607..d2e1fb23a3 100644 --- a/src/qt/optionsmodel.h +++ b/src/qt/optionsmodel.h @@ -81,7 +81,7 @@ private: int nDisplayUnit; QString strThirdPartyTxUrls; bool fCoinControlFeatures; - /* settings that were overriden by command-line */ + /* settings that were overridden by command-line */ QString strOverriddenByCommandLine; // Add option to list of GUI options overridden through command line/config file diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp index 
7ccdb89c0c..ea1bb2fc94 100644 --- a/src/qt/overviewpage.cpp +++ b/src/qt/overviewpage.cpp @@ -25,8 +25,8 @@ class TxViewDelegate : public QAbstractItemDelegate { Q_OBJECT public: - TxViewDelegate(const PlatformStyle *_platformStyle): - QAbstractItemDelegate(), unit(BitcoinUnits::BTC), + TxViewDelegate(const PlatformStyle *_platformStyle, QObject *parent=nullptr): + QAbstractItemDelegate(parent), unit(BitcoinUnits::BTC), platformStyle(_platformStyle) { @@ -119,8 +119,7 @@ OverviewPage::OverviewPage(const PlatformStyle *platformStyle, QWidget *parent) currentWatchOnlyBalance(-1), currentWatchUnconfBalance(-1), currentWatchImmatureBalance(-1), - txdelegate(new TxViewDelegate(platformStyle)), - filter(0) + txdelegate(new TxViewDelegate(platformStyle, this)) { ui->setupUi(this); @@ -220,7 +219,7 @@ void OverviewPage::setWalletModel(WalletModel *model) if(model && model->getOptionsModel()) { // Set up transaction list - filter = new TransactionFilterProxy(); + filter.reset(new TransactionFilterProxy()); filter->setSourceModel(model->getTransactionTableModel()); filter->setLimit(NUM_ITEMS); filter->setDynamicSortFilter(true); @@ -228,7 +227,7 @@ void OverviewPage::setWalletModel(WalletModel *model) filter->setShowInactive(false); filter->sort(TransactionTableModel::Date, Qt::DescendingOrder); - ui->listTransactions->setModel(filter); + ui->listTransactions->setModel(filter.get()); ui->listTransactions->setModelColumn(TransactionTableModel::ToAddress); // Keep up to date with wallet diff --git a/src/qt/overviewpage.h b/src/qt/overviewpage.h index 65cd3341b6..ffe0de328c 100644 --- a/src/qt/overviewpage.h +++ b/src/qt/overviewpage.h @@ -8,6 +8,7 @@ #include "amount.h" #include <QWidget> +#include <memory> class ClientModel; class TransactionFilterProxy; @@ -56,7 +57,7 @@ private: CAmount currentWatchImmatureBalance; TxViewDelegate *txdelegate; - TransactionFilterProxy *filter; + std::unique_ptr<TransactionFilterProxy> filter; private Q_SLOTS: void updateDisplayUnit(); diff --git a/src/qt/paymentrequestplus.cpp b/src/qt/paymentrequestplus.cpp index 20e1f79ffa..82be4d831f 100644 --- a/src/qt/paymentrequestplus.cpp +++ b/src/qt/paymentrequestplus.cpp @@ -159,14 +159,24 @@ bool PaymentRequestPlus::getMerchant(X509_STORE* certStore, QString& merchant) c std::string data_to_verify; // Everything but the signature rcopy.SerializeToString(&data_to_verify); - EVP_MD_CTX ctx; +#if OPENSSL_VERSION_NUMBER >= 0x10100000L + EVP_MD_CTX *ctx = EVP_MD_CTX_new(); + if (!ctx) throw SSLVerifyError("Error allocating OpenSSL context."); +#else + EVP_MD_CTX _ctx; + EVP_MD_CTX *ctx; + ctx = &_ctx; +#endif EVP_PKEY *pubkey = X509_get_pubkey(signing_cert); - EVP_MD_CTX_init(&ctx); - if (!EVP_VerifyInit_ex(&ctx, digestAlgorithm, NULL) || - !EVP_VerifyUpdate(&ctx, data_to_verify.data(), data_to_verify.size()) || - !EVP_VerifyFinal(&ctx, (const unsigned char*)paymentRequest.signature().data(), (unsigned int)paymentRequest.signature().size(), pubkey)) { + EVP_MD_CTX_init(ctx); + if (!EVP_VerifyInit_ex(ctx, digestAlgorithm, NULL) || + !EVP_VerifyUpdate(ctx, data_to_verify.data(), data_to_verify.size()) || + !EVP_VerifyFinal(ctx, (const unsigned char*)paymentRequest.signature().data(), (unsigned int)paymentRequest.signature().size(), pubkey)) { throw SSLVerifyError("Bad signature, invalid payment request."); } +#if OPENSSL_VERSION_NUMBER >= 0x10100000L + EVP_MD_CTX_free(ctx); +#endif // OpenSSL API for getting human printable strings from certs is baroque. 
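The version guard above accommodates OpenSSL 1.1, where EVP_MD_CTX became an opaque type that must be heap-allocated with EVP_MD_CTX_new and released with EVP_MD_CTX_free. A standalone sketch of the same pattern; the function name, the SHA-256 digest choice, and the error handling are illustrative assumptions rather than what the payment-request code does:

    #include <cstddef>
    #include <openssl/evp.h>

    // Verify `sig` over `data` with `pubkey`, working on both old and new OpenSSL.
    static bool VerifyBlob(EVP_PKEY* pubkey,
                           const unsigned char* data, size_t datalen,
                           const unsigned char* sig, unsigned int siglen)
    {
    #if OPENSSL_VERSION_NUMBER >= 0x10100000L
        EVP_MD_CTX* ctx = EVP_MD_CTX_new();          // opaque struct: heap only
        if (!ctx) return false;
    #else
        EVP_MD_CTX _ctx;                             // pre-1.1: a stack object is fine
        EVP_MD_CTX* ctx = &_ctx;
        EVP_MD_CTX_init(ctx);
    #endif
        const bool ok = EVP_VerifyInit_ex(ctx, EVP_sha256(), NULL) &&
                        EVP_VerifyUpdate(ctx, data, datalen) &&
                        EVP_VerifyFinal(ctx, sig, siglen, pubkey) == 1;
    #if OPENSSL_VERSION_NUMBER >= 0x10100000L
        EVP_MD_CTX_free(ctx);
    #else
        EVP_MD_CTX_cleanup(ctx);
    #endif
        return ok;
    }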
int textlen = X509_NAME_get_text_by_NID(certname, NID_commonName, NULL, 0); diff --git a/src/qt/paymentrequestplus.h b/src/qt/paymentrequestplus.h index a73fe5f29d..a8bfcd2ac4 100644 --- a/src/qt/paymentrequestplus.h +++ b/src/qt/paymentrequestplus.h @@ -1,4 +1,4 @@ -// Copyright (c) 2011-2015 The Bitcoin developers +// Copyright (c) 2011-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index 9f23e77a13..d5f0156d6c 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -10,7 +10,7 @@ #include "base58.h" #include "chainparams.h" -#include "main.h" // For minRelayTxFee +#include "validation.h" // For minRelayTxFee #include "ui_interface.h" #include "util.h" #include "wallet/wallet.h" @@ -58,14 +58,19 @@ const char* BIP71_MIMETYPE_PAYMENTREQUEST = "application/bitcoin-paymentrequest" // BIP70 max payment request size in bytes (DoS protection) const qint64 BIP70_MAX_PAYMENTREQUEST_SIZE = 50000; -X509_STORE* PaymentServer::certStore = NULL; -void PaymentServer::freeCertStore() +struct X509StoreDeleter { + void operator()(X509_STORE* b) { + X509_STORE_free(b); + } +}; + +struct X509Deleter { + void operator()(X509* b) { X509_free(b); } +}; + +namespace // Anon namespace { - if (PaymentServer::certStore != NULL) - { - X509_STORE_free(PaymentServer::certStore); - PaymentServer::certStore = NULL; - } + std::unique_ptr<X509_STORE, X509StoreDeleter> certStore; } // @@ -80,7 +85,7 @@ static QString ipcServerName() // Append a simple hash of the datadir // Note that GetDataDir(true) returns a different path // for -testnet versus main net - QString ddir(QString::fromStdString(GetDataDir(true).string())); + QString ddir(GUIUtil::boostPathToQString(GetDataDir(true))); name.append(QString::number(qHash(ddir))); return name; @@ -107,20 +112,15 @@ static void ReportInvalidCertificate(const QSslCertificate& cert) // void PaymentServer::LoadRootCAs(X509_STORE* _store) { - if (PaymentServer::certStore == NULL) - atexit(PaymentServer::freeCertStore); - else - freeCertStore(); - // Unit tests mostly use this, to pass in fake root CAs: if (_store) { - PaymentServer::certStore = _store; + certStore.reset(_store); return; } // Normal execution, use either -rootcertificates or system certs: - PaymentServer::certStore = X509_STORE_new(); + certStore.reset(X509_STORE_new()); // Note: use "-system-" default here so that users can pass -rootcertificates="" // and get 'I don't like X.509 certificates, don't trust anybody' behavior: @@ -167,11 +167,11 @@ void PaymentServer::LoadRootCAs(X509_STORE* _store) QByteArray certData = cert.toDer(); const unsigned char *data = (const unsigned char *)certData.data(); - X509* x509 = d2i_X509(0, &data, certData.size()); - if (x509 && X509_STORE_add_cert(PaymentServer::certStore, x509)) + std::unique_ptr<X509, X509Deleter> x509(d2i_X509(0, &data, certData.size())); + if (x509 && X509_STORE_add_cert(certStore.get(), x509.get())) { - // Note: X509_STORE_free will free the X509* objects when - // the PaymentServer is destroyed + // Note: X509_STORE increases the reference count to the X509 object, + // we still have to release our reference to it. 
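
The paymentserver.cpp hunks above replace the raw static X509_STORE* (and its atexit-based freeCertStore()) with a std::unique_ptr owning the store through a custom deleter, and wrap the temporary X509 the same way because X509_STORE_add_cert takes its own reference. A small self-contained sketch of that ownership pattern follows; the add_der_cert() helper is an illustrative assumption, not code from this diff.

// Editorial sketch (not part of the diff): RAII ownership of OpenSSL objects
// through std::unique_ptr with custom deleters, mirroring the hunk above.
#include <openssl/x509.h>
#include <memory>

struct X509StoreDeleter { void operator()(X509_STORE *s) const { X509_STORE_free(s); } };
struct X509Deleter      { void operator()(X509 *c) const { X509_free(c); } };

using X509StorePtr = std::unique_ptr<X509_STORE, X509StoreDeleter>;
using X509Ptr      = std::unique_ptr<X509, X509Deleter>;

// Parse one DER-encoded certificate and add it to the store. The store keeps
// its own reference (X509_STORE_add_cert bumps the refcount), so releasing the
// local reference when cert goes out of scope is both safe and required.
static bool add_der_cert(X509StorePtr& store, const unsigned char *der, long len)
{
    X509Ptr cert(d2i_X509(nullptr, &der, len));
    return cert && X509_STORE_add_cert(store.get(), cert.get()) == 1;
}

A caller would initialise the store once with X509StorePtr store(X509_STORE_new()); after that, both frees happen automatically when the smart pointers are reset or go out of scope.
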
++nRootCerts; } else @@ -550,7 +550,7 @@ bool PaymentServer::processPaymentRequest(const PaymentRequestPlus& request, Sen recipient.paymentRequest = request; recipient.message = GUIUtil::HtmlEscape(request.getDetails().memo()); - request.getMerchant(PaymentServer::certStore, recipient.authenticatedMerchant); + request.getMerchant(certStore.get(), recipient.authenticatedMerchant); QList<std::pair<CScript, CAmount> > sendingTos = request.getPayTo(); QStringList addresses; @@ -807,3 +807,8 @@ bool PaymentServer::verifyAmount(const CAmount& requestAmount) } return fVerified; } + +X509_STORE* PaymentServer::getCertStore() +{ + return certStore.get(); +} diff --git a/src/qt/paymentserver.h b/src/qt/paymentserver.h index 2d27ed078b..7202e7dada 100644 --- a/src/qt/paymentserver.h +++ b/src/qt/paymentserver.h @@ -83,7 +83,7 @@ public: static void LoadRootCAs(X509_STORE* store = NULL); // Return certificate store - static X509_STORE* getCertStore() { return certStore; } + static X509_STORE* getCertStore(); // OptionsModel is used for getting proxy settings and display unit void setOptionsModel(OptionsModel *optionsModel); @@ -140,9 +140,6 @@ private: bool saveURIs; // true during startup QLocalServer* uriServer; - static X509_STORE* certStore; // Trusted root certificates - static void freeCertStore(); - QNetworkAccessManager* netManager; // Used to fetch payment requests OptionsModel *optionsModel; diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp index a2f9471fcc..dd4bd55396 100644 --- a/src/qt/peertablemodel.cpp +++ b/src/qt/peertablemodel.cpp @@ -8,6 +8,7 @@ #include "guiconstants.h" #include "guiutil.h" +#include "validation.h" // for cs_main #include "sync.h" #include <QDebug> @@ -114,12 +115,12 @@ PeerTableModel::PeerTableModel(ClientModel *parent) : timer(0) { columns << tr("NodeId") << tr("Node/Service") << tr("User Agent") << tr("Ping"); - priv = new PeerTablePriv(); + priv.reset(new PeerTablePriv()); // default to unsorted priv->sortColumn = -1; // set up timer for auto refresh - timer = new QTimer(); + timer = new QTimer(this); connect(timer, SIGNAL(timeout()), SLOT(refresh())); timer->setInterval(MODEL_UPDATE_DELAY); @@ -127,6 +128,11 @@ PeerTableModel::PeerTableModel(ClientModel *parent) : refresh(); } +PeerTableModel::~PeerTableModel() +{ + // Intentionally left empty +} + void PeerTableModel::startAutoRefresh() { timer->start(); diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h index a4f7bbdb3d..3e4f0a68c4 100644 --- a/src/qt/peertablemodel.h +++ b/src/qt/peertablemodel.h @@ -5,7 +5,7 @@ #ifndef BITCOIN_QT_PEERTABLEMODEL_H #define BITCOIN_QT_PEERTABLEMODEL_H -#include "main.h" // For CNodeStateStats +#include "net_processing.h" // For CNodeStateStats #include "net.h" #include <QAbstractTableModel> @@ -46,6 +46,7 @@ class PeerTableModel : public QAbstractTableModel public: explicit PeerTableModel(ClientModel *parent = 0); + ~PeerTableModel(); const CNodeCombinedStats *getNodeStats(int idx); int getRowByNodeId(NodeId nodeid); void startAutoRefresh(); @@ -75,7 +76,7 @@ public Q_SLOTS: private: ClientModel *clientModel; QStringList columns; - PeerTablePriv *priv; + std::unique_ptr<PeerTablePriv> priv; QTimer *timer; }; diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp index b50cad4975..dd83547a91 100644 --- a/src/qt/receivecoinsdialog.cpp +++ b/src/qt/receivecoinsdialog.cpp @@ -25,6 +25,7 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWidget *parent) : QDialog(parent), ui(new Ui::ReceiveCoinsDialog), 
+ columnResizingFixer(0), model(0), platformStyle(_platformStyle) { @@ -49,7 +50,7 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWid QAction *copyAmountAction = new QAction(tr("Copy amount"), this); // context menu - contextMenu = new QMenu(); + contextMenu = new QMenu(this); contextMenu->addAction(copyURIAction); contextMenu->addAction(copyLabelAction); contextMenu->addAction(copyMessageAction); @@ -91,7 +92,7 @@ void ReceiveCoinsDialog::setModel(WalletModel *_model) SIGNAL(selectionChanged(QItemSelection, QItemSelection)), this, SLOT(recentRequestsView_selectionChanged(QItemSelection, QItemSelection))); // Last 2 columns are set by the columnResizingFixer, when the table geometry is ready. - columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(tableView, AMOUNT_MINIMUM_COLUMN_WIDTH, DATE_COLUMN_WIDTH); + columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(tableView, AMOUNT_MINIMUM_COLUMN_WIDTH, DATE_COLUMN_WIDTH, this); } } diff --git a/src/qt/receiverequestdialog.cpp b/src/qt/receiverequestdialog.cpp index 998c9176d7..f896461735 100644 --- a/src/qt/receiverequestdialog.cpp +++ b/src/qt/receiverequestdialog.cpp @@ -32,7 +32,7 @@ QRImageWidget::QRImageWidget(QWidget *parent): QLabel(parent), contextMenu(0) { - contextMenu = new QMenu(); + contextMenu = new QMenu(this); QAction *saveImageAction = new QAction(tr("&Save Image..."), this); connect(saveImageAction, SIGNAL(triggered()), this, SLOT(saveImage())); contextMenu->addAction(saveImageAction); diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp index 2335d6b282..35d37bb22b 100644 --- a/src/qt/recentrequeststablemodel.cpp +++ b/src/qt/recentrequeststablemodel.cpp @@ -14,7 +14,7 @@ #include <boost/foreach.hpp> RecentRequestsTableModel::RecentRequestsTableModel(CWallet *wallet, WalletModel *parent) : - walletModel(parent) + QAbstractTableModel(parent), walletModel(parent) { Q_UNUSED(wallet); nReceiveRequestsMaxId = 0; diff --git a/src/qt/recentrequeststablemodel.h b/src/qt/recentrequeststablemodel.h index 0193e748d7..8ee2c9cbac 100644 --- a/src/qt/recentrequeststablemodel.h +++ b/src/qt/recentrequeststablemodel.h @@ -27,7 +27,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { unsigned int nDate = date.toTime_t(); READWRITE(this->nVersion); diff --git a/src/qt/res/icons/network_disabled.png b/src/qt/res/icons/network_disabled.png Binary files differnew file mode 100644 index 0000000000..269c3cfab8 --- /dev/null +++ b/src/qt/res/icons/network_disabled.png diff --git a/src/qt/res/src/connect-0.svg b/src/qt/res/src/connect-0.svg index 7d2afac622..0920555b96 100644 --- a/src/qt/res/src/connect-0.svg +++ b/src/qt/res/src/connect-0.svg @@ -7,8 +7,8 @@ xmlns="http://www.w3.org/2000/svg" id="svg2" viewBox="0 0 24 24" - height="24" - width="24" + height="92" + width="92" version="1.2"> <metadata id="metadata10"> diff --git a/src/qt/res/src/connect-1.svg b/src/qt/res/src/connect-1.svg index d17928c97d..25dea4cd3a 100644 --- a/src/qt/res/src/connect-1.svg +++ b/src/qt/res/src/connect-1.svg @@ -6,8 +6,8 @@ xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" version="1.2" - width="24" - height="24" + width="92" + height="92" viewBox="0 0 24 24" id="svg2"> <metadata diff --git a/src/qt/res/src/connect-2.svg b/src/qt/res/src/connect-2.svg index 
841ca6071d..bb98333d23 100644 --- a/src/qt/res/src/connect-2.svg +++ b/src/qt/res/src/connect-2.svg @@ -6,8 +6,8 @@ xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" version="1.2" - width="24" - height="24" + width="92" + height="92" viewBox="0 0 24 24" id="svg2"> <metadata diff --git a/src/qt/res/src/connect-3.svg b/src/qt/res/src/connect-3.svg index b06e67daf8..a54a55ef61 100644 --- a/src/qt/res/src/connect-3.svg +++ b/src/qt/res/src/connect-3.svg @@ -7,8 +7,8 @@ xmlns="http://www.w3.org/2000/svg" id="svg2" viewBox="0 0 24 24" - height="24" - width="24" + height="92" + width="92" version="1.2"> <metadata id="metadata10"> diff --git a/src/qt/res/src/connect-4.svg b/src/qt/res/src/connect-4.svg index 0abc7955fd..b83b9f9d03 100644 --- a/src/qt/res/src/connect-4.svg +++ b/src/qt/res/src/connect-4.svg @@ -6,8 +6,8 @@ xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" version="1.2" - width="24" - height="24" + width="92" + height="92" viewBox="0 0 24 24" id="svg2"> <metadata diff --git a/src/qt/res/src/network_disabled.svg b/src/qt/res/src/network_disabled.svg new file mode 100644 index 0000000000..a041d77439 --- /dev/null +++ b/src/qt/res/src/network_disabled.svg @@ -0,0 +1,49 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + id="svg2" + viewBox="0 0 24 24" + width="92" + height="92" + version="1.2"> + <metadata + id="metadata10"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title></dc:title> + </cc:Work> + </rdf:RDF> + </metadata> + <defs + id="defs8" /> + <g + id="g4142" + transform="matrix(0,-1,-1,0,23.96,24)"> + <g + id="g4210" + transform="matrix(-1,0,0,1,59.86,-106.6)"> + <g + id="g4289" + transform="matrix(-1,0,0,1,-16.98,0.8136)"> + <g + id="g4291"> + <path + id="path4293" + transform="matrix(0,-1,-1,0,-52.84,129.7464)" + d="M 20.146484 1.0097656 C 18.746484 1.0097656 17.646484 1.8597656 17.146484 3.0097656 L 14.447266 3.0097656 C 12.247266 3.0097656 10.447266 4.7997656 10.447266 7.0097656 L 10.447266 8.1425781 C 10.128283 8.0634395 9.7980674 8.0097656 9.4472656 8.0097656 L 6.8457031 8.0097656 C 6.3457031 6.8597656 5.2457031 6.0097656 3.8457031 6.0097656 C 1.8457031 6.0097656 0.34570312 7.5997656 0.34570312 9.5097656 C 0.34570312 11.419766 1.8457031 13.009766 3.8457031 13.009766 C 5.2457031 13.009766 6.3457031 12.159766 6.8457031 11.009766 L 8.9746094 11.009766 C 8.8693536 11.330059 8.8007812 11.663345 8.8007812 12.001953 C 8.8007813 12.841953 9.1402344 13.671625 9.7402344 14.265625 C 9.9479364 14.475439 10.191281 14.640988 10.447266 14.783203 L 10.447266 16.980469 C 10.447266 17.530469 9.9472656 17.980469 9.4472656 17.980469 L 6.8457031 17.980469 C 6.3457031 16.830469 5.2457031 15.980469 3.8457031 15.980469 C 1.8457031 15.980469 0.34570312 17.570469 0.34570312 19.480469 C 0.34570312 21.390469 1.8457031 22.990234 3.8457031 22.990234 C 5.2457031 22.990234 6.2457031 22.14 6.8457031 21 L 9.4472656 21 C 11.747266 21 13.447266 19.19 13.447266 17 L 13.447266 15.869141 C 13.768504 15.952624 14.100702 16.009766 14.447266 16.009766 L 17.146484 16.009766 C 17.646484 17.159766 18.746484 18.009766 20.146484 18.009766 C 22.046484 18.009766 23.646484 16.449766 23.646484 14.509766 C 23.646484 12.579766 
22.046484 11.009766 20.146484 11.009766 C 18.746484 11.009766 17.646484 11.859766 17.146484 13.009766 L 15.009766 13.009766 C 15.119625 12.684735 15.189453 12.346256 15.189453 12 C 15.189453 11.16 14.849906 10.339953 14.253906 9.7519531 C 14.0189 9.51021 13.74069 9.3244522 13.447266 9.171875 L 13.447266 7.0097656 C 13.447266 6.4597656 13.947266 6.0097656 14.447266 6.0097656 L 17.146484 6.0097656 C 17.646484 7.1597656 18.746484 8.0097656 20.146484 8.0097656 C 22.046484 8.0097656 23.646484 6.4397656 23.646484 4.5097656 C 23.646484 2.5697656 22.046484 1.0097656 20.146484 1.0097656 z M 20.146484 2.0097656 C 21.446484 2.0097656 22.646484 3.1297656 22.646484 4.5097656 C 22.646484 5.8797656 21.446484 7.0097656 20.146484 7.0097656 C 19.046484 7.0097656 18.145703 6.3096094 17.845703 5.3496094 L 17.746094 5.0097656 L 14.447266 5.0097656 C 13.347266 5.0097656 12.447266 5.8997656 12.447266 7.0097656 L 12.447266 8.8476562 C 12.298996 8.8261586 12.150754 8.8027344 12 8.8027344 C 11.954455 8.8027344 11.910576 8.8144662 11.865234 8.8164062 C 11.733157 8.716719 11.592447 8.6297054 11.447266 8.546875 L 11.447266 7.0097656 C 11.447266 5.3597656 12.847266 4.0097656 14.447266 4.0097656 L 17.746094 4.0097656 L 17.845703 3.6699219 C 18.145703 2.7099219 19.046484 2.0097656 20.146484 2.0097656 z M 3.8457031 7.0097656 C 4.9457031 7.0097656 5.8464844 7.7099219 6.1464844 8.6699219 L 6.2460938 9.0097656 L 9.4472656 9.0097656 C 9.8222656 9.0097656 10.165234 9.0792969 10.474609 9.2050781 C 10.207952 9.3508551 9.9554097 9.5233651 9.7402344 9.7421875 C 9.6554755 9.8255337 9.5878282 9.9233484 9.5136719 10.015625 C 9.4909069 10.014746 9.470428 10.009766 9.4472656 10.009766 L 6.2460938 10.009766 L 6.1464844 10.349609 C 5.8464844 11.319609 4.9457031 12.009766 3.8457031 12.009766 C 2.4457031 12.009766 1.3457031 10.899766 1.3457031 9.5097656 C 1.3457031 8.1197656 2.4457031 7.0097656 3.8457031 7.0097656 z M 20.146484 12.009766 C 21.446484 12.009766 22.646484 13.139766 22.646484 14.509766 C 22.646484 15.889766 21.446484 17.009766 20.146484 17.009766 C 19.046484 17.009766 18.145703 16.309609 17.845703 15.349609 L 17.746094 15.009766 L 14.447266 15.009766 C 14.100959 15.009766 13.772729 14.94045 13.470703 14.816406 C 13.754756 14.666178 14.02454 14.485593 14.253906 14.253906 C 14.328913 14.179151 14.386367 14.091269 14.453125 14.009766 L 17.746094 14.009766 L 17.845703 13.669922 C 18.145703 12.709922 19.046484 12.009766 20.146484 12.009766 z M 11.447266 15.144531 C 11.629002 15.17624 11.813246 15.199219 12 15.199219 C 12.018544 15.199153 12.036184 15.193748 12.054688 15.193359 C 12.180437 15.288088 12.3107 15.373496 12.447266 15.453125 L 12.447266 17 C 12.447266 18.67 11.147266 20 9.4472656 20 L 6.6464844 20 L 6.2460938 20 L 6.1464844 20.330078 C 5.8464844 21.290078 4.9457031 21.990234 3.8457031 21.990234 C 2.4457031 21.990234 1.3457031 20.870469 1.3457031 19.480469 C 1.3457031 18.090469 2.4457031 16.980469 3.8457031 16.980469 C 4.9457031 16.980469 5.8464844 17.680625 6.1464844 18.640625 L 6.2460938 18.980469 L 9.4472656 18.980469 C 10.547266 18.980469 11.447266 18.090469 11.447266 16.980469 L 11.447266 15.144531 z " + 
style="color:#000000;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:medium;line-height:normal;font-family:sans-serif;text-indent:0;text-align:start;text-decoration:none;text-decoration-line:none;text-decoration-style:solid;text-decoration-color:#000000;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;baseline-shift:baseline;text-anchor:start;white-space:normal;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#000000;fill-opacity:0.5;fill-rule:nonzero;stroke:none;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate" /> + </g> + </g> + </g> + </g> + <path d="M 3,3 l 18,18" style="stroke-width: 3; stroke: #000000; stroke-linecap: round;" /> + <path d="M 21,3 l -18,18" style="stroke-width: 3; stroke: #000000; stroke-linecap: round;" /> +</svg> diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index f10dddf589..4ba2f2615f 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -343,7 +343,6 @@ RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) : ui(new Ui::RPCConsole), clientModel(0), historyPtr(0), - cachedNodeid(-1), platformStyle(_platformStyle), peersTableContextMenu(0), banTableContextMenu(0), @@ -383,7 +382,6 @@ RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) : // based timer interface RPCSetTimerInterfaceIfUnset(rpcTimerInterface); - startExecutor(); setTrafficGraphRange(INITIAL_TRAFFIC_GRAPH_MINS); ui->detailWidget->hide(); @@ -397,7 +395,6 @@ RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) : RPCConsole::~RPCConsole() { GUIUtil::saveWindowGeometry("nRPCConsoleWindow", this); - Q_EMIT stopExecutor(); RPCUnsetTimerInterface(rpcTimerInterface); delete rpcTimerInterface; delete ui; @@ -459,6 +456,9 @@ void RPCConsole::setClientModel(ClientModel *model) setNumBlocks(model->getNumBlocks(), model->getLastBlockDate(), model->getVerificationProgress(NULL), false); connect(model, SIGNAL(numBlocksChanged(int,QDateTime,double,bool)), this, SLOT(setNumBlocks(int,QDateTime,double,bool))); + updateNetworkState(); + connect(model, SIGNAL(networkActiveChanged(bool)), this, SLOT(setNetworkActive(bool))); + updateTrafficStats(model->getTotalBytesRecv(), model->getTotalBytesSent()); connect(model, SIGNAL(bytesChanged(quint64,quint64)), this, SLOT(updateTrafficStats(quint64, quint64))); @@ -469,7 +469,7 @@ void RPCConsole::setClientModel(ClientModel *model) ui->peerWidget->verticalHeader()->hide(); ui->peerWidget->setEditTriggers(QAbstractItemView::NoEditTriggers); ui->peerWidget->setSelectionBehavior(QAbstractItemView::SelectRows); - ui->peerWidget->setSelectionMode(QAbstractItemView::SingleSelection); + ui->peerWidget->setSelectionMode(QAbstractItemView::ExtendedSelection); ui->peerWidget->setContextMenuPolicy(Qt::CustomContextMenu); ui->peerWidget->setColumnWidth(PeerTableModel::Address, ADDRESS_COLUMN_WIDTH); ui->peerWidget->setColumnWidth(PeerTableModel::Subversion, SUBVERSION_COLUMN_WIDTH); @@ -477,14 +477,14 @@ void RPCConsole::setClientModel(ClientModel *model) ui->peerWidget->horizontalHeader()->setStretchLastSection(true); // create 
peer table context menu actions - QAction* disconnectAction = new QAction(tr("&Disconnect Node"), this); - QAction* banAction1h = new QAction(tr("Ban Node for") + " " + tr("1 &hour"), this); - QAction* banAction24h = new QAction(tr("Ban Node for") + " " + tr("1 &day"), this); - QAction* banAction7d = new QAction(tr("Ban Node for") + " " + tr("1 &week"), this); - QAction* banAction365d = new QAction(tr("Ban Node for") + " " + tr("1 &year"), this); + QAction* disconnectAction = new QAction(tr("&Disconnect"), this); + QAction* banAction1h = new QAction(tr("Ban for") + " " + tr("1 &hour"), this); + QAction* banAction24h = new QAction(tr("Ban for") + " " + tr("1 &day"), this); + QAction* banAction7d = new QAction(tr("Ban for") + " " + tr("1 &week"), this); + QAction* banAction365d = new QAction(tr("Ban for") + " " + tr("1 &year"), this); // create peer table context menu - peersTableContextMenu = new QMenu(); + peersTableContextMenu = new QMenu(this); peersTableContextMenu->addAction(disconnectAction); peersTableContextMenu->addAction(banAction1h); peersTableContextMenu->addAction(banAction24h); @@ -514,7 +514,9 @@ void RPCConsole::setClientModel(ClientModel *model) this, SLOT(peerSelected(const QItemSelection &, const QItemSelection &))); // peer table signal handling - update peer details when new nodes are added to the model connect(model->getPeerTableModel(), SIGNAL(layoutChanged()), this, SLOT(peerLayoutChanged())); - + // peer table signal handling - cache selected node ids + connect(model->getPeerTableModel(), SIGNAL(layoutAboutToBeChanged()), this, SLOT(peerLayoutAboutToChange())); + // set up ban table ui->banlistWidget->setModel(model->getBanTableModel()); ui->banlistWidget->verticalHeader()->hide(); @@ -527,10 +529,10 @@ void RPCConsole::setClientModel(ClientModel *model) ui->banlistWidget->horizontalHeader()->setStretchLastSection(true); // create ban table context menu action - QAction* unbanAction = new QAction(tr("&Unban Node"), this); + QAction* unbanAction = new QAction(tr("&Unban"), this); // create ban table context menu - banTableContextMenu = new QMenu(); + banTableContextMenu = new QMenu(this); banTableContextMenu->addAction(unbanAction); // ban table context menu signals @@ -561,6 +563,14 @@ void RPCConsole::setClientModel(ClientModel *model) autoCompleter = new QCompleter(wordList, this); ui->lineEdit->setCompleter(autoCompleter); autoCompleter->popup()->installEventFilter(this); + // Start thread to execute RPC commands. + startExecutor(); + } + if (!model) { + // Client model is being set to 0, this means shutdown() is about to be called. + // Make sure we clean up the executor thread + Q_EMIT stopExecutor(); + thread.wait(); } } @@ -640,13 +650,18 @@ void RPCConsole::clear(bool clearHistory) "td.message { font-family: %1; font-size: %2; white-space:pre-wrap; } " "td.cmd-request { color: #006060; } " "td.cmd-error { color: red; } " + ".secwarning { color: red; }" "b { color: #006060; } " ).arg(fixedFontInfo.family(), QString("%1pt").arg(consoleFontSize)) ); message(CMD_REPLY, (tr("Welcome to the %1 RPC console.").arg(tr(PACKAGE_NAME)) + "<br>" + tr("Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.") + "<br>" + - tr("Type <b>help</b> for an overview of available commands.")), true); + tr("Type <b>help</b> for an overview of available commands.")) + + "<br><span class=\"secwarning\">" + + tr("WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. 
Do not use this console without fully understanding the ramification of a command.") + + "</span>", + true); } void RPCConsole::keyPressEvent(QKeyEvent *event) @@ -673,16 +688,30 @@ void RPCConsole::message(int category, const QString &message, bool html) ui->messagesWidget->append(out); } +void RPCConsole::updateNetworkState() +{ + QString connections = QString::number(clientModel->getNumConnections()) + " ("; + connections += tr("In:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_IN)) + " / "; + connections += tr("Out:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_OUT)) + ")"; + + if(!clientModel->getNetworkActive()) { + connections += " (" + tr("Network activity disabled") + ")"; + } + + ui->numberOfConnections->setText(connections); +} + void RPCConsole::setNumConnections(int count) { if (!clientModel) return; - QString connections = QString::number(count) + " ("; - connections += tr("In:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_IN)) + " / "; - connections += tr("Out:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_OUT)) + ")"; + updateNetworkState(); +} - ui->numberOfConnections->setText(connections); +void RPCConsole::setNetworkActive(bool networkActive) +{ + updateNetworkState(); } void RPCConsole::setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers) @@ -741,9 +770,8 @@ void RPCConsole::browseHistory(int offset) void RPCConsole::startExecutor() { - QThread *thread = new QThread; RPCExecutor *executor = new RPCExecutor(); - executor->moveToThread(thread); + executor->moveToThread(&thread); // Replies from executor object must go to this object connect(executor, SIGNAL(reply(int,QString)), this, SLOT(message(int,QString))); @@ -751,16 +779,14 @@ void RPCConsole::startExecutor() connect(this, SIGNAL(cmdRequest(QString)), executor, SLOT(request(QString))); // On stopExecutor signal - // - queue executor for deletion (in execution thread) // - quit the Qt event loop in the execution thread - connect(this, SIGNAL(stopExecutor()), executor, SLOT(deleteLater())); - connect(this, SIGNAL(stopExecutor()), thread, SLOT(quit())); - // Queue the thread for deletion (in this thread) when it is finished - connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater())); + connect(this, SIGNAL(stopExecutor()), &thread, SLOT(quit())); + // - queue executor for deletion (in execution thread) + connect(&thread, SIGNAL(finished()), executor, SLOT(deleteLater()), Qt::DirectConnection); // Default implementation of QThread::run() simply spins up an event loop in the thread, // which is what we want. 
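
The rpcconsole.cpp changes above move the RPCExecutor onto a QThread member: startExecutor() now runs once the client model is set, stopExecutor() only quits the thread's event loop, the executor is deleted from the thread's finished() signal, and thread.wait() makes shutdown deterministic. Below is a minimal header-style sketch of that worker-object lifecycle using a generic Worker/Console pair as stand-ins for RPCExecutor/RPCConsole (assumptions for illustration, not the actual classes).

// Editorial sketch (not part of the diff): worker object living on a member QThread,
// mirroring how RPCConsole now starts and stops its RPCExecutor.
#include <QObject>
#include <QString>
#include <QThread>

class Worker : public QObject
{
    Q_OBJECT
public Q_SLOTS:
    void request(const QString &cmd) { Q_EMIT reply(cmd.toUpper()); } // placeholder "work"
Q_SIGNALS:
    void reply(const QString &result);
};

class Console : public QObject
{
    Q_OBJECT
public:
    void startExecutor()
    {
        Worker *executor = new Worker();
        executor->moveToThread(&thread);
        // Requests and replies cross the thread boundary via queued signal/slot connections.
        connect(this, SIGNAL(cmdRequest(QString)), executor, SLOT(request(QString)));
        connect(executor, SIGNAL(reply(QString)), this, SIGNAL(done(QString)));
        // On stopExecutor: quit the worker thread's event loop...
        connect(this, SIGNAL(stopExecutor()), &thread, SLOT(quit()));
        // ...and delete the worker in its own thread once that loop has finished.
        connect(&thread, SIGNAL(finished()), executor, SLOT(deleteLater()), Qt::DirectConnection);
        thread.start(); // default QThread::run() spins up an event loop
    }
    void shutdown()
    {
        Q_EMIT stopExecutor();
        thread.wait(); // block until the executor thread has actually exited
    }
Q_SIGNALS:
    void cmdRequest(const QString &cmd);
    void stopExecutor();
    void done(const QString &result);
private:
    QThread thread;
};
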
- thread->start(); + thread.start(); } void RPCConsole::on_tabWidget_currentChanged(int index) @@ -825,6 +851,17 @@ void RPCConsole::peerSelected(const QItemSelection &selected, const QItemSelecti updateNodeDetail(stats); } +void RPCConsole::peerLayoutAboutToChange() +{ + QModelIndexList selected = ui->peerWidget->selectionModel()->selectedIndexes(); + cachedNodeids.clear(); + for(int i = 0; i < selected.size(); i++) + { + const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected.at(i).row()); + cachedNodeids.append(stats->nodeStats.nodeid); + } +} + void RPCConsole::peerLayoutChanged() { if (!clientModel || !clientModel->getPeerTableModel()) @@ -834,7 +871,7 @@ void RPCConsole::peerLayoutChanged() bool fUnselect = false; bool fReselect = false; - if (cachedNodeid == -1) // no node selected yet + if (cachedNodeids.empty()) // no node selected yet return; // find the currently selected row @@ -846,7 +883,7 @@ void RPCConsole::peerLayoutChanged() // check if our detail node has a row in the table (it may not necessarily // be at selectedRow since its position can change after a layout change) - int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeid); + int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.first()); if (detailNodeRow < 0) { @@ -872,7 +909,10 @@ void RPCConsole::peerLayoutChanged() if (fReselect) { - ui->peerWidget->selectRow(detailNodeRow); + for(int i = 0; i < cachedNodeids.size(); i++) + { + ui->peerWidget->selectRow(clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.at(i))); + } } if (stats) @@ -881,9 +921,6 @@ void RPCConsole::peerLayoutChanged() void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats) { - // Update cached nodeid - cachedNodeid = stats->nodeStats.nodeid; - // update the detail ui with latest node information QString peerAddrDetails(QString::fromStdString(stats->nodeStats.addrName) + " "); peerAddrDetails += tr("(node id: %1)").arg(QString::number(stats->nodeStats.nodeid)); @@ -973,33 +1010,44 @@ void RPCConsole::disconnectSelectedNode() { if(!g_connman) return; - // Get currently selected peer address - NodeId id = GUIUtil::getEntryData(ui->peerWidget, 0, PeerTableModel::NetNodeId).toInt(); - // Find the node, disconnect it and clear the selected node - if(g_connman->DisconnectNode(id)) - clearSelectedNode(); + + // Get selected peer addresses + QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId); + for(int i = 0; i < nodes.count(); i++) + { + // Get currently selected peer address + NodeId id = nodes.at(i).data().toInt(); + // Find the node, disconnect it and clear the selected node + if(g_connman->DisconnectNode(id)) + clearSelectedNode(); + } } void RPCConsole::banSelectedNode(int bantime) { if (!clientModel || !g_connman) return; - - if(cachedNodeid == -1) - return; - - // Get currently selected peer address - int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeid); - if(detailNodeRow < 0) - return; - - // Find possible nodes, ban it and clear the selected node - const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow); - if(stats) { - g_connman->Ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime); - clearSelectedNode(); - clientModel->getBanTableModel()->refresh(); + + // Get selected peer addresses + QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId); + for(int i = 0; i < nodes.count(); 
i++) + { + // Get currently selected peer address + NodeId id = nodes.at(i).data().toInt(); + + // Get currently selected peer address + int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(id); + if(detailNodeRow < 0) + return; + + // Find possible nodes, ban it and clear the selected node + const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow); + if(stats) { + g_connman->Ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime); + } } + clearSelectedNode(); + clientModel->getBanTableModel()->refresh(); } void RPCConsole::unbanSelectedNode() @@ -1007,22 +1055,27 @@ void RPCConsole::unbanSelectedNode() if (!clientModel) return; - // Get currently selected ban address - QString strNode = GUIUtil::getEntryData(ui->banlistWidget, 0, BanTableModel::Address).toString(); - CSubNet possibleSubnet; - - LookupSubNet(strNode.toStdString().c_str(), possibleSubnet); - if (possibleSubnet.IsValid() && g_connman) + // Get selected ban addresses + QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->banlistWidget, BanTableModel::Address); + for(int i = 0; i < nodes.count(); i++) { - g_connman->Unban(possibleSubnet); - clientModel->getBanTableModel()->refresh(); + // Get currently selected ban address + QString strNode = nodes.at(i).data().toString(); + CSubNet possibleSubnet; + + LookupSubNet(strNode.toStdString().c_str(), possibleSubnet); + if (possibleSubnet.IsValid() && g_connman) + { + g_connman->Unban(possibleSubnet); + clientModel->getBanTableModel()->refresh(); + } } } void RPCConsole::clearSelectedNode() { ui->peerWidget->selectionModel()->clearSelection(); - cachedNodeid = -1; + cachedNodeids.clear(); ui->detailWidget->hide(); ui->peerHeading->setText(tr("Select a peer to view detailed information.")); } diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h index 50224a1cc0..ab8c3dc914 100644 --- a/src/qt/rpcconsole.h +++ b/src/qt/rpcconsole.h @@ -12,6 +12,7 @@ #include <QWidget> #include <QCompleter> +#include <QThread> class ClientModel; class PlatformStyle; @@ -88,6 +89,8 @@ public Q_SLOTS: void message(int category, const QString &message, bool html = false); /** Set number of connections shown in the UI */ void setNumConnections(int count); + /** Set network state shown in the UI */ + void setNetworkActive(bool networkActive); /** Set number of blocks and last block date shown in the UI */ void setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers); /** Set size (number of transactions and memory usage) of the mempool in the UI */ @@ -98,6 +101,8 @@ public Q_SLOTS: void scrollToEnd(); /** Handle selection of peer in peers list */ void peerSelected(const QItemSelection &selected, const QItemSelection &deselected); + /** Handle selection caching before update */ + void peerLayoutAboutToChange(); /** Handle updated peer information */ void peerLayoutChanged(); /** Disconnect a selected node on the Peers tab */ @@ -135,13 +140,17 @@ private: ClientModel *clientModel; QStringList history; int historyPtr; - NodeId cachedNodeid; + QList<NodeId> cachedNodeids; const PlatformStyle *platformStyle; RPCTimerInterface *rpcTimerInterface; QMenu *peersTableContextMenu; QMenu *banTableContextMenu; int consoleFontSize; QCompleter *autoCompleter; + QThread thread; + + /** Update UI with latest network info from model. 
*/ + void updateNetworkState(); }; #endif // BITCOIN_QT_RPCCONSOLE_H diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index 4b2ba7d624..13210a1adc 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -16,8 +16,8 @@ #include "walletmodel.h" #include "base58.h" -#include "coincontrol.h" -#include "main.h" // mempool and minRelayTxFee +#include "wallet/coincontrol.h" +#include "validation.h" // mempool and minRelayTxFee #include "ui_interface.h" #include "txmempool.h" #include "wallet/wallet.h" @@ -110,7 +110,6 @@ SendCoinsDialog::SendCoinsDialog(const PlatformStyle *_platformStyle, QWidget *p ui->groupCustomFee->setId(ui->radioCustomPerKilobyte, 0); ui->groupCustomFee->setId(ui->radioCustomAtLeast, 1); ui->groupCustomFee->button((int)std::max(0, std::min(1, settings.value("nCustomFeeRadio").toInt())))->setChecked(true); - ui->sliderSmartFee->setValue(settings.value("nSmartFeeSliderPosition").toInt()); ui->customFee->setValue(settings.value("nTransactionFee").toLongLong()); ui->checkBoxMinimumFee->setChecked(settings.value("fPayOnlyMinFee").toBool()); minimizeFeeSection(settings.value("fFeeSectionMinimized").toBool()); @@ -172,6 +171,13 @@ void SendCoinsDialog::setModel(WalletModel *_model) updateMinFeeLabel(); updateSmartFeeLabel(); updateGlobalFeeVariables(); + + // set the smartfee-sliders default value (wallets default conf.target or last stored value) + QSettings settings; + if (settings.value("nSmartFeeSliderPosition").toInt() == 0) + ui->sliderSmartFee->setValue(ui->sliderSmartFee->maximum() - model->getDefaultConfirmTarget() + 2); + else + ui->sliderSmartFee->setValue(settings.value("nSmartFeeSliderPosition").toInt()); } } @@ -229,10 +235,17 @@ void SendCoinsDialog::on_sendButton_clicked() // prepare transaction for getting txFee earlier WalletModelTransaction currentTransaction(recipients); WalletModel::SendCoinsReturn prepareStatus; - if (model->getOptionsModel()->getCoinControlFeatures()) // coin control enabled - prepareStatus = model->prepareTransaction(currentTransaction, CoinControlDialog::coinControl); + + // Always use a CCoinControl instance, use the CoinControlDialog instance if CoinControl has been enabled + CCoinControl ctrl; + if (model->getOptionsModel()->getCoinControlFeatures()) + ctrl = *CoinControlDialog::coinControl; + if (ui->radioSmartFee->isChecked()) + ctrl.nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2; else - prepareStatus = model->prepareTransaction(currentTransaction); + ctrl.nConfirmTarget = 0; + + prepareStatus = model->prepareTransaction(currentTransaction, &ctrl); // process prepareStatus and on error generate message shown to user processSendCoinsReturn(prepareStatus, @@ -521,7 +534,7 @@ void SendCoinsDialog::processSendCoinsReturn(const WalletModel::SendCoinsReturn msgParams.second = CClientUIInterface::MSG_ERROR; break; case WalletModel::TransactionCommitFailed: - msgParams.first = tr("The transaction was rejected! 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here."); + msgParams.first = tr("The transaction was rejected with the following reason: %1").arg(sendCoinsReturn.reasonCommitFailed); msgParams.second = CClientUIInterface::MSG_ERROR; break; case WalletModel::AbsurdFee: @@ -576,6 +589,7 @@ void SendCoinsDialog::updateFeeSectionControls() ui->labelFeeEstimation ->setEnabled(ui->radioSmartFee->isChecked()); ui->labelSmartFeeNormal ->setEnabled(ui->radioSmartFee->isChecked()); ui->labelSmartFeeFast ->setEnabled(ui->radioSmartFee->isChecked()); + ui->confirmationTargetLabel ->setEnabled(ui->radioSmartFee->isChecked()); ui->checkBoxMinimumFee ->setEnabled(ui->radioCustomFee->isChecked()); ui->labelMinFeeWarning ->setEnabled(ui->radioCustomFee->isChecked()); ui->radioCustomPerKilobyte ->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked()); @@ -587,15 +601,17 @@ void SendCoinsDialog::updateGlobalFeeVariables() { if (ui->radioSmartFee->isChecked()) { - nTxConfirmTarget = defaultConfirmTarget - ui->sliderSmartFee->value(); + int nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2; payTxFee = CFeeRate(0); // set nMinimumTotalFee to 0 to not accidentally pay a custom fee CoinControlDialog::coinControl->nMinimumTotalFee = 0; + + // show the estimated reuquired time for confirmation + ui->confirmationTargetLabel->setText(GUIUtil::formatDurationStr(nConfirmTarget*600)+" / "+tr("%n block(s)", "", nConfirmTarget)); } else { - nTxConfirmTarget = defaultConfirmTarget; payTxFee = CFeeRate(ui->customFee->value()); // if user has selected to set a minimum absolute fee, pass the value to coincontrol @@ -630,7 +646,7 @@ void SendCoinsDialog::updateSmartFeeLabel() if(!model || !model->getOptionsModel()) return; - int nBlocksToConfirm = defaultConfirmTarget - ui->sliderSmartFee->value(); + int nBlocksToConfirm = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2; int estimateFoundAtBlocks = nBlocksToConfirm; CFeeRate feeRate = mempool.estimateSmartFee(nBlocksToConfirm, &estimateFoundAtBlocks); if (feeRate <= CFeeRate(0)) // not enough data => minfee @@ -701,6 +717,8 @@ void SendCoinsDialog::coinControlFeatureChanged(bool checked) if (!checked && model) // coin control features disabled CoinControlDialog::coinControl->SetNull(); + // make sure we set back the confirmation target + updateGlobalFeeVariables(); coinControlUpdateLabels(); } diff --git a/src/qt/sendcoinsdialog.h b/src/qt/sendcoinsdialog.h index 83dac0bd11..b0df495a98 100644 --- a/src/qt/sendcoinsdialog.h +++ b/src/qt/sendcoinsdialog.h @@ -26,8 +26,6 @@ QT_BEGIN_NAMESPACE class QUrl; QT_END_NAMESPACE -const int defaultConfirmTarget = 25; - /** Dialog for sending bitcoins */ class SendCoinsDialog : public QDialog { diff --git a/src/qt/signverifymessagedialog.cpp b/src/qt/signverifymessagedialog.cpp index 3e42f3a7b0..e28a6b47af 100644 --- a/src/qt/signverifymessagedialog.cpp +++ b/src/qt/signverifymessagedialog.cpp @@ -12,7 +12,7 @@ #include "base58.h" #include "init.h" -#include "main.h" // For strMessageMagic +#include "validation.h" // For strMessageMagic #include "wallet/wallet.h" #include <string> diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp index cd27385653..1b6e28e93e 100644 --- a/src/qt/splashscreen.cpp +++ b/src/qt/splashscreen.cpp @@ -147,6 +147,7 @@ void SplashScreen::slotFinish(QWidget *mainWin) if (isMinimized()) showNormal(); hide(); 
+ deleteLater(); // No more need for this } static void InitMessage(SplashScreen *splash, const std::string &message) diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp index 3dae33bafb..69757f9a93 100644 --- a/src/qt/test/rpcnestedtests.cpp +++ b/src/qt/test/rpcnestedtests.cpp @@ -6,7 +6,7 @@ #include "chainparams.h" #include "consensus/validation.h" -#include "main.h" +#include "validation.h" #include "rpc/register.h" #include "rpc/server.h" #include "rpcconsole.h" diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp index 65144e7865..e7d03a6119 100644 --- a/src/qt/transactiondesc.cpp +++ b/src/qt/transactiondesc.cpp @@ -11,7 +11,7 @@ #include "base58.h" #include "consensus/consensus.h" -#include "main.h" +#include "validation.h" #include "script/script.h" #include "timedata.h" #include "util.h" @@ -26,10 +26,10 @@ QString TransactionDesc::FormatTxStatus(const CWalletTx& wtx) AssertLockHeld(cs_main); if (!CheckFinalTx(wtx)) { - if (wtx.nLockTime < LOCKTIME_THRESHOLD) - return tr("Open for %n more block(s)", "", wtx.nLockTime - chainActive.Height()); + if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD) + return tr("Open for %n more block(s)", "", wtx.tx->nLockTime - chainActive.Height()); else - return tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx.nLockTime)); + return tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx.tx->nLockTime)); } else { @@ -133,7 +133,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco // Coinbase // CAmount nUnmatured = 0; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) nUnmatured += wallet->GetCredit(txout, ISMINE_ALL); strHTML += "<b>" + tr("Credit") + ":</b> "; if (wtx.IsInMainChain()) @@ -152,14 +152,14 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco else { isminetype fAllFromMe = ISMINE_SPENDABLE; - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { isminetype mine = wallet->IsMine(txin); if(fAllFromMe > mine) fAllFromMe = mine; } isminetype fAllToMe = ISMINE_SPENDABLE; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) { isminetype mine = wallet->IsMine(txout); if(fAllToMe > mine) fAllToMe = mine; @@ -173,7 +173,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco // // Debit // - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) { // Ignore change isminetype toSelf = wallet->IsMine(txout); @@ -212,7 +212,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco strHTML += "<b>" + tr("Total credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, nValue) + "<br>"; } - CAmount nTxFee = nDebit - wtx.GetValueOut(); + CAmount nTxFee = nDebit - wtx.tx->GetValueOut(); if (nTxFee > 0) strHTML += "<b>" + tr("Transaction fee") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -nTxFee) + "<br>"; } @@ -221,10 +221,10 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco // // Mixed debit transaction // - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) if (wallet->IsMine(txin)) strHTML += "<b>" + tr("Debit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -wallet->GetDebit(txin, ISMINE_ALL)) + "<br>"; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) if (wallet->IsMine(txout)) strHTML += "<b>" + 
tr("Credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, wallet->GetCredit(txout, ISMINE_ALL)) + "<br>"; } @@ -241,7 +241,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco strHTML += "<br><b>" + tr("Comment") + ":</b><br>" + GUIUtil::HtmlEscape(wtx.mapValue["comment"], true) + "<br>"; strHTML += "<b>" + tr("Transaction ID") + ":</b> " + rec->getTxID() + "<br>"; - strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.GetTotalSize()) + " bytes<br>"; + strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.tx->GetTotalSize()) + " bytes<br>"; strHTML += "<b>" + tr("Output index") + ":</b> " + QString::number(rec->getOutputIndex()) + "<br>"; // Message from normal bitcoin:URI (bitcoin:123...?message=example) @@ -276,20 +276,20 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco if (fDebug) { strHTML += "<hr><br>" + tr("Debug information") + "<br><br>"; - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) if(wallet->IsMine(txin)) strHTML += "<b>" + tr("Debit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -wallet->GetDebit(txin, ISMINE_ALL)) + "<br>"; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) if(wallet->IsMine(txout)) strHTML += "<b>" + tr("Credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, wallet->GetCredit(txout, ISMINE_ALL)) + "<br>"; strHTML += "<br><b>" + tr("Transaction") + ":</b><br>"; - strHTML += GUIUtil::HtmlEscape(wtx.ToString(), true); + strHTML += GUIUtil::HtmlEscape(wtx.tx->ToString(), true); strHTML += "<br><b>" + tr("Inputs") + ":</b>"; strHTML += "<ul>"; - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { COutPoint prevout = txin.prevout; diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index 4fe47181f6..a1d1422317 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -6,7 +6,7 @@ #include "base58.h" #include "consensus/consensus.h" -#include "main.h" +#include "validation.h" #include "timedata.h" #include "wallet/wallet.h" @@ -47,7 +47,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet * // // Credit // - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) { isminetype mine = wallet->IsMine(txout); if(mine) @@ -83,7 +83,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet * { bool involvesWatchAddress = false; isminetype fAllFromMe = ISMINE_SPENDABLE; - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { isminetype mine = wallet->IsMine(txin); if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true; @@ -91,7 +91,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet * } isminetype fAllToMe = ISMINE_SPENDABLE; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) { isminetype mine = wallet->IsMine(txout); if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true; @@ -112,11 +112,11 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet * // // Debit // - CAmount nTxFee = nDebit - wtx.GetValueOut(); + CAmount nTxFee = nDebit - wtx.tx->GetValueOut(); - for (unsigned int nOut = 0; nOut < wtx.vout.size(); nOut++) + for (unsigned int nOut = 0; nOut < wtx.tx->vout.size(); nOut++) { - const CTxOut& txout = 
wtx.vout[nOut]; + const CTxOut& txout = wtx.tx->vout[nOut]; TransactionRecord sub(hash, nTime); sub.idx = parts.size(); sub.involvesWatchAddress = involvesWatchAddress; @@ -190,15 +190,15 @@ void TransactionRecord::updateStatus(const CWalletTx &wtx) if (!CheckFinalTx(wtx)) { - if (wtx.nLockTime < LOCKTIME_THRESHOLD) + if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD) { status.status = TransactionStatus::OpenUntilBlock; - status.open_for = wtx.nLockTime - chainActive.Height(); + status.open_for = wtx.tx->nLockTime - chainActive.Height(); } else { status.status = TransactionStatus::OpenUntilDate; - status.open_for = wtx.nLockTime; + status.open_for = wtx.tx->nLockTime; } } // For generated transactions, determine maturity diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp index 52261ff04b..da0742aa6a 100644 --- a/src/qt/transactiontablemodel.cpp +++ b/src/qt/transactiontablemodel.cpp @@ -14,7 +14,7 @@ #include "walletmodel.h" #include "core_io.h" -#include "main.h" +#include "validation.h" #include "sync.h" #include "uint256.h" #include "util.h" diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 856b16d2c4..f5cbde6238 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -37,7 +37,7 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *parent) : QWidget(parent), model(0), transactionProxyModel(0), - transactionView(0), abandonAction(0) + transactionView(0), abandonAction(0), columnResizingFixer(0) { // Build filter row setContentsMargins(0,0,0,0); @@ -147,7 +147,7 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa QAction *editLabelAction = new QAction(tr("Edit label"), this); QAction *showDetailsAction = new QAction(tr("Show transaction details"), this); - contextMenu = new QMenu(); + contextMenu = new QMenu(this); contextMenu->addAction(copyAddressAction); contextMenu->addAction(copyLabelAction); contextMenu->addAction(copyAmountAction); @@ -212,7 +212,7 @@ void TransactionView::setModel(WalletModel *_model) transactionView->setColumnWidth(TransactionTableModel::Type, TYPE_COLUMN_WIDTH); transactionView->setColumnWidth(TransactionTableModel::Amount, AMOUNT_MINIMUM_COLUMN_WIDTH); - columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(transactionView, AMOUNT_MINIMUM_COLUMN_WIDTH, MINIMUM_COLUMN_WIDTH); + columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(transactionView, AMOUNT_MINIMUM_COLUMN_WIDTH, MINIMUM_COLUMN_WIDTH, this); if (_model->getOptionsModel()) { diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp index 947bcdb15a..4ec022881c 100644 --- a/src/qt/utilitydialog.cpp +++ b/src/qt/utilitydialog.cpp @@ -171,22 +171,20 @@ ShutdownWindow::ShutdownWindow(QWidget *parent, Qt::WindowFlags f): setLayout(layout); } -void ShutdownWindow::showShutdownWindow(BitcoinGUI *window) +QWidget *ShutdownWindow::showShutdownWindow(BitcoinGUI *window) { if (!window) - return; + return nullptr; // Show a simple window indicating shutdown status QWidget *shutdownWindow = new ShutdownWindow(); - // We don't hold a direct pointer to the shutdown window after creation, so use - // Qt::WA_DeleteOnClose to make sure that the window will be deleted eventually. 
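
The utilitydialog.cpp hunk that begins above changes ShutdownWindow::showShutdownWindow() from a fire-and-forget window relying on Qt::WA_DeleteOnClose into one that returns the QWidget* so the caller can own and delete it during shutdown. The following tiny sketch contrasts the two ownership idioms with plain Qt widgets; the QPointer guard and the surrounding main() are illustrative assumptions, not part of this diff.

// Editorial sketch (not part of the diff): two ways to own a transient window.
#include <QApplication>
#include <QLabel>
#include <QPointer>

int main(int argc, char *argv[])
{
    QApplication app(argc, argv);

    // Old pattern: no pointer is kept; Qt::WA_DeleteOnClose makes Qt delete the
    // widget once it is closed (by the user or by code) while the event loop runs.
    QLabel *fireAndForget = new QLabel("Shutting down... (close me)");
    fireAndForget->setAttribute(Qt::WA_DeleteOnClose);
    fireAndForget->show();

    // New pattern: the caller keeps a guarded pointer -- as the GUI code can now do
    // with the QWidget* returned by showShutdownWindow() -- and deletes it explicitly
    // once shutdown has finished. QPointer nulls itself if the widget dies first.
    QPointer<QLabel> managed = new QLabel("Shutting down...");
    managed->show();
    delete managed;

    return app.exec(); // quits once the remaining window is closed
}
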
- shutdownWindow->setAttribute(Qt::WA_DeleteOnClose); shutdownWindow->setWindowTitle(window->windowTitle()); // Center shutdown window at where main window was const QPoint global = window->mapToGlobal(window->rect().center()); shutdownWindow->move(global.x() - shutdownWindow->width() / 2, global.y() - shutdownWindow->height() / 2); shutdownWindow->show(); + return shutdownWindow; } void ShutdownWindow::closeEvent(QCloseEvent *event) diff --git a/src/qt/utilitydialog.h b/src/qt/utilitydialog.h index 843bd7f67b..b930429578 100644 --- a/src/qt/utilitydialog.h +++ b/src/qt/utilitydialog.h @@ -43,7 +43,7 @@ class ShutdownWindow : public QWidget public: ShutdownWindow(QWidget *parent=0, Qt::WindowFlags f=0); - static void showShutdownWindow(BitcoinGUI *window); + static QWidget *showShutdownWindow(BitcoinGUI *window); protected: void closeEvent(QCloseEvent *event); diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 305cb4fefa..afc72fae69 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -5,6 +5,7 @@ #include "walletmodel.h" #include "addresstablemodel.h" +#include "consensus/validation.h" #include "guiconstants.h" #include "guiutil.h" #include "paymentserver.h" @@ -13,9 +14,11 @@ #include "base58.h" #include "keystore.h" -#include "main.h" +#include "validation.h" +#include "net.h" // for g_connman #include "sync.h" #include "ui_interface.h" +#include "util.h" // for GetBoolArg #include "wallet/wallet.h" #include "wallet/walletdb.h" // for BackupWallet @@ -64,7 +67,7 @@ CAmount WalletModel::getBalance(const CCoinControl *coinControl) const wallet->AvailableCoins(vCoins, true, coinControl); BOOST_FOREACH(const COutput& out, vCoins) if(out.fSpendable) - nBalance += out.tx->vout[out.i].nValue; + nBalance += out.tx->tx->vout[out.i].nValue; return nBalance; } @@ -328,12 +331,12 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran } CReserveKey *keyChange = transaction.getPossibleKeyChange(); - if(!wallet->CommitTransaction(*newTx, *keyChange, g_connman.get())) - return TransactionCommitFailed; + CValidationState state; + if(!wallet->CommitTransaction(*newTx, *keyChange, g_connman.get(), state)) + return SendCoinsReturn(TransactionCommitFailed, QString::fromStdString(state.GetRejectReason())); - CTransaction* t = (CTransaction*)newTx; CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); - ssTx << *t; + ssTx << *newTx->tx; transaction_array.append(&(ssTx[0]), ssTx.size()); } @@ -605,7 +608,7 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins) int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain(); if (nDepth < 0) continue; COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true, true); - if (outpoint.n < out.tx->vout.size() && wallet->IsMine(out.tx->vout[outpoint.n]) == ISMINE_SPENDABLE) + if (outpoint.n < out.tx->tx->vout.size() && wallet->IsMine(out.tx->tx->vout[outpoint.n]) == ISMINE_SPENDABLE) vCoins.push_back(out); } @@ -613,14 +616,14 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins) { COutput cout = out; - while (wallet->IsChange(cout.tx->vout[cout.i]) && cout.tx->vin.size() > 0 && wallet->IsMine(cout.tx->vin[0])) + while (wallet->IsChange(cout.tx->tx->vout[cout.i]) && cout.tx->tx->vin.size() > 0 && wallet->IsMine(cout.tx->tx->vin[0])) { - if (!wallet->mapWallet.count(cout.tx->vin[0].prevout.hash)) break; - cout = COutput(&wallet->mapWallet[cout.tx->vin[0].prevout.hash], cout.tx->vin[0].prevout.n, 0, true, true); + if 
(!wallet->mapWallet.count(cout.tx->tx->vin[0].prevout.hash)) break; + cout = COutput(&wallet->mapWallet[cout.tx->tx->vin[0].prevout.hash], cout.tx->tx->vin[0].prevout.n, 0, true, true); } CTxDestination address; - if(!out.fSpendable || !ExtractDestination(cout.tx->vout[cout.i].scriptPubKey, address)) + if(!out.fSpendable || !ExtractDestination(cout.tx->tx->vout[cout.i].scriptPubKey, address)) continue; mapCoins[QString::fromStdString(CBitcoinAddress(address).ToString())].push_back(out); } @@ -698,3 +701,8 @@ bool WalletModel::hdEnabled() const { return wallet->IsHDEnabled(); } + +int WalletModel::getDefaultConfirmTarget() const +{ + return nTxConfirmTarget; +} diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h index cdac60da36..eedf6e8cea 100644 --- a/src/qt/walletmodel.h +++ b/src/qt/walletmodel.h @@ -65,7 +65,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { std::string sAddress = address.toStdString(); std::string sLabel = label.toStdString(); std::string sMessage = message.toStdString(); @@ -144,9 +144,13 @@ public: // Return status record for SendCoins, contains error id + information struct SendCoinsReturn { - SendCoinsReturn(StatusCode _status = OK): - status(_status) {} + SendCoinsReturn(StatusCode _status = OK, QString _reasonCommitFailed = "") + : status(_status), + reasonCommitFailed(_reasonCommitFailed) + { + } StatusCode status; + QString reasonCommitFailed; }; // prepare transaction for getting txfee before sending coins @@ -207,6 +211,8 @@ public: bool hdEnabled() const; + int getDefaultConfirmTarget() const; + private: CWallet *wallet; bool fHaveWatchOnly; diff --git a/src/qt/walletmodeltransaction.cpp b/src/qt/walletmodeltransaction.cpp index fdec6a1c86..f9a95bed42 100644 --- a/src/qt/walletmodeltransaction.cpp +++ b/src/qt/walletmodeltransaction.cpp @@ -64,7 +64,7 @@ void WalletModelTransaction::reassignAmounts(int nChangePosRet) if (out.amount() <= 0) continue; if (i == nChangePosRet) i++; - subtotal += walletTransaction->vout[i].nValue; + subtotal += walletTransaction->tx->vout[i].nValue; i++; } rcp.amount = subtotal; @@ -73,7 +73,7 @@ void WalletModelTransaction::reassignAmounts(int nChangePosRet) { if (i == nChangePosRet) i++; - rcp.amount = walletTransaction->vout[i].nValue; + rcp.amount = walletTransaction->tx->vout[i].nValue; i++; } } diff --git a/src/random.cpp b/src/random.cpp index aa027e49c4..c2605b45bd 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -11,7 +11,6 @@ #include "compat.h" // for Windows API #include <wincrypt.h> #endif -#include "serialize.h" // for begin_ptr(vec) #include "util.h" // for LogPrint() #include "utilstrencodings.h" // for GetTime() @@ -72,15 +71,15 @@ static void RandAddSeedPerfmon() const size_t nMaxSize = 10000000; // Bail out at more than 10MB of performance data while (true) { nSize = vData.size(); - ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", NULL, NULL, begin_ptr(vData), &nSize); + ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", NULL, NULL, vData.data(), &nSize); if (ret != ERROR_MORE_DATA || vData.size() >= nMaxSize) break; vData.resize(std::max((vData.size() * 3) / 2, nMaxSize)); // Grow size of buffer exponentially } RegCloseKey(HKEY_PERFORMANCE_DATA); if (ret == ERROR_SUCCESS) { - RAND_add(begin_ptr(vData), nSize, nSize / 100.0); - memory_cleanse(begin_ptr(vData), nSize); + RAND_add(vData.data(), nSize, nSize / 
100.0); + memory_cleanse(vData.data(), nSize); LogPrint("rand", "%s: %lu bytes\n", __func__, nSize); } else { static bool warned = false; // Warn only once diff --git a/src/rest.cpp b/src/rest.cpp index b8b5420626..6b6b7401f6 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -7,7 +7,7 @@ #include "chainparams.h" #include "primitives/block.h" #include "primitives/transaction.h" -#include "main.h" +#include "validation.h" #include "httpserver.h" #include "rpc/server.h" #include "streams.h" @@ -50,7 +50,7 @@ struct CCoin { ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(nTxVer); READWRITE(nHeight); @@ -228,7 +228,7 @@ static bool rest_block(HTTPRequest* req, return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } - CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); + CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; switch (rf) { @@ -363,12 +363,12 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart) if (!ParseHashStr(hashStr, hash)) return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); - CTransaction tx; + CTransactionRef tx; uint256 hashBlock = uint256(); if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, true)) return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); - CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); + CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssTx << tx; switch (rf) { @@ -388,7 +388,7 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart) case RF_JSON: { UniValue objTx(UniValue::VOBJ); - TxToJSON(tx, hashBlock, objTx); + TxToJSON(*tx, hashBlock, objTx); string strJSON = objTx.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index f538ddcc04..1139c2aa5c 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -9,7 +9,7 @@ #include "checkpoints.h" #include "coins.h" #include "consensus/validation.h" -#include "main.h" +#include "validation.h" #include "policy/policy.h" #include "primitives/transaction.h" #include "rpc/server.h" @@ -119,16 +119,16 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool tx result.push_back(Pair("versionHex", strprintf("%08x", block.nVersion))); result.push_back(Pair("merkleroot", block.hashMerkleRoot.GetHex())); UniValue txs(UniValue::VARR); - BOOST_FOREACH(const CTransaction&tx, block.vtx) + for(const auto& tx : block.vtx) { if(txDetails) { UniValue objTx(UniValue::VOBJ); - TxToJSON(tx, uint256(), objTx); + TxToJSON(*tx, uint256(), objTx); txs.push_back(objTx); } else - txs.push_back(tx.GetHash().GetHex()); + txs.push_back(tx->GetHash().GetHex()); } result.push_back(Pair("tx", txs)); result.push_back(Pair("time", block.GetBlockTime())); @@ -151,7 +151,7 @@ UniValue getblockcount(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 0) throw runtime_error( "getblockcount\n" - "\nReturns the number of blocks in the longest block chain.\n" + "\nReturns the number of blocks in the longest blockchain.\n" "\nResult:\n" "n (numeric) The current block count\n" "\nExamples:\n" @@ -168,7 +168,7 @@ UniValue getbestblockhash(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 0) throw runtime_error( "getbestblockhash\n" - 
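The random.cpp hunk above drops the old begin_ptr(vData) helper in favour of std::vector::data(), which C++11 guarantees returns a pointer to the vector's contiguous storage and which is well defined even for an empty vector, unlike &v[0] (script/interpreter.cpp gets the same treatment later in this diff). A minimal standalone sketch of handing such a buffer to a C-style API; FillBuffer is a hypothetical stand-in:

#include <cstring>
#include <vector>

// Hypothetical C-style API that writes into a caller-provided buffer.
static void FillBuffer(unsigned char* p, size_t n) { std::memset(p, 0xAB, n); }

int main()
{
    std::vector<unsigned char> vData(250000);
    FillBuffer(vData.data(), vData.size());   // was: FillBuffer(begin_ptr(vData), ...)
    return 0;
}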
"\nReturns the hash of the best (tip) block in the longest block chain.\n" + "\nReturns the hash of the best (tip) block in the longest blockchain.\n" "\nResult\n" "\"hex\" (string) the block hash hex encoded\n" "\nExamples\n" @@ -194,12 +194,12 @@ UniValue waitfornewblock(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() > 1) throw runtime_error( - "waitfornewblock\n" + "waitfornewblock (timeout)\n" "\nWaits for a specific new block and returns useful info about it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" - "1. timeout (milliseconds) (int, optional, default=false)\n" - "\nResult::\n" + "1. timeout (int, optional, default=0) Time in milliseconds to wait for a response. 0 indicates no timeout.\n" + "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" @@ -232,13 +232,13 @@ UniValue waitforblock(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) throw runtime_error( - "waitforblock\n" + "waitforblock <blockhash> (timeout)\n" "\nWaits for a specific new block and returns useful info about it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. blockhash to wait for (string)\n" - "2. timeout (milliseconds) (int, optional, default=false)\n" - "\nResult::\n" + "2. timeout (int, optional, default=0) Time in milliseconds to wait for a response. 0 indicates no timeout.\n" + "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" @@ -274,14 +274,14 @@ UniValue waitforblockheight(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) throw runtime_error( - "waitforblock\n" + "waitforblockheight <blockheight> (timeout)\n" "\nWaits for (at least) block height and returns the height and hash\n" - "\nof the current tip.\n" + "of the current tip.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. block height to wait for (int)\n" - "2. timeout (milliseconds) (int, optional, default=false)\n" - "\nResult::\n" + "2. timeout (int, optional, default=0) Time in milliseconds to wait for a response. 0 indicates no timeout.\n" + "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" @@ -751,7 +751,7 @@ UniValue getblock(const JSONRPCRequest& request) if (!fVerbose) { - CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); + CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; @@ -863,7 +863,7 @@ UniValue gettxout(const JSONRPCRequest& request) "\nArguments:\n" "1. \"txid\" (string, required) The transaction id\n" "2. n (numeric, required) vout number\n" - "3. includemempool (boolean, optional) Whether to include the mem pool\n" + "3. 
includemempool (boolean, optional) Whether to include the mempool\n" "\nResult:\n" "{\n" " \"bestblock\" : \"hash\", (string) the block hash\n" @@ -1027,7 +1027,7 @@ UniValue getblockchaininfo(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 0) throw runtime_error( "getblockchaininfo\n" - "Returns an object containing various state info regarding block chain processing.\n" + "Returns an object containing various state info regarding blockchain processing.\n" "\nResult:\n" "{\n" " \"chain\": \"xxxx\", (string) current network name as defined in BIP70 (main, test, regtest)\n" @@ -1319,7 +1319,7 @@ UniValue invalidateblock(const JSONRPCRequest& request) } if (state.IsValid()) { - ActivateBestChain(state, Params(), NULL); + ActivateBestChain(state, Params()); } if (!state.IsValid()) { @@ -1357,7 +1357,7 @@ UniValue reconsiderblock(const JSONRPCRequest& request) } CValidationState state; - ActivateBestChain(state, Params(), NULL); + ActivateBestChain(state, Params()); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 8370a0f43e..03992a278d 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -109,6 +109,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "prioritisetransaction", 2 }, { "setban", 2 }, { "setban", 3 }, + { "setnetworkactive", 0 }, { "getmempoolancestors", 1 }, { "getmempooldescendants", 1 }, }; diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index d509dd691f..73797e2019 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -12,7 +12,7 @@ #include "consensus/validation.h" #include "core_io.h" #include "init.h" -#include "main.h" +#include "validation.h" #include "miner.h" #include "net.h" #include "pow.h" @@ -131,8 +131,8 @@ UniValue generateBlocks(boost::shared_ptr<CReserveScript> coinbaseScript, int nG if (pblock->nNonce == nInnerLoopCount) { continue; } - CValidationState state; - if (!ProcessNewBlock(state, Params(), NULL, pblock, true, NULL)) + std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock); + if (!ProcessNewBlock(Params(), shared_pblock, true, NULL)) throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted"); ++nHeight; blockHashes.push_back(pblock->GetHash().GetHex()); @@ -230,7 +230,7 @@ UniValue getmininginfo(const JSONRPCRequest& request) " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n" " \"errors\": \"...\" (string) Current errors\n" " \"networkhashps\": nnn, (numeric) The network hashes per second\n" - " \"pooledtx\": n (numeric) The size of the mem pool\n" + " \"pooledtx\": n (numeric) The size of the mempool\n" " \"chain\": \"xxxx\", (string) current network name as defined in BIP70 (main, test, regtest)\n" "}\n" "\nExamples:\n" @@ -558,7 +558,8 @@ UniValue getblocktemplate(const JSONRPCRequest& request) UniValue transactions(UniValue::VARR); map<uint256, int64_t> setTxIndex; int i = 0; - BOOST_FOREACH (CTransaction& tx, pblock->vtx) { + for (const auto& it : pblock->vtx) { + const CTransaction& tx = *it; uint256 txHash = tx.GetHash(); setTxIndex[txHash] = i++; @@ -663,7 +664,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request) result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); result.push_back(Pair("transactions", transactions)); result.push_back(Pair("coinbaseaux", aux)); - result.push_back(Pair("coinbasevalue", (int64_t)pblock->vtx[0].vout[0].nValue)); + result.push_back(Pair("coinbasevalue", 
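The rpc/client.cpp entry added above ({ "setnetworkactive", 0 }) matters because bitcoin-cli passes every positional argument as a string; only the (method, position) pairs listed in vRPCConvertParams are parsed as JSON before the request is sent, so a bare true/false on the command line becomes a JSON boolean for this new call. A rough standalone sketch of that lookup, with simplified types rather than the real conversion table:

#include <set>
#include <string>
#include <utility>

// Positions whose CLI string should be parsed as JSON rather than passed
// through verbatim -- a simplified stand-in for vRPCConvertParams above.
static const std::set<std::pair<std::string, int>> g_convert = {
    {"setnetworkactive", 0},
    {"setban", 2},
    {"setban", 3},
};

static bool ConvertToJSON(const std::string& method, int idx)
{
    return g_convert.count({method, idx}) > 0;
}

// ConvertToJSON("setnetworkactive", 0) -> true : "false" becomes JSON false
// ConvertToJSON("setban", 1)           -> false: the argument stays a string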
(int64_t)pblock->vtx[0]->vout[0].nValue)); result.push_back(Pair("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast))); result.push_back(Pair("target", hashTarget.GetHex())); result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1)); @@ -680,7 +681,9 @@ UniValue getblocktemplate(const JSONRPCRequest& request) result.push_back(Pair("curtime", pblock->GetBlockTime())); result.push_back(Pair("bits", strprintf("%08x", pblock->nBits))); result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1))); - if (!pblocktemplate->vchCoinbaseCommitment.empty()) { + + const struct BIP9DeploymentInfo& segwit_info = VersionBitsDeploymentInfo[Consensus::DEPLOYMENT_SEGWIT]; + if (!pblocktemplate->vchCoinbaseCommitment.empty() && setClientRules.find(segwit_info.name) != setClientRules.end()) { result.push_back(Pair("default_witness_commitment", HexStr(pblocktemplate->vchCoinbaseCommitment.begin(), pblocktemplate->vchCoinbaseCommitment.end()))); } @@ -726,7 +729,8 @@ UniValue submitblock(const JSONRPCRequest& request) + HelpExampleRpc("submitblock", "\"mydata\"") ); - CBlock block; + std::shared_ptr<CBlock> blockptr = std::make_shared<CBlock>(); + CBlock& block = *blockptr; if (!DecodeHexBlk(block, request.params[0].get_str())) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); @@ -754,10 +758,9 @@ UniValue submitblock(const JSONRPCRequest& request) } } - CValidationState state; submitblock_StateCatcher sc(block.GetHash()); RegisterValidationInterface(&sc); - bool fAccepted = ProcessNewBlock(state, Params(), NULL, &block, true, NULL); + bool fAccepted = ProcessNewBlock(Params(), blockptr, true, NULL); UnregisterValidationInterface(&sc); if (fBlockPresent) { @@ -765,13 +768,9 @@ UniValue submitblock(const JSONRPCRequest& request) return "duplicate-inconclusive"; return "duplicate"; } - if (fAccepted) - { - if (!sc.found) - return "inconclusive"; - state = sc.state; - } - return BIP22ValidationResult(state); + if (!sc.found) + return "inconclusive"; + return BIP22ValidationResult(sc.state); } UniValue estimatefee(const JSONRPCRequest& request) @@ -788,6 +787,8 @@ UniValue estimatefee(const JSONRPCRequest& request) "\n" "A negative value is returned if not enough transactions and blocks\n" "have been observed to make an estimate.\n" + "-1 is always returned for nblocks == 1 as it is impossible to calculate\n" + "a fee that is high enough to get reliably included in the next block.\n" "\nExample:\n" + HelpExampleCli("estimatefee", "6") ); @@ -810,7 +811,7 @@ UniValue estimatepriority(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 1) throw runtime_error( "estimatepriority nblocks\n" - "\nEstimates the approximate priority a zero-fee transaction needs to begin\n" + "\nDEPRECATED. Estimates the approximate priority a zero-fee transaction needs to begin\n" "confirmation within nblocks blocks.\n" "\nArguments:\n" "1. nblocks (numeric)\n" @@ -873,7 +874,7 @@ UniValue estimatesmartpriority(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 1) throw runtime_error( "estimatesmartpriority nblocks\n" - "\nWARNING: This interface is unstable and may disappear or change!\n" + "\nDEPRECATED. 
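Both generateBlocks and submitblock above now hand ProcessNewBlock a std::shared_ptr to the block instead of a raw pointer plus a CValidationState, so validation can share ownership of the block (keeping it alive as long as it needs) without copying it. A minimal standalone sketch of that ownership idea; Block and Process are hypothetical stand-ins, not the real validation API:

#include <memory>
#include <string>
#include <vector>

struct Block { std::vector<std::string> vtx; };

// The consumer takes shared ownership of an immutable block; it may stash the
// pointer and finish its work after the caller's local object is gone.
static bool Process(const std::shared_ptr<const Block>& pblock)
{
    return !pblock->vtx.empty();
}

int main()
{
    Block assembled;
    assembled.vtx.push_back("coinbase");
    // Copy once into shared, read-only storage -- mirroring
    // std::make_shared<const CBlock>(*pblock) in generateBlocks above.
    auto shared_block = std::make_shared<const Block>(assembled);
    return Process(shared_block) ? 0 : 1;
}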
WARNING: This interface is unstable and may disappear or change!\n" "\nEstimates the approximate priority a zero-fee transaction needs to begin\n" "confirmation within nblocks blocks if possible and return the number of blocks\n" "for which the estimate is valid.\n" diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index eaef4856b3..2aaee7f3f5 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -6,7 +6,7 @@ #include "base58.h" #include "clientversion.h" #include "init.h" -#include "main.h" +#include "validation.h" #include "net.h" #include "netbase.h" #include "rpc/server.h" @@ -57,7 +57,7 @@ UniValue getinfo(const JSONRPCRequest& request) " \"proxy\": \"host:port\", (string, optional) the proxy used by the server\n" " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" " \"testnet\": true|false, (boolean) if the server is using testnet or not\n" - " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds since GMT epoch) of the oldest pre-generated key in the key pool\n" + " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds since Unix epoch) of the oldest pre-generated key in the key pool\n" " \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated\n" " \"unlocked_until\": ttt, (numeric) the timestamp in seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is unlocked for transfers, or 0 if the wallet is locked\n" " \"paytxfee\": x.xxxx, (numeric) the transaction fee set in " + CURRENCY_UNIT + "/kB\n" @@ -450,10 +450,53 @@ UniValue setmocktime(const JSONRPCRequest& request) return NullUniValue; } +static UniValue RPCLockedMemoryInfo() +{ + LockedPool::Stats stats = LockedPoolManager::Instance().stats(); + UniValue obj(UniValue::VOBJ); + obj.push_back(Pair("used", uint64_t(stats.used))); + obj.push_back(Pair("free", uint64_t(stats.free))); + obj.push_back(Pair("total", uint64_t(stats.total))); + obj.push_back(Pair("locked", uint64_t(stats.locked))); + obj.push_back(Pair("chunks_used", uint64_t(stats.chunks_used))); + obj.push_back(Pair("chunks_free", uint64_t(stats.chunks_free))); + return obj; +} + +UniValue getmemoryinfo(const JSONRPCRequest& request) +{ + /* Please, avoid using the word "pool" here in the RPC interface or help, + * as users will undoubtedly confuse it with the other "memory pool" + */ + if (request.fHelp || request.params.size() != 0) + throw runtime_error( + "getmemoryinfo\n" + "Returns an object containing information about memory usage.\n" + "\nResult:\n" + "{\n" + " \"locked\": { (json object) Information about locked memory manager\n" + " \"used\": xxxxx, (numeric) Number of bytes used\n" + " \"free\": xxxxx, (numeric) Number of bytes available in current arenas\n" + " \"total\": xxxxxxx, (numeric) Total number of bytes managed\n" + " \"locked\": xxxxxx, (numeric) Amount of bytes that succeeded locking. 
If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n" + " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n" + " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n" + " }\n" + "}\n" + "\nExamples:\n" + + HelpExampleCli("getmemoryinfo", "") + + HelpExampleRpc("getmemoryinfo", "") + ); + UniValue obj(UniValue::VOBJ); + obj.push_back(Pair("locked", RPCLockedMemoryInfo())); + return obj; +} + static const CRPCCommand commands[] = { // category name actor (function) okSafeMode // --------------------- ------------------------ ----------------------- ---------- { "control", "getinfo", &getinfo, true }, /* uses wallet if enabled */ + { "control", "getmemoryinfo", &getmemoryinfo, true }, { "util", "validateaddress", &validateaddress, true }, /* uses wallet if enabled */ { "util", "createmultisig", &createmultisig, true }, { "util", "verifymessage", &verifymessage, true }, diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 2b43f08f0b..53c0f993dc 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -6,8 +6,9 @@ #include "chainparams.h" #include "clientversion.h" -#include "main.h" +#include "validation.h" #include "net.h" +#include "net_processing.h" #include "netbase.h" #include "protocol.h" #include "sync.h" @@ -331,7 +332,7 @@ UniValue getnettotals(const JSONRPCRequest& request) "{\n" " \"totalbytesrecv\": n, (numeric) Total bytes received\n" " \"totalbytessent\": n, (numeric) Total bytes sent\n" - " \"timemillis\": t, (numeric) Total cpu time\n" + " \"timemillis\": t, (numeric) Current UNIX time in milliseconds\n" " \"uploadtarget\":\n" " {\n" " \"timeframe\": n, (numeric) Length of the measuring timeframe in seconds\n" @@ -401,6 +402,7 @@ UniValue getnetworkinfo(const JSONRPCRequest& request) " \"localrelay\": true|false, (bool) true if transaction relay is requested from peers\n" " \"timeoffset\": xxxxx, (numeric) the time offset\n" " \"connections\": xxxxx, (numeric) the number of connections\n" + " \"networkactive\": true|false, (bool) whether p2p networking is enabled\n" " \"networks\": [ (array) information per network\n" " {\n" " \"name\": \"xxx\", (string) network (ipv4, ipv6 or onion)\n" @@ -435,8 +437,10 @@ UniValue getnetworkinfo(const JSONRPCRequest& request) obj.push_back(Pair("localservices", strprintf("%016x", g_connman->GetLocalServices()))); obj.push_back(Pair("localrelay", fRelayTxes)); obj.push_back(Pair("timeoffset", GetTimeOffset())); - if(g_connman) + if (g_connman) { + obj.push_back(Pair("networkactive", g_connman->GetNetworkActive())); obj.push_back(Pair("connections", (int)g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL))); + } obj.push_back(Pair("networks", GetNetworksInfo())); obj.push_back(Pair("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK()))); UniValue localAddresses(UniValue::VARR); @@ -571,6 +575,24 @@ UniValue clearbanned(const JSONRPCRequest& request) return NullUniValue; } +UniValue setnetworkactive(const JSONRPCRequest& request) +{ + if (request.fHelp || request.params.size() != 1) { + throw runtime_error( + "setnetworkactive true|false\n" + "Disable/enable all p2p network activity." 
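The getmemoryinfo RPC introduced above exposes the locked-memory arena used for key material; per its own help text, a "locked" value smaller than "total" means some pages could not be locked and secret data could be swapped to disk. A hedged in-tree sketch of checking that condition programmatically -- the Stats fields come from the RPCLockedMemoryInfo hunk above, while the include paths, the LogPrintf call and the check itself are assumptions, not code from this patch:

#include "support/lockedpool.h"   // assumed header location for LockedPoolManager
#include "util.h"                 // assumed header for LogPrintf

// Hypothetical sanity check: warn if part of the secure arena is unlocked.
static void WarnIfKeyMemoryUnlocked()
{
    LockedPool::Stats stats = LockedPoolManager::Instance().stats();
    if (stats.locked < stats.total) {
        LogPrintf("Warning: only %u of %u bytes of key memory are locked\n",
                  (unsigned)stats.locked, (unsigned)stats.total);
    }
}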
+ ); + } + + if (!g_connman) { + throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); + } + + g_connman->SetNetworkActive(request.params[0].get_bool()); + + return g_connman->GetNetworkActive(); +} + static const CRPCCommand commands[] = { // category name actor (function) okSafeMode // --------------------- ------------------------ ----------------------- ---------- @@ -585,6 +607,7 @@ static const CRPCCommand commands[] = { "network", "setban", &setban, true }, { "network", "listbanned", &listbanned, true }, { "network", "clearbanned", &clearbanned, true }, + { "network", "setnetworkactive", &setnetworkactive, true, }, }; void RegisterNetRPCCommands(CRPCTable &t) diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 0656a61755..1229a00c5f 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -10,7 +10,7 @@ #include "core_io.h" #include "init.h" #include "keystore.h" -#include "main.h" +#include "validation.h" #include "merkleblock.h" #include "net.h" #include "policy/policy.h" @@ -135,17 +135,17 @@ UniValue getrawtransaction(const JSONRPCRequest& request) "or there is an unspent output in the utxo for this transaction. To make it always work,\n" "you need to maintain a transaction index, using the -txindex command line option.\n" "\nReturn the raw transaction data.\n" - "\nIf verbose=0, returns a string that is serialized, hex-encoded data for 'txid'.\n" - "If verbose is non-zero, returns an Object with information about 'txid'.\n" + "\nIf verbose is 'true', returns an Object with information about 'txid'.\n" + "If verbose is 'false' or omitted, returns a string that is serialized, hex-encoded data for 'txid'.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id\n" - "2. verbose (numeric, optional, default=0) If 0, return a string, other return a json object\n" + "2. verbose (bool, optional, default=false) If true, return a string, other return a json object\n" - "\nResult (if verbose is not set or set to 0):\n" + "\nResult (if verbose is not set or set to false):\n" "\"data\" (string) The serialized, hex-encoded data for 'txid'\n" - "\nResult (if verbose > 0):\n" + "\nResult (if verbose is set to true):\n" "{\n" " \"hex\" : \"data\", (string) The serialized, hex-encoded data for 'txid'\n" " \"txid\" : \"id\", (string) The transaction id (same as provided)\n" @@ -192,31 +192,45 @@ UniValue getrawtransaction(const JSONRPCRequest& request) "\nExamples:\n" + HelpExampleCli("getrawtransaction", "\"mytxid\"") - + HelpExampleCli("getrawtransaction", "\"mytxid\" 1") - + HelpExampleRpc("getrawtransaction", "\"mytxid\", 1") + + HelpExampleCli("getrawtransaction", "\"mytxid\" true") + + HelpExampleRpc("getrawtransaction", "\"mytxid\", true") ); LOCK(cs_main); uint256 hash = ParseHashV(request.params[0], "parameter 1"); + // Accept either a bool (true) or a num (>=1) to indicate verbose output. bool fVerbose = false; - if (request.params.size() > 1) - fVerbose = (request.params[1].get_int() != 0); + if (request.params.size() > 1) { + if (request.params[1].isNum()) { + if (request.params[1].get_int() != 0) { + fVerbose = true; + } + } + else if(request.params[1].isBool()) { + if(request.params[1].isTrue()) { + fVerbose = true; + } + } + else { + throw JSONRPCError(RPC_TYPE_ERROR, "Invalid type provided. 
Verbose parameter must be a boolean."); + } + } - CTransaction tx; + CTransactionRef tx; uint256 hashBlock; if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, true)) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available about transaction"); - string strHex = EncodeHexTx(tx); + string strHex = EncodeHexTx(*tx, RPCSerializationFlags()); if (!fVerbose) return strHex; UniValue result(UniValue::VOBJ); result.push_back(Pair("hex", strHex)); - TxToJSON(tx, hashBlock, result); + TxToJSON(*tx, hashBlock, result); return result; } @@ -275,7 +289,7 @@ UniValue gettxoutproof(const JSONRPCRequest& request) if (pblockindex == NULL) { - CTransaction tx; + CTransactionRef tx; if (!GetTransaction(oneTxid, tx, Params().GetConsensus(), hashBlock, false) || hashBlock.IsNull()) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not yet in block"); if (!mapBlockIndex.count(hashBlock)) @@ -288,8 +302,8 @@ UniValue gettxoutproof(const JSONRPCRequest& request) throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk"); unsigned int ntxFound = 0; - BOOST_FOREACH(const CTransaction&tx, block.vtx) - if (setTxids.count(tx.GetHash())) + for (const auto& tx : block.vtx) + if (setTxids.count(tx->GetHash())) ntxFound++; if (ntxFound != setTxids.size()) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "(Not all) transactions not found in specified block"); @@ -506,13 +520,13 @@ UniValue decoderawtransaction(const JSONRPCRequest& request) LOCK(cs_main); RPCTypeCheck(request.params, boost::assign::list_of(UniValue::VSTR)); - CTransaction tx; + CMutableTransaction mtx; - if (!DecodeHexTx(tx, request.params[0].get_str(), true)) + if (!DecodeHexTx(mtx, request.params[0].get_str(), true)) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); UniValue result(UniValue::VOBJ); - TxToJSON(tx, uint256(), result); + TxToJSON(CTransaction(std::move(mtx)), uint256(), result); return result; } @@ -869,9 +883,10 @@ UniValue sendrawtransaction(const JSONRPCRequest& request) RPCTypeCheck(request.params, boost::assign::list_of(UniValue::VSTR)(UniValue::VBOOL)); // parse hex string from parameter - CTransaction tx; - if (!DecodeHexTx(tx, request.params[0].get_str())) + CMutableTransaction mtx; + if (!DecodeHexTx(mtx, request.params[0].get_str())) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); + CTransaction tx(std::move(mtx)); uint256 hashTx = tx.GetHash(); bool fLimitFree = false; diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 29d0bee1b2..2122819398 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -147,6 +147,8 @@ uint256 ParseHashV(const UniValue& v, string strName) strHex = v.get_str(); if (!IsHex(strHex)) // Note: IsHex("") is false throw JSONRPCError(RPC_INVALID_PARAMETER, strName+" must be hexadecimal string (not '"+strHex+"')"); + if (64 != strHex.length()) + throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s must be of length %d (not %d)", strName, 64, strHex.length())); uint256 result; result.SetHex(strHex); return result; @@ -495,4 +497,12 @@ void RPCRunLater(const std::string& name, boost::function<void(void)> func, int6 deadlineTimers.emplace(name, std::unique_ptr<RPCTimerBase>(timerInterface->NewTimer(func, nSeconds*1000))); } +int RPCSerializationFlags() +{ + int flag = 0; + if (GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) == 0) + flag |= SERIALIZE_TRANSACTION_NO_WITNESS; + return flag; +} + CRPCTable tableRPC; diff --git a/src/rpc/server.h b/src/rpc/server.h index c59886222c..4fac68a51f 100644 --- 
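RPCSerializationFlags(), defined just above and OR'd into PROTOCOL_VERSION wherever REST and RPC serialize blocks or transactions, lets -rpcserialversion=0 strip witness data from hex results while the P2P code keeps serializing with witnesses. A small self-contained sketch of the same "extra flag bits on top of a version word" idea; the constant values below are illustrative, not the real protocol or serialization constants:

#include <cstdio>

static const int PROTO_VERSION   = 70000;       // illustrative base version
static const int NO_WITNESS_FLAG = 0x40000000;  // illustrative high flag bit

static bool WithWitness(int nVersion)
{
    return (nVersion & NO_WITNESS_FLAG) == 0;   // flag set => old-style serialization
}

int main()
{
    int rpc_version = PROTO_VERSION | NO_WITNESS_FLAG;  // as if -rpcserialversion=0
    std::printf("p2p with witness: %d, rpc with witness: %d\n",
                WithWitness(PROTO_VERSION), WithWitness(rpc_version));
    return 0;
}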
a/src/rpc/server.h +++ b/src/rpc/server.h @@ -19,6 +19,8 @@ #include <univalue.h> +static const unsigned int DEFAULT_RPC_SERIALIZE_VERSION = 1; + class CRPCCommand; namespace RPCServer @@ -198,4 +200,7 @@ void StopRPC(); std::string JSONRPCExecBatch(const UniValue& vReq); void RPCNotifyBlockChange(bool ibd, const CBlockIndex *); +// Retrieves any serialization flags requested in command line argument +int RPCSerializationFlags(); + #endif // BITCOIN_RPCSERVER_H diff --git a/src/scheduler.cpp b/src/scheduler.cpp index 52777b61f9..27c03f154c 100644 --- a/src/scheduler.cpp +++ b/src/scheduler.cpp @@ -54,9 +54,10 @@ void CScheduler::serviceQueue() #else // Some boost versions have a conflicting overload of wait_until that returns void. // Explicitly use a template here to avoid hitting that overload. - while (!shouldStop() && !taskQueue.empty() && - newTaskScheduled.wait_until<>(lock, taskQueue.begin()->first) != boost::cv_status::timeout) { - // Keep waiting until timeout + while (!shouldStop() && !taskQueue.empty()) { + boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first; + if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout) + break; // Exit loop after timeout, it means we reached the time of the event } #endif // If there are multiple threads, the queue can empty while we're waiting (another diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp index b629f4278b..036d6ca7bf 100644 --- a/src/script/bitcoinconsensus.cpp +++ b/src/script/bitcoinconsensus.cpp @@ -23,7 +23,7 @@ public: m_remaining(txToLen) {} - TxInputStream& read(char* pch, size_t nSize) + void read(char* pch, size_t nSize) { if (nSize > m_remaining) throw std::ios_base::failure(std::string(__func__) + ": end of data"); @@ -37,16 +37,17 @@ public: memcpy(pch, m_data, nSize); m_remaining -= nSize; m_data += nSize; - return *this; } template<typename T> TxInputStream& operator>>(T& obj) { - ::Unserialize(*this, obj, m_type, m_version); + ::Unserialize(*this, obj); return *this; } + int GetVersion() const { return m_version; } + int GetType() const { return m_type; } private: const int m_type; const int m_version; @@ -69,17 +70,25 @@ struct ECCryptoClosure ECCryptoClosure instance_of_eccryptoclosure; } +/** Check that all specified flags are part of the libconsensus interface. */ +static bool verify_flags(unsigned int flags) +{ + return (flags & ~(bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL)) == 0; +} + static int verify_script(const unsigned char *scriptPubKey, unsigned int scriptPubKeyLen, CAmount amount, const unsigned char *txTo , unsigned int txToLen, unsigned int nIn, unsigned int flags, bitcoinconsensus_error* err) { + if (!verify_flags(flags)) { + return bitcoinconsensus_ERR_INVALID_FLAGS; + } try { TxInputStream stream(SER_NETWORK, PROTOCOL_VERSION, txTo, txToLen); - CTransaction tx; - stream >> tx; + CTransaction tx(deserialize, stream); if (nIn >= tx.vin.size()) return set_error(err, bitcoinconsensus_ERR_TX_INDEX); - if (tx.GetSerializeSize(SER_NETWORK, PROTOCOL_VERSION) != txToLen) + if (GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) != txToLen) return set_error(err, bitcoinconsensus_ERR_TX_SIZE_MISMATCH); // Regardless of the verification result, the tx did not error. 
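The scheduler.cpp change above makes the intent of the wait loop explicit: keep waiting while tasks exist, and leave the loop once wait_until reports a timeout, which means the front task's scheduled time has arrived. A standalone sketch of the same pattern with std::condition_variable (the boost types in the hunk behave analogously); the surrounding names are illustrative, not the real CScheduler:

#include <chrono>
#include <condition_variable>
#include <map>
#include <mutex>

static std::mutex m;
static std::condition_variable cv;
static std::map<std::chrono::steady_clock::time_point, int> taskQueue;
static bool stopping = false;

// Caller holds a unique_lock on m, mirroring serviceQueue() above.
static void WaitForNextTask(std::unique_lock<std::mutex>& lock)
{
    while (!stopping && !taskQueue.empty()) {
        auto timeToWaitFor = taskQueue.begin()->first;
        if (cv.wait_until(lock, timeToWaitFor) == std::cv_status::timeout)
            break; // the earliest task is now due; go run it
        // otherwise we were woken because the queue (or the stop flag) changed
    }
}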
diff --git a/src/script/bitcoinconsensus.h b/src/script/bitcoinconsensus.h index 1d2d5c23e4..1bef4fe9e9 100644 --- a/src/script/bitcoinconsensus.h +++ b/src/script/bitcoinconsensus.h @@ -42,6 +42,7 @@ typedef enum bitcoinconsensus_error_t bitcoinconsensus_ERR_TX_SIZE_MISMATCH, bitcoinconsensus_ERR_TX_DESERIALIZE, bitcoinconsensus_ERR_AMOUNT_REQUIRED, + bitcoinconsensus_ERR_INVALID_FLAGS, } bitcoinconsensus_error; /** Script verification flags */ @@ -54,6 +55,9 @@ enum bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY = (1U << 9), // enable CHECKLOCKTIMEVERIFY (BIP65) bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY = (1U << 10), // enable CHECKSEQUENCEVERIFY (BIP112) bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS = (1U << 11), // enable WITNESS (BIP141) + bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL = bitcoinconsensus_SCRIPT_FLAGS_VERIFY_P2SH | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_DERSIG | + bitcoinconsensus_SCRIPT_FLAGS_VERIFY_NULLDUMMY | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY | + bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS }; /// Returns 1 if the input nIn of the serialized transaction pointed to by diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 0e17ddc130..1410d0b73f 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -856,15 +856,15 @@ bool EvalScript(vector<vector<unsigned char> >& stack, const CScript& script, un valtype& vch = stacktop(-1); valtype vchHash((opcode == OP_RIPEMD160 || opcode == OP_SHA1 || opcode == OP_HASH160) ? 20 : 32); if (opcode == OP_RIPEMD160) - CRIPEMD160().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash)); + CRIPEMD160().Write(vch.data(), vch.size()).Finalize(vchHash.data()); else if (opcode == OP_SHA1) - CSHA1().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash)); + CSHA1().Write(vch.data(), vch.size()).Finalize(vchHash.data()); else if (opcode == OP_SHA256) - CSHA256().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash)); + CSHA256().Write(vch.data(), vch.size()).Finalize(vchHash.data()); else if (opcode == OP_HASH160) - CHash160().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash)); + CHash160().Write(vch.data(), vch.size()).Finalize(vchHash.data()); else if (opcode == OP_HASH256) - CHash256().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash)); + CHash256().Write(vch.data(), vch.size()).Finalize(vchHash.data()); popstack(stack); stack.push_back(vchHash); } @@ -1069,7 +1069,7 @@ public: /** Serialize the passed scriptCode, skipping OP_CODESEPARATORs */ template<typename S> - void SerializeScriptCode(S &s, int nType, int nVersion) const { + void SerializeScriptCode(S &s) const { CScript::const_iterator it = scriptCode.begin(); CScript::const_iterator itBegin = it; opcodetype opcode; @@ -1092,53 +1092,53 @@ public: /** Serialize an input of txTo */ template<typename S> - void SerializeInput(S &s, unsigned int nInput, int nType, int nVersion) const { + void SerializeInput(S &s, unsigned int nInput) const { // In case of SIGHASH_ANYONECANPAY, only the input being signed is serialized if (fAnyoneCanPay) nInput = nIn; // Serialize the prevout - ::Serialize(s, txTo.vin[nInput].prevout, nType, nVersion); + ::Serialize(s, txTo.vin[nInput].prevout); // Serialize the script if (nInput != nIn) // Blank out other inputs' signatures - ::Serialize(s, CScriptBase(), nType, nVersion); + ::Serialize(s, CScriptBase()); else - SerializeScriptCode(s, nType, nVersion); + 
SerializeScriptCode(s); // Serialize the nSequence if (nInput != nIn && (fHashSingle || fHashNone)) // let the others update at will - ::Serialize(s, (int)0, nType, nVersion); + ::Serialize(s, (int)0); else - ::Serialize(s, txTo.vin[nInput].nSequence, nType, nVersion); + ::Serialize(s, txTo.vin[nInput].nSequence); } /** Serialize an output of txTo */ template<typename S> - void SerializeOutput(S &s, unsigned int nOutput, int nType, int nVersion) const { + void SerializeOutput(S &s, unsigned int nOutput) const { if (fHashSingle && nOutput != nIn) // Do not lock-in the txout payee at other indices as txin - ::Serialize(s, CTxOut(), nType, nVersion); + ::Serialize(s, CTxOut()); else - ::Serialize(s, txTo.vout[nOutput], nType, nVersion); + ::Serialize(s, txTo.vout[nOutput]); } /** Serialize txTo */ template<typename S> - void Serialize(S &s, int nType, int nVersion) const { + void Serialize(S &s) const { // Serialize nVersion - ::Serialize(s, txTo.nVersion, nType, nVersion); + ::Serialize(s, txTo.nVersion); // Serialize vin unsigned int nInputs = fAnyoneCanPay ? 1 : txTo.vin.size(); ::WriteCompactSize(s, nInputs); for (unsigned int nInput = 0; nInput < nInputs; nInput++) - SerializeInput(s, nInput, nType, nVersion); + SerializeInput(s, nInput); // Serialize vout unsigned int nOutputs = fHashNone ? 0 : (fHashSingle ? nIn+1 : txTo.vout.size()); ::WriteCompactSize(s, nOutputs); for (unsigned int nOutput = 0; nOutput < nOutputs; nOutput++) - SerializeOutput(s, nOutput, nType, nVersion); + SerializeOutput(s, nOutput); // Serialize nLockTime - ::Serialize(s, txTo.nLockTime, nType, nVersion); + ::Serialize(s, txTo.nLockTime); } }; diff --git a/src/secp256k1/.gitignore b/src/secp256k1/.gitignore index efb277d347..87fea161ba 100644 --- a/src/secp256k1/.gitignore +++ b/src/secp256k1/.gitignore @@ -6,6 +6,7 @@ bench_schnorr_verify bench_recover bench_internal tests +exhaustive_tests gen_context *.exe *.so diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml index 2c5c63adad..2439529242 100644 --- a/src/secp256k1/.travis.yml +++ b/src/secp256k1/.travis.yml @@ -11,7 +11,7 @@ cache: - src/java/guava/ env: global: - - FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no schnorr=no RECOVERY=no EXPERIMENTAL=no + - FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no EXPERIMENTAL=no - GUAVA_URL=https://search.maven.org/remotecontent?filepath=com/google/guava/guava/18.0/guava-18.0.jar GUAVA_JAR=src/java/guava/guava-18.0.jar matrix: - SCALAR=32bit RECOVERY=yes @@ -22,15 +22,14 @@ env: - FIELD=64bit ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes - FIELD=64bit ASM=x86_64 - FIELD=64bit ENDOMORPHISM=yes ASM=x86_64 - - FIELD=32bit SCHNORR=yes EXPERIMENTAL=yes - FIELD=32bit ENDOMORPHISM=yes - BIGNUM=no - - BIGNUM=no ENDOMORPHISM=yes SCHNORR=yes RECOVERY=yes EXPERIMENTAL=yes + - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes - BIGNUM=no STATICPRECOMPUTATION=no - BUILD=distcheck - EXTRAFLAGS=CPPFLAGS=-DDETERMINISTIC - EXTRAFLAGS=CFLAGS=-O0 - - BUILD=check-java ECDH=yes SCHNORR=yes EXPERIMENTAL=yes + - BUILD=check-java ECDH=yes EXPERIMENTAL=yes matrix: fast_finish: true include: @@ -66,5 +65,5 @@ before_script: ./autogen.sh script: - if [ -n "$HOST" ]; then export USE_HOST="--host=$HOST"; fi - if [ "x$HOST" = "xi686-linux-gnu" ]; then export CC="$CC -m32"; fi - - ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM 
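The signature-hash serializer hunks above illustrate the serialization cleanup that runs through this whole diff: SerializationOp, Serialize and Unserialize no longer take nType/nVersion, because the stream itself now answers GetType()/GetVersion() (as the TxInputStream hunk shows). A hedged in-tree sketch of a class using the macro-based pattern after the change -- the class and its fields are hypothetical, while ADD_SERIALIZE_METHODS and READWRITE are the macros already used elsewhere in this diff:

#include "serialize.h"
#include <cstdint>
#include <vector>

class CExampleRecord
{
public:
    int32_t nHeight = 0;
    std::vector<unsigned char> vchData;

    ADD_SERIALIZE_METHODS;

    // Two parameters only; if type or version is ever needed, ask the stream:
    //   s.GetType(), s.GetVersion()
    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream& s, Operation ser_action)
    {
        READWRITE(nHeight);
        READWRITE(vchData);
    }
};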
--with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-schnorr=$SCHNORR --enable-module-recovery=$RECOVERY $EXTRAFLAGS $USE_HOST && make -j2 $BUILD + - ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-recovery=$RECOVERY $EXTRAFLAGS $USE_HOST && make -j2 $BUILD os: linux diff --git a/src/secp256k1/Makefile.am b/src/secp256k1/Makefile.am index 3d130bdcbd..e5657f7f31 100644 --- a/src/secp256k1/Makefile.am +++ b/src/secp256k1/Makefile.am @@ -12,9 +12,11 @@ noinst_HEADERS = noinst_HEADERS += src/scalar.h noinst_HEADERS += src/scalar_4x64.h noinst_HEADERS += src/scalar_8x32.h +noinst_HEADERS += src/scalar_low.h noinst_HEADERS += src/scalar_impl.h noinst_HEADERS += src/scalar_4x64_impl.h noinst_HEADERS += src/scalar_8x32_impl.h +noinst_HEADERS += src/scalar_low_impl.h noinst_HEADERS += src/group.h noinst_HEADERS += src/group_impl.h noinst_HEADERS += src/num_gmp.h @@ -87,13 +89,23 @@ bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB) bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) endif +TESTS = if USE_TESTS noinst_PROGRAMS += tests tests_SOURCES = src/tests.c tests_CPPFLAGS = -DSECP256K1_BUILD -DVERIFY -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) tests_LDADD = $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) tests_LDFLAGS = -static -TESTS = tests +TESTS += tests +endif + +if USE_EXHAUSTIVE_TESTS +noinst_PROGRAMS += exhaustive_tests +exhaustive_tests_SOURCES = src/tests_exhaustive.c +exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -DVERIFY -I$(top_srcdir)/src $(SECP_INCLUDES) +exhaustive_tests_LDADD = $(SECP_LIBS) +exhaustive_tests_LDFLAGS = -static +TESTS += exhaustive_tests endif JAVAROOT=src/java @@ -154,10 +166,6 @@ if ENABLE_MODULE_ECDH include src/modules/ecdh/Makefile.am.include endif -if ENABLE_MODULE_SCHNORR -include src/modules/schnorr/Makefile.am.include -endif - if ENABLE_MODULE_RECOVERY include src/modules/recovery/Makefile.am.include endif diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 index b25d8adb92..b74acb8c13 100644 --- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 +++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 @@ -46,6 +46,10 @@ if test x"$has_libcrypto" = x"yes" && test x"$has_openssl_ec" = x; then ECDSA_sign(0, NULL, 0, NULL, NULL, eckey); ECDSA_verify(0, NULL, 0, NULL, 0, eckey); EC_KEY_free(eckey); + ECDSA_SIG *sig_openssl; + sig_openssl = ECDSA_SIG_new(); + (void)sig_openssl->r; + ECDSA_SIG_free(sig_openssl); ]])],[has_openssl_ec=yes],[has_openssl_ec=no]) AC_MSG_RESULT([$has_openssl_ec]) fi diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac index 0743c36690..ec50ffe3a2 100644 --- a/src/secp256k1/configure.ac +++ b/src/secp256k1/configure.ac @@ -104,6 +104,11 @@ AC_ARG_ENABLE(experimental, [use_experimental=$enableval], [use_experimental=no]) +AC_ARG_ENABLE(exhaustive_tests, + AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests (default is yes)]), + [use_exhaustive_tests=$enableval], + [use_exhaustive_tests=yes]) + AC_ARG_ENABLE(endomorphism, AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]), [use_endomorphism=$enableval], @@ -119,11 +124,6 @@ AC_ARG_ENABLE(module_ecdh, [enable_module_ecdh=$enableval], 
[enable_module_ecdh=no]) -AC_ARG_ENABLE(module_schnorr, - AS_HELP_STRING([--enable-module-schnorr],[enable Schnorr signature module (experimental)]), - [enable_module_schnorr=$enableval], - [enable_module_schnorr=no]) - AC_ARG_ENABLE(module_recovery, AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module (default is no)]), [enable_module_recovery=$enableval], @@ -381,9 +381,6 @@ fi if test x"$use_jni" != x"no"; then AX_JNI_INCLUDE_DIR have_jni_dependencies=yes - if test x"$enable_module_schnorr" = x"no"; then - have_jni_dependencies=no - fi if test x"$enable_module_ecdh" = x"no"; then have_jni_dependencies=no fi @@ -392,7 +389,7 @@ if test x"$use_jni" != x"no"; then fi if test "x$have_jni_dependencies" = "xno"; then if test x"$use_jni" = x"yes"; then - AC_MSG_ERROR([jni support explicitly requested but headers/dependencies were not found. Enable ECDH and Schnorr and try again.]) + AC_MSG_ERROR([jni support explicitly requested but headers/dependencies were not found. Enable ECDH and try again.]) fi AC_MSG_WARN([jni headers/dependencies not found. jni support disabled]) use_jni=no @@ -413,7 +410,7 @@ if test x"$use_endomorphism" = x"yes"; then AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization]) fi -if test x"$use_ecmult_static_precomputation" = x"yes"; then +if test x"$set_precomp" = x"yes"; then AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table]) fi @@ -421,10 +418,6 @@ if test x"$enable_module_ecdh" = x"yes"; then AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module]) fi -if test x"$enable_module_schnorr" = x"yes"; then - AC_DEFINE(ENABLE_MODULE_SCHNORR, 1, [Define this symbol to enable the Schnorr signature module]) -fi - if test x"$enable_module_recovery" = x"yes"; then AC_DEFINE(ENABLE_MODULE_RECOVERY, 1, [Define this symbol to enable the ECDSA pubkey recovery module]) fi @@ -442,7 +435,6 @@ AC_MSG_NOTICE([Using bignum implementation: $set_bignum]) AC_MSG_NOTICE([Using scalar implementation: $set_scalar]) AC_MSG_NOTICE([Using endomorphism optimizations: $use_endomorphism]) AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh]) -AC_MSG_NOTICE([Building Schnorr signatures module: $enable_module_schnorr]) AC_MSG_NOTICE([Building ECDSA pubkey recovery module: $enable_module_recovery]) AC_MSG_NOTICE([Using jni: $use_jni]) @@ -451,12 +443,8 @@ if test x"$enable_experimental" = x"yes"; then AC_MSG_NOTICE([WARNING: experimental build]) AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.]) AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh]) - AC_MSG_NOTICE([Building Schnorr signatures module: $enable_module_schnorr]) AC_MSG_NOTICE([******]) else - if test x"$enable_module_schnorr" = x"yes"; then - AC_MSG_ERROR([Schnorr signature module is experimental. Use --enable-experimental to allow.]) - fi if test x"$enable_module_ecdh" = x"yes"; then AC_MSG_ERROR([ECDH module is experimental. 
Use --enable-experimental to allow.]) fi @@ -473,10 +461,10 @@ AC_SUBST(SECP_LIBS) AC_SUBST(SECP_TEST_LIBS) AC_SUBST(SECP_TEST_INCLUDES) AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"]) +AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"]) AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"]) -AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$use_ecmult_static_precomputation" = x"yes"]) +AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"]) -AM_CONDITIONAL([ENABLE_MODULE_SCHNORR], [test x"$enable_module_schnorr" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"]) AM_CONDITIONAL([USE_JNI], [test x"$use_jni" == x"yes"]) AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"]) diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h index 7145dbcc54..f268e309d0 100644 --- a/src/secp256k1/include/secp256k1.h +++ b/src/secp256k1/include/secp256k1.h @@ -47,11 +47,8 @@ typedef struct secp256k1_context_struct secp256k1_context; * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage or transmission, use - * secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse. - * - * Furthermore, it is guaranteed that identical public keys (ignoring - * compression) will have identical representation, so they can be memcmp'ed. + * If you need to convert to a format suitable for storage, transmission, or + * comparison, use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse. */ typedef struct { unsigned char data[64]; @@ -62,12 +59,9 @@ typedef struct { * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage or transmission, use - * the secp256k1_ecdsa_signature_serialize_* and + * If you need to convert to a format suitable for storage, transmission, or + * comparison, use the secp256k1_ecdsa_signature_serialize_* and * secp256k1_ecdsa_signature_serialize_* functions. - * - * Furthermore, it is guaranteed to identical signatures will have identical - * representation, so they can be memcmp'ed. */ typedef struct { unsigned char data[64]; diff --git a/src/secp256k1/include/secp256k1_schnorr.h b/src/secp256k1/include/secp256k1_schnorr.h deleted file mode 100644 index dc32fec1ea..0000000000 --- a/src/secp256k1/include/secp256k1_schnorr.h +++ /dev/null @@ -1,173 +0,0 @@ -#ifndef _SECP256K1_SCHNORR_ -# define _SECP256K1_SCHNORR_ - -# include "secp256k1.h" - -# ifdef __cplusplus -extern "C" { -# endif - -/** Create a signature using a custom EC-Schnorr-SHA256 construction. It - * produces non-malleable 64-byte signatures which support public key recovery - * batch validation, and multiparty signing. - * Returns: 1: signature created - * 0: the nonce generation function failed, or the private key was - * invalid. 
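The secp256k1.h comment changes above withdraw the promise that identical keys or signatures have identical in-memory representation, so callers must no longer memcmp the opaque structs and should compare serialized forms instead. A hedged sketch of doing that for public keys, with error handling omitted; secp256k1_ec_pubkey_serialize is the public API the header itself points to, and SECP256K1_EC_COMPRESSED is assumed to be the usual compression flag:

#include <secp256k1.h>
#include <string.h>

/* Compare two public keys by their 33-byte compressed serialization,
 * since the opaque secp256k1_pubkey bytes are no longer comparable. */
static int pubkey_equal(const secp256k1_context* ctx,
                        const secp256k1_pubkey* a, const secp256k1_pubkey* b)
{
    unsigned char sa[33], sb[33];
    size_t la = sizeof(sa), lb = sizeof(sb);
    secp256k1_ec_pubkey_serialize(ctx, sa, &la, a, SECP256K1_EC_COMPRESSED);
    secp256k1_ec_pubkey_serialize(ctx, sb, &lb, b, SECP256K1_EC_COMPRESSED);
    return la == lb && memcmp(sa, sb, la) == 0;
}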
- * Args: ctx: pointer to a context object, initialized for signing - * (cannot be NULL) - * Out: sig64: pointer to a 64-byte array where the signature will be - * placed (cannot be NULL) - * In: msg32: the 32-byte message hash being signed (cannot be NULL) - * seckey: pointer to a 32-byte secret key (cannot be NULL) - * noncefp:pointer to a nonce generation function. If NULL, - * secp256k1_nonce_function_default is used - * ndata: pointer to arbitrary data used by the nonce generation - * function (can be NULL) - */ -SECP256K1_API int secp256k1_schnorr_sign( - const secp256k1_context* ctx, - unsigned char *sig64, - const unsigned char *msg32, - const unsigned char *seckey, - secp256k1_nonce_function noncefp, - const void *ndata -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Verify a signature created by secp256k1_schnorr_sign. - * Returns: 1: correct signature - * 0: incorrect signature - * Args: ctx: a secp256k1 context object, initialized for verification. - * In: sig64: the 64-byte signature being verified (cannot be NULL) - * msg32: the 32-byte message hash being verified (cannot be NULL) - * pubkey: the public key to verify with (cannot be NULL) - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_verify( - const secp256k1_context* ctx, - const unsigned char *sig64, - const unsigned char *msg32, - const secp256k1_pubkey *pubkey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Recover an EC public key from a Schnorr signature created using - * secp256k1_schnorr_sign. - * Returns: 1: public key successfully recovered (which guarantees a correct - * signature). - * 0: otherwise. - * Args: ctx: pointer to a context object, initialized for - * verification (cannot be NULL) - * Out: pubkey: pointer to a pubkey to set to the recovered public key - * (cannot be NULL). - * In: sig64: signature as 64 byte array (cannot be NULL) - * msg32: the 32-byte message hash assumed to be signed (cannot - * be NULL) - */ -SECP256K1_API int secp256k1_schnorr_recover( - const secp256k1_context* ctx, - secp256k1_pubkey *pubkey, - const unsigned char *sig64, - const unsigned char *msg32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Generate a nonce pair deterministically for use with - * secp256k1_schnorr_partial_sign. - * Returns: 1: valid nonce pair was generated. - * 0: otherwise (nonce generation function failed) - * Args: ctx: pointer to a context object, initialized for signing - * (cannot be NULL) - * Out: pubnonce: public side of the nonce (cannot be NULL) - * privnonce32: private side of the nonce (32 byte) (cannot be NULL) - * In: msg32: the 32-byte message hash assumed to be signed (cannot - * be NULL) - * sec32: the 32-byte private key (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, - * secp256k1_nonce_function_default is used - * noncedata: pointer to arbitrary data used by the nonce generation - * function (can be NULL) - * - * Do not use the output as a private/public key pair for signing/validation. 
- */ -SECP256K1_API int secp256k1_schnorr_generate_nonce_pair( - const secp256k1_context* ctx, - secp256k1_pubkey *pubnonce, - unsigned char *privnonce32, - const unsigned char *msg32, - const unsigned char *sec32, - secp256k1_nonce_function noncefp, - const void* noncedata -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Produce a partial Schnorr signature, which can be combined using - * secp256k1_schnorr_partial_combine, to end up with a full signature that is - * verifiable using secp256k1_schnorr_verify. - * Returns: 1: signature created successfully. - * 0: no valid signature exists with this combination of keys, nonces - * and message (chance around 1 in 2^128) - * -1: invalid private key, nonce, or public nonces. - * Args: ctx: pointer to context object, initialized for signing (cannot - * be NULL) - * Out: sig64: pointer to 64-byte array to put partial signature in - * In: msg32: pointer to 32-byte message to sign - * sec32: pointer to 32-byte private key - * pubnonce_others: pointer to pubkey containing the sum of the other's - * nonces (see secp256k1_ec_pubkey_combine) - * secnonce32: pointer to 32-byte array containing our nonce - * - * The intended procedure for creating a multiparty signature is: - * - Each signer S[i] with private key x[i] and public key Q[i] runs - * secp256k1_schnorr_generate_nonce_pair to produce a pair (k[i],R[i]) of - * private/public nonces. - * - All signers communicate their public nonces to each other (revealing your - * private nonce can lead to discovery of your private key, so it should be - * considered secret). - * - All signers combine all the public nonces they received (excluding their - * own) using secp256k1_ec_pubkey_combine to obtain an - * Rall[i] = sum(R[0..i-1,i+1..n]). - * - All signers produce a partial signature using - * secp256k1_schnorr_partial_sign, passing in their own private key x[i], - * their own private nonce k[i], and the sum of the others' public nonces - * Rall[i]. - * - All signers communicate their partial signatures to each other. - * - Someone combines all partial signatures using - * secp256k1_schnorr_partial_combine, to obtain a full signature. - * - The resulting signature is validatable using secp256k1_schnorr_verify, with - * public key equal to the result of secp256k1_ec_pubkey_combine of the - * signers' public keys (sum(Q[0..n])). - * - * Note that secp256k1_schnorr_partial_combine and secp256k1_ec_pubkey_combine - * function take their arguments in any order, and it is possible to - * pre-combine several inputs already with one call, and add more inputs later - * by calling the function again (they are commutative and associative). - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_sign( - const secp256k1_context* ctx, - unsigned char *sig64, - const unsigned char *msg32, - const unsigned char *sec32, - const secp256k1_pubkey *pubnonce_others, - const unsigned char *secnonce32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6); - -/** Combine multiple Schnorr partial signatures. - * Returns: 1: the passed signatures were successfully combined. 
- * 0: the resulting signature is not valid (chance of 1 in 2^256) - * -1: some inputs were invalid, or the signatures were not created - * using the same set of nonces - * Args: ctx: pointer to a context object - * Out: sig64: pointer to a 64-byte array to place the combined signature - * (cannot be NULL) - * In: sig64sin: pointer to an array of n pointers to 64-byte input - * signatures - * n: the number of signatures to combine (at least 1) - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_combine( - const secp256k1_context* ctx, - unsigned char *sig64, - const unsigned char * const * sig64sin, - size_t n -) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -# ifdef __cplusplus -} -# endif - -#endif diff --git a/src/secp256k1/src/ecdsa_impl.h b/src/secp256k1/src/ecdsa_impl.h index d110b4bb1d..9a42e519bd 100644 --- a/src/secp256k1/src/ecdsa_impl.h +++ b/src/secp256k1/src/ecdsa_impl.h @@ -203,7 +203,9 @@ static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar *sigs, const secp256k1_ge *pubkey, const secp256k1_scalar *message) { unsigned char c[32]; secp256k1_scalar sn, u1, u2; +#if !defined(EXHAUSTIVE_TEST_ORDER) secp256k1_fe xr; +#endif secp256k1_gej pubkeyj; secp256k1_gej pr; @@ -219,6 +221,21 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const if (secp256k1_gej_is_infinity(&pr)) { return 0; } + +#if defined(EXHAUSTIVE_TEST_ORDER) +{ + secp256k1_scalar computed_r; + int overflow = 0; + secp256k1_ge pr_ge; + secp256k1_ge_set_gej(&pr_ge, &pr); + secp256k1_fe_normalize(&pr_ge.x); + + secp256k1_fe_get_b32(c, &pr_ge.x); + secp256k1_scalar_set_b32(&computed_r, c, &overflow); + /* we fully expect overflow */ + return secp256k1_scalar_eq(sigr, &computed_r); +} +#else secp256k1_scalar_get_b32(c, sigr); secp256k1_fe_set_b32(&xr, c); @@ -252,6 +269,7 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const return 1; } return 0; +#endif } static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid) { diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h index 7a6a25318c..0db314c48e 100644 --- a/src/secp256k1/src/ecmult_const_impl.h +++ b/src/secp256k1/src/ecmult_const_impl.h @@ -78,7 +78,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) { /* Negative numbers will be negated to keep their bit representation below the maximum width */ flip = secp256k1_scalar_is_high(&s); /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ - bit = flip ^ (s.d[0] & 1); + bit = flip ^ !secp256k1_scalar_is_even(&s); /* We check for negative one, since adding 2 to it will cause an overflow */ secp256k1_scalar_negate(&neg_s, &s); not_neg_one = !secp256k1_scalar_is_one(&neg_s); diff --git a/src/secp256k1/src/ecmult_gen_impl.h b/src/secp256k1/src/ecmult_gen_impl.h index b63c4d8662..35f2546077 100644 --- a/src/secp256k1/src/ecmult_gen_impl.h +++ b/src/secp256k1/src/ecmult_gen_impl.h @@ -77,7 +77,7 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); } } - secp256k1_ge_set_all_gej_var(1024, prec, precj, cb); + 
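For reference, the secp256k1_ecdsa_sig_verify function patched above implements standard ECDSA verification: with message hash m, signature (r, s), public key Q and group order n, it computes

    u1 = m * s^-1 mod n
    u2 = r * s^-1 mod n
    R  = u1*G + u2*Q,    valid iff R is not infinity and R.x == r (mod n)

The new EXHAUSTIVE_TEST_ORDER branch performs that final comparison directly in the scalar group (reducing R.x mod n and comparing scalars) instead of going through a field element, which is why the xr temporary is compiled out in that configuration.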
secp256k1_ge_set_all_gej_var(prec, precj, 1024, cb); } for (j = 0; j < 64; j++) { for (i = 0; i < 16; i++) { diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h index 81ae08e100..4e40104ad4 100644 --- a/src/secp256k1/src/ecmult_impl.h +++ b/src/secp256k1/src/ecmult_impl.h @@ -7,15 +7,29 @@ #ifndef _SECP256K1_ECMULT_IMPL_H_ #define _SECP256K1_ECMULT_IMPL_H_ +#include <string.h> + #include "group.h" #include "scalar.h" #include "ecmult.h" -#include <string.h> - +#if defined(EXHAUSTIVE_TEST_ORDER) +/* We need to lower these values for exhaustive tests because + * the tables cannot have infinities in them (this breaks the + * affine-isomorphism stuff which tracks z-ratios) */ +# if EXHAUSTIVE_TEST_ORDER > 128 +# define WINDOW_A 5 +# define WINDOW_G 8 +# elif EXHAUSTIVE_TEST_ORDER > 8 +# define WINDOW_A 4 +# define WINDOW_G 4 +# else +# define WINDOW_A 2 +# define WINDOW_G 2 +# endif +#else /* optimal for 128-bit and 256-bit exponents. */ #define WINDOW_A 5 - /** larger numbers may result in slightly better performance, at the cost of exponentially larger precomputed tables. */ #ifdef USE_ENDOMORPHISM @@ -25,6 +39,7 @@ /** One table for window size 16: 1.375 MiB. */ #define WINDOW_G 16 #endif +#endif /** The number of entries a table with precomputed multiples needs to have. */ #define ECMULT_TABLE_SIZE(w) (1 << ((w)-2)) @@ -103,7 +118,7 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge /* Compute the odd multiples in Jacobian form. */ secp256k1_ecmult_odd_multiples_table(n, prej, zr, a); /* Convert them in batch to affine coordinates. */ - secp256k1_ge_set_table_gej_var(n, prea, prej, zr); + secp256k1_ge_set_table_gej_var(prea, prej, zr, n); /* Convert them to compact storage form. */ for (i = 0; i < n; i++) { secp256k1_ge_to_storage(&pre[i], &prea[i]); diff --git a/src/secp256k1/src/field.h b/src/secp256k1/src/field.h index c5ba074244..bbb1ee866c 100644 --- a/src/secp256k1/src/field.h +++ b/src/secp256k1/src/field.h @@ -30,6 +30,8 @@ #error "Please select field implementation" #endif +#include "util.h" + /** Normalize a field element. */ static void secp256k1_fe_normalize(secp256k1_fe *r); @@ -50,6 +52,9 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r); /** Set a field element equal to a small integer. Resulting field element is normalized. */ static void secp256k1_fe_set_int(secp256k1_fe *r, int a); +/** Sets a field element equal to zero, initializing all fields. */ +static void secp256k1_fe_clear(secp256k1_fe *a); + /** Verify whether a field element is zero. Requires the input to be normalized. */ static int secp256k1_fe_is_zero(const secp256k1_fe *a); @@ -110,7 +115,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a); /** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be * at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and * outputs must not overlap in memory. */ -static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a); +static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len); /** Convert a field element to the storage type. 
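The lowered WINDOW_A/WINDOW_G values above matter because precomputed-table size grows exponentially in the window width (per the ECMULT_TABLE_SIZE macro in the same hunk), and, as the new comment explains, for the tiny exhaustive-test groups the larger tables would contain the point at infinity, which the z-ratio tracking cannot represent. A quick standalone illustration of the entry counts, using the same formula as the macro:

#include <cstdio>

constexpr int TableSize(int w) { return 1 << (w - 2); }  // == ECMULT_TABLE_SIZE(w)

int main()
{
    // window:  2   4   5    8      16
    // entries: 1   4   8   64   16384
    const int windows[] = {2, 4, 5, 8, 16};
    for (int w : windows)
        std::printf("window %2d -> %5d entries\n", w, TableSize(w));
    return 0;
}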
*/ static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a); diff --git a/src/secp256k1/src/field_impl.h b/src/secp256k1/src/field_impl.h index 52cd902eb3..5127b279bc 100644 --- a/src/secp256k1/src/field_impl.h +++ b/src/secp256k1/src/field_impl.h @@ -260,7 +260,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { #endif } -static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a) { +static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len) { secp256k1_fe u; size_t i; if (len < 1) { diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h index d515716744..4957b248fe 100644 --- a/src/secp256k1/src/group.h +++ b/src/secp256k1/src/group.h @@ -65,12 +65,12 @@ static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a); static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a); /** Set a batch of group elements equal to the inputs given in jacobian coordinates */ -static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb); +static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb); /** Set a batch of group elements equal to the inputs given in jacobian * coordinates (with known z-ratios). zr must contain the known z-ratios such * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. */ -static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr); +static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len); /** Bring a batch inputs given in jacobian coordinates (with known z-ratios) to * the same global z "denominator". zr must contain the known z-ratios such diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h index 3e9c4c410d..2e192b62fd 100644 --- a/src/secp256k1/src/group_impl.h +++ b/src/secp256k1/src/group_impl.h @@ -11,6 +11,53 @@ #include "field.h" #include "group.h" +/* These points can be generated in sage as follows: + * + * 0. Setup a worksheet with the following parameters. + * b = 4 # whatever CURVE_B will be set to + * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) + * C = EllipticCurve ([F (0), F (b)]) + * + * 1. Determine all the small orders available to you. (If there are + * no satisfactory ones, go back and change b.) + * print C.order().factor(limit=1000) + * + * 2. Choose an order as one of the prime factors listed in the above step. + * (You can also multiply some to get a composite order, though the + * tests will crash trying to invert scalars during signing.) We take a + * random point and scale it to drop its order to the desired value. + * There is some probability this won't work; just try again. + * order = 199 + * P = C.random_point() + * P = (int(P.order()) / int(order)) * P + * assert(P.order() == order) + * + * 3. Print the values. You'll need to use a vim macro or something to + * split the hex output into 4-byte chunks. 
+ * print "%x %x" % P.xy() + */ +#if defined(EXHAUSTIVE_TEST_ORDER) +# if EXHAUSTIVE_TEST_ORDER == 199 +const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( + 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069, + 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18, + 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868, + 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED +); + +const int CURVE_B = 4; +# elif EXHAUSTIVE_TEST_ORDER == 13 +const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( + 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0, + 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15, + 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e, + 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac +); +const int CURVE_B = 2; +# else +# error No known generator for the specified exhaustive test group order. +# endif +#else /** Generator for secp256k1, value 'g' defined in * "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ @@ -21,8 +68,11 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL ); +const int CURVE_B = 7; +#endif + static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { - secp256k1_fe zi2; + secp256k1_fe zi2; secp256k1_fe zi3; secp256k1_fe_sqr(&zi2, zi); secp256k1_fe_mul(&zi3, &zi2, zi); @@ -76,7 +126,7 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { r->y = a->y; } -static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb) { +static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb) { secp256k1_fe *az; secp256k1_fe *azi; size_t i; @@ -89,7 +139,7 @@ static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp } azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count); - secp256k1_fe_inv_all_var(count, azi, az); + secp256k1_fe_inv_all_var(azi, az, count); free(az); count = 0; @@ -102,7 +152,7 @@ static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp free(azi); } -static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr) { +static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len) { size_t i = len - 1; secp256k1_fe zi; @@ -145,9 +195,15 @@ static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp static void secp256k1_gej_set_infinity(secp256k1_gej *r) { r->infinity = 1; - secp256k1_fe_set_int(&r->x, 0); - secp256k1_fe_set_int(&r->y, 0); - secp256k1_fe_set_int(&r->z, 0); + secp256k1_fe_clear(&r->x); + secp256k1_fe_clear(&r->y); + secp256k1_fe_clear(&r->z); +} + +static void secp256k1_ge_set_infinity(secp256k1_ge *r) { + r->infinity = 1; + secp256k1_fe_clear(&r->x); + secp256k1_fe_clear(&r->y); } static void secp256k1_gej_clear(secp256k1_gej *r) { @@ -169,7 +225,7 @@ static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) { secp256k1_fe_sqr(&x2, x); secp256k1_fe_mul(&x3, x, &x2); r->infinity = 0; - secp256k1_fe_set_int(&c, 7); + secp256k1_fe_set_int(&c, CURVE_B); secp256k1_fe_add(&c, &x3); return secp256k1_fe_sqrt(&r->y, &c); } @@ -228,7 +284,7 @@ static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) { secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); secp256k1_fe_sqr(&z2, &a->z); secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2); - secp256k1_fe_mul_int(&z6, 
7); + secp256k1_fe_mul_int(&z6, CURVE_B); secp256k1_fe_add(&x3, &z6); secp256k1_fe_normalize_weak(&x3); return secp256k1_fe_equal_var(&y2, &x3); @@ -242,7 +298,7 @@ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) { /* y^2 = x^3 + 7 */ secp256k1_fe_sqr(&y2, &a->y); secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_set_int(&c, 7); + secp256k1_fe_set_int(&c, CURVE_B); secp256k1_fe_add(&x3, &c); secp256k1_fe_normalize_weak(&x3); return secp256k1_fe_equal_var(&y2, &x3); @@ -260,7 +316,7 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. - * + * * Having said this, if this function receives a point on a sextic twist, e.g. by * a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6, * since -6 does have a cube root mod p. For this point, this function will not set diff --git a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java index be67048fbe..1c67802fba 100644 --- a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java +++ b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java @@ -32,7 +32,7 @@ import static org.bitcoin.NativeSecp256k1Util.*; * <p>You can find an example library that can be used for this at https://github.com/bitcoin/secp256k1</p> * * <p>To build secp256k1 for use with bitcoinj, run - * `./configure --enable-jni --enable-experimental --enable-module-schnorr --enable-module-ecdh` + * `./configure --enable-jni --enable-experimental --enable-module-ecdh` * and `make` then copy `.libs/libsecp256k1.so` to your system library path * or point the JVM to the folder containing it with -Djava.library.path * </p> @@ -417,36 +417,6 @@ public class NativeSecp256k1 { } } - public static byte[] schnorrSign(byte[] data, byte[] sec) throws AssertFailException { - Preconditions.checkArgument(data.length == 32 && sec.length <= 32); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null) { - byteBuff = ByteBuffer.allocateDirect(32 + 32); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(data); - byteBuff.put(sec); - - byte[][] retByteArray; - - r.lock(); - try { - retByteArray = secp256k1_schnorr_sign(byteBuff, Secp256k1Context.getContext()); - } finally { - r.unlock(); - } - - byte[] sigArr = retByteArray[0]; - int retVal = new BigInteger(new byte[] { retByteArray[1][0] }).intValue(); - - assertEquals(sigArr.length, 64, "Got bad signature length."); - - return retVal == 0 ? 
new byte[0] : sigArr; - } - private static native long secp256k1_ctx_clone(long context); private static native int secp256k1_context_randomize(ByteBuffer byteBuff, long context); @@ -471,8 +441,6 @@ public class NativeSecp256k1 { private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen); - private static native byte[][] secp256k1_schnorr_sign(ByteBuffer byteBuff, long context); - private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen); } diff --git a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java index f18ce95810..c00d08899b 100644 --- a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java +++ b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java @@ -167,22 +167,6 @@ public class NativeSecp256k1Test { assertEquals( result, true, "testRandomize"); } - /** - * This tests signSchnorr() for a valid secretkey - */ - public static void testSchnorrSign() throws AssertFailException{ - - byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing" - byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); - - byte[] resultArr = NativeSecp256k1.schnorrSign(data, sec); - String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( sigString, "C5E929AA058B982048760422D3B563749B7D0E50C5EBD8CD2FFC23214BD6A2F1B072C13880997EBA847CF20F2F90FCE07C1CA33A890A4127095A351127F8D95F" , "testSchnorrSign"); - } - - /** - * This tests signSchnorr() for a valid secretkey - */ public static void testCreateECDHSecret() throws AssertFailException{ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); @@ -216,11 +200,6 @@ public class NativeSecp256k1Test { testSignPos(); testSignNeg(); - //Test Schnorr (partial support) //TODO - testSchnorrSign(); - //testSchnorrVerify - //testSchnorrRecovery - //Test privKeyTweakAdd() 1 testPrivKeyTweakAdd_1(); diff --git a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c index dba9524dd4..bcef7b32ce 100644 --- a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c +++ b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c @@ -5,7 +5,6 @@ #include "include/secp256k1.h" #include "include/secp256k1_ecdh.h" #include "include/secp256k1_recovery.h" -#include "include/secp256k1_schnorr.h" SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone @@ -333,39 +332,6 @@ SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1p return 0; } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1schnorr_1sign - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - unsigned char* secKey = (unsigned char*) (data + 32); - - jobjectArray retArray; - jbyteArray sigArray, intsByteArray; - unsigned char intsarray[1]; - unsigned char sig[64]; - - int ret = secp256k1_schnorr_sign(ctx, sig, data, secKey, NULL, NULL); - - intsarray[0] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - 
(*env)->NewByteArray(env, 1)); - - sigArray = (*env)->NewByteArray(env, 64); - (*env)->SetByteArrayRegion(env, sigArray, 0, 64, (jbyte*)sig); - (*env)->SetObjectArrayElement(env, retArray, 0, sigArray); - - intsByteArray = (*env)->NewByteArray(env, 1); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 1, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; -} - SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) { diff --git a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h index 4125a1f523..fe613c9e9e 100644 --- a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h +++ b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h @@ -106,14 +106,6 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_schnorr_sign - * Signature: (Ljava/nio/ByteBuffer;JI)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1schnorr_1sign - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l); - -/* - * Class: org_bitcoin_NativeSecp256k1 * Method: secp256k1_ecdh * Signature: (Ljava/nio/ByteBuffer;JI)[[B */ diff --git a/src/secp256k1/src/modules/recovery/main_impl.h b/src/secp256k1/src/modules/recovery/main_impl.h index ec42f4bb6c..86f2f0cb2b 100644..100755 --- a/src/secp256k1/src/modules/recovery/main_impl.h +++ b/src/secp256k1/src/modules/recovery/main_impl.h @@ -138,16 +138,15 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd secp256k1_scalar_set_b32(&sec, seckey, &overflow); /* Fail if the secret key is invalid. 
*/ if (!overflow && !secp256k1_scalar_is_zero(&sec)) { + unsigned char nonce32[32]; unsigned int count = 0; secp256k1_scalar_set_b32(&msg, msg32, NULL); while (1) { - unsigned char nonce32[32]; ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } secp256k1_scalar_set_b32(&non, nonce32, &overflow); - memset(nonce32, 0, 32); if (!secp256k1_scalar_is_zero(&non) && !overflow) { if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) { break; @@ -155,6 +154,7 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd } count++; } + memset(nonce32, 0, 32); secp256k1_scalar_clear(&msg); secp256k1_scalar_clear(&non); secp256k1_scalar_clear(&sec); diff --git a/src/secp256k1/src/modules/schnorr/Makefile.am.include b/src/secp256k1/src/modules/schnorr/Makefile.am.include deleted file mode 100644 index f1af8e8325..0000000000 --- a/src/secp256k1/src/modules/schnorr/Makefile.am.include +++ /dev/null @@ -1,10 +0,0 @@ -include_HEADERS += include/secp256k1_schnorr.h -noinst_HEADERS += src/modules/schnorr/main_impl.h -noinst_HEADERS += src/modules/schnorr/schnorr.h -noinst_HEADERS += src/modules/schnorr/schnorr_impl.h -noinst_HEADERS += src/modules/schnorr/tests_impl.h -if USE_BENCHMARK -noinst_PROGRAMS += bench_schnorr_verify -bench_schnorr_verify_SOURCES = src/bench_schnorr_verify.c -bench_schnorr_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) -endif diff --git a/src/secp256k1/src/modules/schnorr/main_impl.h b/src/secp256k1/src/modules/schnorr/main_impl.h deleted file mode 100644 index fa176a1767..0000000000 --- a/src/secp256k1/src/modules/schnorr/main_impl.h +++ /dev/null @@ -1,164 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef SECP256K1_MODULE_SCHNORR_MAIN -#define SECP256K1_MODULE_SCHNORR_MAIN - -#include "include/secp256k1_schnorr.h" -#include "modules/schnorr/schnorr_impl.h" - -static void secp256k1_schnorr_msghash_sha256(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) { - secp256k1_sha256_t sha; - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, r32, 32); - secp256k1_sha256_write(&sha, msg32, 32); - secp256k1_sha256_finalize(&sha, h32); -} - -static const unsigned char secp256k1_schnorr_algo16[17] = "Schnorr+SHA256 "; - -int secp256k1_schnorr_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) { - secp256k1_scalar sec, non; - int ret = 0; - int overflow = 0; - unsigned int count = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(seckey != NULL); - if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; - } - - secp256k1_scalar_set_b32(&sec, seckey, NULL); - while (1) { - unsigned char nonce32[32]; - ret = noncefp(nonce32, msg32, seckey, secp256k1_schnorr_algo16, (void*)noncedata, count); - if (!ret) { - break; - } - secp256k1_scalar_set_b32(&non, nonce32, &overflow); - memset(nonce32, 0, 32); - if (!secp256k1_scalar_is_zero(&non) && !overflow) { - if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, 
sig64, &sec, &non, NULL, secp256k1_schnorr_msghash_sha256, msg32)) { - break; - } - } - count++; - } - if (!ret) { - memset(sig64, 0, 64); - } - secp256k1_scalar_clear(&non); - secp256k1_scalar_clear(&sec); - return ret; -} - -int secp256k1_schnorr_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_pubkey *pubkey) { - secp256k1_ge q; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(pubkey != NULL); - - secp256k1_pubkey_load(ctx, &q, pubkey); - return secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32); -} - -int secp256k1_schnorr_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *sig64, const unsigned char *msg32) { - secp256k1_ge q; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(pubkey != NULL); - - if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32)) { - secp256k1_pubkey_save(pubkey, &q); - return 1; - } else { - memset(pubkey, 0, sizeof(*pubkey)); - return 0; - } -} - -int secp256k1_schnorr_generate_nonce_pair(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, unsigned char *privnonce32, const unsigned char *sec32, const unsigned char *msg32, secp256k1_nonce_function noncefp, const void* noncedata) { - int count = 0; - int ret = 1; - secp256k1_gej Qj; - secp256k1_ge Q; - secp256k1_scalar sec; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sec32 != NULL); - ARG_CHECK(pubnonce != NULL); - ARG_CHECK(privnonce32 != NULL); - - if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; - } - - do { - int overflow; - ret = noncefp(privnonce32, sec32, msg32, secp256k1_schnorr_algo16, (void*)noncedata, count++); - if (!ret) { - break; - } - secp256k1_scalar_set_b32(&sec, privnonce32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&sec)) { - continue; - } - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sec); - secp256k1_ge_set_gej(&Q, &Qj); - - secp256k1_pubkey_save(pubnonce, &Q); - break; - } while(1); - - secp256k1_scalar_clear(&sec); - if (!ret) { - memset(pubnonce, 0, sizeof(*pubnonce)); - } - return ret; -} - -int secp256k1_schnorr_partial_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *sec32, const secp256k1_pubkey *pubnonce_others, const unsigned char *secnonce32) { - int overflow = 0; - secp256k1_scalar sec, non; - secp256k1_ge pubnon; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(sec32 != NULL); - ARG_CHECK(secnonce32 != NULL); - ARG_CHECK(pubnonce_others != NULL); - - secp256k1_scalar_set_b32(&sec, sec32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&sec)) { - return -1; - } - secp256k1_scalar_set_b32(&non, secnonce32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&non)) { - return -1; - } - secp256k1_pubkey_load(ctx, &pubnon, pubnonce_others); - return secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, &pubnon, secp256k1_schnorr_msghash_sha256, msg32); -} - -int secp256k1_schnorr_partial_combine(const secp256k1_context* ctx, 
unsigned char *sig64, const unsigned char * const *sig64sin, size_t n) { - ARG_CHECK(sig64 != NULL); - ARG_CHECK(n >= 1); - ARG_CHECK(sig64sin != NULL); - return secp256k1_schnorr_sig_combine(sig64, n, sig64sin); -} - -#endif diff --git a/src/secp256k1/src/modules/schnorr/schnorr.h b/src/secp256k1/src/modules/schnorr/schnorr.h deleted file mode 100644 index de18147bd5..0000000000 --- a/src/secp256k1/src/modules/schnorr/schnorr.h +++ /dev/null @@ -1,20 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php. * - ***********************************************************************/ - -#ifndef _SECP256K1_MODULE_SCHNORR_H_ -#define _SECP256K1_MODULE_SCHNORR_H_ - -#include "scalar.h" -#include "group.h" - -typedef void (*secp256k1_schnorr_msghash)(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32); - -static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32); -static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32); -static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32); -static int secp256k1_schnorr_sig_combine(unsigned char *sig64, size_t n, const unsigned char * const *sig64ins); - -#endif diff --git a/src/secp256k1/src/modules/schnorr/schnorr_impl.h b/src/secp256k1/src/modules/schnorr/schnorr_impl.h deleted file mode 100644 index e13ab6db7c..0000000000 --- a/src/secp256k1/src/modules/schnorr/schnorr_impl.h +++ /dev/null @@ -1,207 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php. * - ***********************************************************************/ - -#ifndef _SECP256K1_SCHNORR_IMPL_H_ -#define _SECP256K1_SCHNORR_IMPL_H_ - -#include <string.h> - -#include "schnorr.h" -#include "num.h" -#include "field.h" -#include "group.h" -#include "ecmult.h" -#include "ecmult_gen.h" - -/** - * Custom Schnorr-based signature scheme. They support multiparty signing, public key - * recovery and batch validation. - * - * Rationale for verifying R's y coordinate: - * In order to support batch validation and public key recovery, the full R point must - * be known to verifiers, rather than just its x coordinate. In order to not risk - * being more strict in batch validation than normal validation, validators must be - * required to reject signatures with incorrect y coordinate. This is only possible - * by including a (relatively slow) field inverse, or a field square root. However, - * batch validation offers potentially much higher benefits than this cost. - * - * Rationale for having an implicit y coordinate oddness: - * If we commit to having the full R point known to verifiers, there are two mechanism. - * Either include its oddness in the signature, or give it an implicit fixed value. 
- * As the R y coordinate can be flipped by a simple negation of the nonce, we choose the - * latter, as it comes with nearly zero impact on signing or validation performance, and - * saves a byte in the signature. - * - * Signing: - * Inputs: 32-byte message m, 32-byte scalar key x (!=0), 32-byte scalar nonce k (!=0) - * - * Compute point R = k * G. Reject nonce if R's y coordinate is odd (or negate nonce). - * Compute 32-byte r, the serialization of R's x coordinate. - * Compute scalar h = Hash(r || m). Reject nonce if h == 0 or h >= order. - * Compute scalar s = k - h * x. - * The signature is (r, s). - * - * - * Verification: - * Inputs: 32-byte message m, public key point Q, signature: (32-byte r, scalar s) - * - * Signature is invalid if s >= order. - * Signature is invalid if r >= p. - * Compute scalar h = Hash(r || m). Signature is invalid if h == 0 or h >= order. - * Option 1 (faster for single verification): - * Compute point R = h * Q + s * G. Signature is invalid if R is infinity or R's y coordinate is odd. - * Signature is valid if the serialization of R's x coordinate equals r. - * Option 2 (allows batch validation and pubkey recovery): - * Decompress x coordinate r into point R, with odd y coordinate. Fail if R is not on the curve. - * Signature is valid if R + h * Q + s * G == 0. - */ - -static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32) { - secp256k1_gej Rj; - secp256k1_ge Ra; - unsigned char h32[32]; - secp256k1_scalar h, s; - int overflow; - secp256k1_scalar n; - - if (secp256k1_scalar_is_zero(key) || secp256k1_scalar_is_zero(nonce)) { - return 0; - } - n = *nonce; - - secp256k1_ecmult_gen(ctx, &Rj, &n); - if (pubnonce != NULL) { - secp256k1_gej_add_ge(&Rj, &Rj, pubnonce); - } - secp256k1_ge_set_gej(&Ra, &Rj); - secp256k1_fe_normalize(&Ra.y); - if (secp256k1_fe_is_odd(&Ra.y)) { - /* R's y coordinate is odd, which is not allowed (see rationale above). - Force it to be even by negating the nonce. Note that this even works - for multiparty signing, as the R point is known to all participants, - which can all decide to flip the sign in unison, resulting in the - overall R point to be negated too. 
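The signing and verification recipe spelled out in this (now removed) header comment is plain group algebra: s = k - h*x, and the verifier reconstructs R as h*Q + s*G. A minimal sketch of that algebra over a toy group, the multiplicative subgroup of order q = 11 generated by g = 4 mod p = 23, rather than the secp256k1 curve; the parameters and the stand-in hash are assumptions for illustration only, and the R-oddness and h != 0 checks are omitted:

    #include <stdio.h>

    /* Toy parameters, assumed for illustration only. */
    #define P 23u   /* modulus */
    #define Q 11u   /* order of the subgroup generated by G */
    #define G  4u   /* generator */

    static unsigned modpow(unsigned base, unsigned exp, unsigned mod) {
        unsigned result = 1;
        base %= mod;
        while (exp) {
            if (exp & 1) result = (result * base) % mod;
            base = (base * base) % mod;
            exp >>= 1;
        }
        return result;
    }

    /* Stand-in for Hash(r || m), reduced mod the group order. */
    static unsigned toy_hash(unsigned r, unsigned m) {
        return (3 * r + m) % Q;
    }

    int main(void) {
        unsigned x = 7, k = 5, m = 9;            /* key, nonce, message */
        unsigned y = modpow(G, x, P);            /* public key, "Q = x*G" */
        unsigned r = modpow(G, k, P);            /* "R = k*G" */
        unsigned h = toy_hash(r, m);
        unsigned s = (k + Q - (h * x) % Q) % Q;  /* s = k - h*x mod q */
        /* Verify: g^s * y^h reproduces R, mirroring "R = h*Q + s*G". */
        unsigned rv = (modpow(G, s, P) * modpow(y, h, P)) % P;
        printf("signature (r=%u, s=%u) %s\n", r, s, rv == r ? "verifies" : "fails");
        return 0;
    }
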
*/ - secp256k1_scalar_negate(&n, &n); - } - secp256k1_fe_normalize(&Ra.x); - secp256k1_fe_get_b32(sig64, &Ra.x); - hash(h32, sig64, msg32); - overflow = 0; - secp256k1_scalar_set_b32(&h, h32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&h)) { - secp256k1_scalar_clear(&n); - return 0; - } - secp256k1_scalar_mul(&s, &h, key); - secp256k1_scalar_negate(&s, &s); - secp256k1_scalar_add(&s, &s, &n); - secp256k1_scalar_clear(&n); - secp256k1_scalar_get_b32(sig64 + 32, &s); - return 1; -} - -static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) { - secp256k1_gej Qj, Rj; - secp256k1_ge Ra; - secp256k1_fe Rx; - secp256k1_scalar h, s; - unsigned char hh[32]; - int overflow; - - if (secp256k1_ge_is_infinity(pubkey)) { - return 0; - } - hash(hh, sig64, msg32); - overflow = 0; - secp256k1_scalar_set_b32(&h, hh, &overflow); - if (overflow || secp256k1_scalar_is_zero(&h)) { - return 0; - } - overflow = 0; - secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow); - if (overflow) { - return 0; - } - if (!secp256k1_fe_set_b32(&Rx, sig64)) { - return 0; - } - secp256k1_gej_set_ge(&Qj, pubkey); - secp256k1_ecmult(ctx, &Rj, &Qj, &h, &s); - if (secp256k1_gej_is_infinity(&Rj)) { - return 0; - } - secp256k1_ge_set_gej_var(&Ra, &Rj); - secp256k1_fe_normalize_var(&Ra.y); - if (secp256k1_fe_is_odd(&Ra.y)) { - return 0; - } - return secp256k1_fe_equal_var(&Rx, &Ra.x); -} - -static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) { - secp256k1_gej Qj, Rj; - secp256k1_ge Ra; - secp256k1_fe Rx; - secp256k1_scalar h, s; - unsigned char hh[32]; - int overflow; - - hash(hh, sig64, msg32); - overflow = 0; - secp256k1_scalar_set_b32(&h, hh, &overflow); - if (overflow || secp256k1_scalar_is_zero(&h)) { - return 0; - } - overflow = 0; - secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow); - if (overflow) { - return 0; - } - if (!secp256k1_fe_set_b32(&Rx, sig64)) { - return 0; - } - if (!secp256k1_ge_set_xo_var(&Ra, &Rx, 0)) { - return 0; - } - secp256k1_gej_set_ge(&Rj, &Ra); - secp256k1_scalar_inverse_var(&h, &h); - secp256k1_scalar_negate(&s, &s); - secp256k1_scalar_mul(&s, &s, &h); - secp256k1_ecmult(ctx, &Qj, &Rj, &h, &s); - if (secp256k1_gej_is_infinity(&Qj)) { - return 0; - } - secp256k1_ge_set_gej(pubkey, &Qj); - return 1; -} - -static int secp256k1_schnorr_sig_combine(unsigned char *sig64, size_t n, const unsigned char * const *sig64ins) { - secp256k1_scalar s = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - size_t i; - for (i = 0; i < n; i++) { - secp256k1_scalar si; - int overflow; - secp256k1_scalar_set_b32(&si, sig64ins[i] + 32, &overflow); - if (overflow) { - return -1; - } - if (i) { - if (memcmp(sig64ins[i - 1], sig64ins[i], 32) != 0) { - return -1; - } - } - secp256k1_scalar_add(&s, &s, &si); - } - if (secp256k1_scalar_is_zero(&s)) { - return 0; - } - memcpy(sig64, sig64ins[0], 32); - secp256k1_scalar_get_b32(sig64 + 32, &s); - secp256k1_scalar_clear(&s); - return 1; -} - -#endif diff --git a/src/secp256k1/src/modules/schnorr/tests_impl.h b/src/secp256k1/src/modules/schnorr/tests_impl.h deleted file mode 100644 index 5bd14a03e3..0000000000 --- a/src/secp256k1/src/modules/schnorr/tests_impl.h +++ /dev/null @@ -1,175 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille 
* - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef SECP256K1_MODULE_SCHNORR_TESTS -#define SECP256K1_MODULE_SCHNORR_TESTS - -#include "include/secp256k1_schnorr.h" - -void test_schnorr_end_to_end(void) { - unsigned char privkey[32]; - unsigned char message[32]; - unsigned char schnorr_signature[64]; - secp256k1_pubkey pubkey, recpubkey; - - /* Generate a random key and message. */ - { - secp256k1_scalar key; - random_scalar_order_test(&key); - secp256k1_scalar_get_b32(privkey, &key); - secp256k1_rand256_test(message); - } - - /* Construct and verify corresponding public key. */ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); - - /* Schnorr sign. */ - CHECK(secp256k1_schnorr_sign(ctx, schnorr_signature, message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 1); - CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) == 1); - CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0); - /* Destroy signature and verify again. */ - schnorr_signature[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255); - CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 0); - CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) != 1 || - memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0); -} - -/** Horribly broken hash function. Do not use for anything but tests. */ -void test_schnorr_hash(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) { - int i; - for (i = 0; i < 32; i++) { - h32[i] = r32[i] ^ msg32[i]; - } -} - -void test_schnorr_sign_verify(void) { - unsigned char msg32[32]; - unsigned char sig64[3][64]; - secp256k1_gej pubkeyj[3]; - secp256k1_ge pubkey[3]; - secp256k1_scalar nonce[3], key[3]; - int i = 0; - int k; - - secp256k1_rand256_test(msg32); - - for (k = 0; k < 3; k++) { - random_scalar_order_test(&key[k]); - - do { - random_scalar_order_test(&nonce[k]); - if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64[k], &key[k], &nonce[k], NULL, &test_schnorr_hash, msg32)) { - break; - } - } while(1); - - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubkeyj[k], &key[k]); - secp256k1_ge_set_gej_var(&pubkey[k], &pubkeyj[k]); - CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32)); - - for (i = 0; i < 4; i++) { - int pos = secp256k1_rand_bits(6); - int mod = 1 + secp256k1_rand_int(255); - sig64[k][pos] ^= mod; - CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32) == 0); - sig64[k][pos] ^= mod; - } - } -} - -void test_schnorr_threshold(void) { - unsigned char msg[32]; - unsigned char sec[5][32]; - secp256k1_pubkey pub[5]; - unsigned char nonce[5][32]; - secp256k1_pubkey pubnonce[5]; - unsigned char sig[5][64]; - const unsigned char* sigs[5]; - unsigned char allsig[64]; - const secp256k1_pubkey* pubs[5]; - secp256k1_pubkey allpub; - int n, i; - int damage; - int ret = 0; - - damage = secp256k1_rand_bits(1) ? 
(1 + secp256k1_rand_int(4)) : 0; - secp256k1_rand256_test(msg); - n = 2 + secp256k1_rand_int(4); - for (i = 0; i < n; i++) { - do { - secp256k1_rand256_test(sec[i]); - } while (!secp256k1_ec_seckey_verify(ctx, sec[i])); - CHECK(secp256k1_ec_pubkey_create(ctx, &pub[i], sec[i])); - CHECK(secp256k1_schnorr_generate_nonce_pair(ctx, &pubnonce[i], nonce[i], msg, sec[i], NULL, NULL)); - pubs[i] = &pub[i]; - } - if (damage == 1) { - nonce[secp256k1_rand_int(n)][secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255); - } else if (damage == 2) { - sec[secp256k1_rand_int(n)][secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255); - } - for (i = 0; i < n; i++) { - secp256k1_pubkey allpubnonce; - const secp256k1_pubkey *pubnonces[4]; - int j; - for (j = 0; j < i; j++) { - pubnonces[j] = &pubnonce[j]; - } - for (j = i + 1; j < n; j++) { - pubnonces[j - 1] = &pubnonce[j]; - } - CHECK(secp256k1_ec_pubkey_combine(ctx, &allpubnonce, pubnonces, n - 1)); - ret |= (secp256k1_schnorr_partial_sign(ctx, sig[i], msg, sec[i], &allpubnonce, nonce[i]) != 1) * 1; - sigs[i] = sig[i]; - } - if (damage == 3) { - sig[secp256k1_rand_int(n)][secp256k1_rand_bits(6)] ^= 1 + secp256k1_rand_int(255); - } - ret |= (secp256k1_ec_pubkey_combine(ctx, &allpub, pubs, n) != 1) * 2; - if ((ret & 1) == 0) { - ret |= (secp256k1_schnorr_partial_combine(ctx, allsig, sigs, n) != 1) * 4; - } - if (damage == 4) { - allsig[secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255); - } - if ((ret & 7) == 0) { - ret |= (secp256k1_schnorr_verify(ctx, allsig, msg, &allpub) != 1) * 8; - } - CHECK((ret == 0) == (damage == 0)); -} - -void test_schnorr_recovery(void) { - unsigned char msg32[32]; - unsigned char sig64[64]; - secp256k1_ge Q; - - secp256k1_rand256_test(msg32); - secp256k1_rand256_test(sig64); - secp256k1_rand256_test(sig64 + 32); - if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1) { - CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1); - } -} - -void run_schnorr_tests(void) { - int i; - for (i = 0; i < 32*count; i++) { - test_schnorr_end_to_end(); - } - for (i = 0; i < 32 * count; i++) { - test_schnorr_sign_verify(); - } - for (i = 0; i < 16 * count; i++) { - test_schnorr_recovery(); - } - for (i = 0; i < 10 * count; i++) { - test_schnorr_threshold(); - } -} - -#endif diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h index b590ccd6dd..27e9d8375e 100644 --- a/src/secp256k1/src/scalar.h +++ b/src/secp256k1/src/scalar.h @@ -13,7 +13,9 @@ #include "libsecp256k1-config.h" #endif -#if defined(USE_SCALAR_4X64) +#if defined(EXHAUSTIVE_TEST_ORDER) +#include "scalar_low.h" +#elif defined(USE_SCALAR_4X64) #include "scalar_4x64.h" #elif defined(USE_SCALAR_8X32) #include "scalar_8x32.h" diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h index aa2703dd23..56e7bd82af 100644 --- a/src/secp256k1/src/scalar_4x64_impl.h +++ b/src/secp256k1/src/scalar_4x64_impl.h @@ -282,8 +282,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "movq 56(%%rsi), %%r14\n" /* Initialize r8,r9,r10 */ "movq 0(%%rsi), %%r8\n" - "movq $0, %%r9\n" - "movq $0, %%r10\n" + "xorq %%r9, %%r9\n" + "xorq %%r10, %%r10\n" /* (r8,r9) += n0 * c0 */ "movq %8, %%rax\n" "mulq %%r11\n" @@ -291,7 +291,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq %%rdx, %%r9\n" /* extract m0 */ "movq %%r8, %q0\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r10) += l1 */ "addq 8(%%rsi), 
%%r9\n" "adcq $0, %%r10\n" @@ -309,7 +309,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r8\n" /* extract m1 */ "movq %%r9, %q1\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r10,r8,r9) += l2 */ "addq 16(%%rsi), %%r10\n" "adcq $0, %%r8\n" @@ -332,7 +332,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r9\n" /* extract m2 */ "movq %%r10, %q2\n" - "movq $0, %%r10\n" + "xorq %%r10, %%r10\n" /* (r8,r9,r10) += l3 */ "addq 24(%%rsi), %%r8\n" "adcq $0, %%r9\n" @@ -355,7 +355,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r10\n" /* extract m3 */ "movq %%r8, %q3\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r10,r8) += n3 * c1 */ "movq %9, %%rax\n" "mulq %%r14\n" @@ -387,8 +387,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "movq %q11, %%r13\n" /* Initialize (r8,r9,r10) */ "movq %q5, %%r8\n" - "movq $0, %%r9\n" - "movq $0, %%r10\n" + "xorq %%r9, %%r9\n" + "xorq %%r10, %%r10\n" /* (r8,r9) += m4 * c0 */ "movq %12, %%rax\n" "mulq %%r11\n" @@ -396,7 +396,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq %%rdx, %%r9\n" /* extract p0 */ "movq %%r8, %q0\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r10) += m1 */ "addq %q6, %%r9\n" "adcq $0, %%r10\n" @@ -414,7 +414,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r8\n" /* extract p1 */ "movq %%r9, %q1\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r10,r8,r9) += m2 */ "addq %q7, %%r10\n" "adcq $0, %%r8\n" @@ -472,7 +472,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "movq %%rax, 0(%q6)\n" /* Move to (r8,r9) */ "movq %%rdx, %%r8\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r8,r9) += p1 */ "addq %q2, %%r8\n" "adcq $0, %%r9\n" @@ -483,7 +483,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq %%rdx, %%r9\n" /* Extract r1 */ "movq %%r8, 8(%q6)\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r8) += p4 */ "addq %%r10, %%r9\n" "adcq $0, %%r8\n" @@ -492,7 +492,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r8\n" /* Extract r2 */ "movq %%r9, 16(%q6)\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r8,r9) += p3 */ "addq %q4, %%r8\n" "adcq $0, %%r9\n" diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h index c5baf4df41..f5b2376407 100644 --- a/src/secp256k1/src/scalar_impl.h +++ b/src/secp256k1/src/scalar_impl.h @@ -14,7 +14,9 @@ #include "libsecp256k1-config.h" #endif -#if defined(USE_SCALAR_4X64) +#if defined(EXHAUSTIVE_TEST_ORDER) +#include "scalar_low_impl.h" +#elif defined(USE_SCALAR_4X64) #include "scalar_4x64_impl.h" #elif defined(USE_SCALAR_8X32) #include "scalar_8x32_impl.h" @@ -31,17 +33,37 @@ static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a /** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */ static void secp256k1_scalar_order_get_num(secp256k1_num *r) { +#if defined(EXHAUSTIVE_TEST_ORDER) + static const unsigned char order[32] = { + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER + }; +#else static const unsigned char order[32] = { 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 }; +#endif 
secp256k1_num_set_bin(r, order, 32); } #endif static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { +#if defined(EXHAUSTIVE_TEST_ORDER) + int i; + *r = 0; + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) + if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) + *r = i; + /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus + * have a composite group order; fix it in exhaustive_tests.c). */ + VERIFY_CHECK(*r != 0); +} +#else secp256k1_scalar *t; int i; /* First compute x ^ (2^N - 1) for some values of N. */ @@ -233,9 +255,9 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar } SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { - /* d[0] is present and is the lowest word for all representations */ return !(a->d[0] & 1); } +#endif static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { #if defined(USE_SCALAR_INV_BUILTIN) @@ -259,6 +281,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc } #ifdef USE_ENDOMORPHISM +#if defined(EXHAUSTIVE_TEST_ORDER) +/** + * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the + * full case we don't bother making k1 and k2 be small, we just want them to be + * nontrivial to get full test coverage for the exhaustive tests. We therefore + * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda. + */ +static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { + *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER; + *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; +} +#else /** * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, @@ -331,5 +365,6 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar secp256k1_scalar_add(r1, r1, a); } #endif +#endif #endif diff --git a/src/secp256k1/src/scalar_low.h b/src/secp256k1/src/scalar_low.h new file mode 100644 index 0000000000..5574c44c7a --- /dev/null +++ b/src/secp256k1/src/scalar_low.h @@ -0,0 +1,15 @@ +/********************************************************************** + * Copyright (c) 2015 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_SCALAR_REPR_ +#define _SECP256K1_SCALAR_REPR_ + +#include <stdint.h> + +/** A scalar modulo the group order of the secp256k1 curve. 
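The exhaustive-test lambda split above is easy to sanity-check by brute force for the order 13 / lambda 9 parameters used by the new tests; a small sketch with plain unsigned arithmetic standing in for secp256k1_scalar:

    #include <stdio.h>

    #define ORDER  13u  /* EXHAUSTIVE_TEST_ORDER used by the new tests */
    #define LAMBDA  9u  /* EXHAUSTIVE_TEST_LAMBDA, a cube root of 1 mod 13 */

    int main(void) {
        unsigned k;
        for (k = 0; k < ORDER; k++) {
            unsigned k2 = (k + 5) % ORDER;
            unsigned k1 = (k + (ORDER - k2) * LAMBDA) % ORDER; /* k - k2*lambda mod n */
            /* The property split_lambda must provide: k1 + k2*lambda == k (mod n). */
            if ((k1 + k2 * LAMBDA) % ORDER != k) {
                printf("split broken for k=%u\n", k);
                return 1;
            }
        }
        printf("k1 + k2*lambda == k holds for every k mod %u\n", ORDER);
        return 0;
    }
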
*/ +typedef uint32_t secp256k1_scalar; + +#endif diff --git a/src/secp256k1/src/scalar_low_impl.h b/src/secp256k1/src/scalar_low_impl.h new file mode 100644 index 0000000000..4f94441f49 --- /dev/null +++ b/src/secp256k1/src/scalar_low_impl.h @@ -0,0 +1,114 @@ +/********************************************************************** + * Copyright (c) 2015 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ +#define _SECP256K1_SCALAR_REPR_IMPL_H_ + +#include "scalar.h" + +#include <string.h> + +SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { + return !(*a & 1); +} + +SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; } +SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; } + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { + if (offset < 32) + return ((*a >> offset) & ((((uint32_t)1) << count) - 1)); + else + return 0; +} + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { + return secp256k1_scalar_get_bits(a, offset, count); +} + +SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } + +static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { + *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER; + return *r < *b; +} + +static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { + if (flag && bit < 32) + *r += (1 << bit); +#ifdef VERIFY + VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); +#endif +} + +static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { + const int base = 0x100 % EXHAUSTIVE_TEST_ORDER; + int i; + *r = 0; + for (i = 0; i < 32; i++) { + *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER; + } + /* just deny overflow, it basically always happens */ + if (overflow) *overflow = 0; +} + +static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { + memset(bin, 0, 32); + bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; +} + +SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { + return *a == 0; +} + +static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { + if (*a == 0) { + *r = 0; + } else { + *r = EXHAUSTIVE_TEST_ORDER - *a; + } +} + +SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { + return *a == 1; +} + +static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { + return *a > EXHAUSTIVE_TEST_ORDER / 2; +} + +static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { + if (flag) secp256k1_scalar_negate(r, r); + return flag ? 
-1 : 1; +} + +static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { + *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER; +} + +static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { + int ret; + VERIFY_CHECK(n > 0); + VERIFY_CHECK(n < 16); + ret = *r & ((1 << n) - 1); + *r >>= n; + return ret; +} + +static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { + *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER; +} + +static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { + *r1 = *a; + *r2 = 0; +} + +SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { + return *a == *b; +} + +#endif diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c index 7973d60c36..fb8b882faa 100644..100755 --- a/src/secp256k1/src/secp256k1.c +++ b/src/secp256k1/src/secp256k1.c @@ -359,16 +359,15 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature secp256k1_scalar_set_b32(&sec, seckey, &overflow); /* Fail if the secret key is invalid. */ if (!overflow && !secp256k1_scalar_is_zero(&sec)) { + unsigned char nonce32[32]; unsigned int count = 0; secp256k1_scalar_set_b32(&msg, msg32, NULL); while (1) { - unsigned char nonce32[32]; ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } secp256k1_scalar_set_b32(&non, nonce32, &overflow); - memset(nonce32, 0, 32); if (!overflow && !secp256k1_scalar_is_zero(&non)) { if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) { break; @@ -376,6 +375,7 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature } count++; } + memset(nonce32, 0, 32); secp256k1_scalar_clear(&msg); secp256k1_scalar_clear(&non); secp256k1_scalar_clear(&sec); diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c index b32cb90813..9ae7d30281 100644 --- a/src/secp256k1/src/tests.c +++ b/src/secp256k1/src/tests.c @@ -520,7 +520,7 @@ void test_num_mod(void) { secp256k1_num order, n; /* check that 0 mod anything is 0 */ - random_scalar_order_test(&s); + random_scalar_order_test(&s); secp256k1_scalar_get_num(&order, &s); secp256k1_scalar_set_int(&s, 0); secp256k1_scalar_get_num(&n, &s); @@ -535,7 +535,7 @@ void test_num_mod(void) { CHECK(secp256k1_num_is_zero(&n)); /* check that increasing the number past 2^256 does not break this */ - random_scalar_order_test(&s); + random_scalar_order_test(&s); secp256k1_scalar_get_num(&n, &s); /* multiply by 2^8, which'll test this case with high probability */ for (i = 0; i < 8; ++i) { @@ -568,7 +568,7 @@ void test_num_jacobi(void) { /* we first need a scalar which is not a multiple of 5 */ do { secp256k1_num fiven; - random_scalar_order_test(&sqr); + random_scalar_order_test(&sqr); secp256k1_scalar_get_num(&fiven, &five); secp256k1_scalar_get_num(&n, &sqr); secp256k1_num_mod(&n, &fiven); @@ -587,7 +587,7 @@ void test_num_jacobi(void) { /** test with secp group order as order */ secp256k1_scalar_order_get_num(&order); - random_scalar_order_test(&sqr); + random_scalar_order_test(&sqr); secp256k1_scalar_sqr(&sqr, &sqr); /* test residue */ secp256k1_scalar_get_num(&n, &sqr); @@ -1733,18 +1733,18 @@ void run_field_inv_all_var(void) { secp256k1_fe x[16], xi[16], xii[16]; int i; /* Check it's safe to call for 0 elements */ - secp256k1_fe_inv_all_var(0, xi, x); + secp256k1_fe_inv_all_var(xi, x, 0); for (i = 0; i < count; i++) { size_t j; size_t len = 
secp256k1_rand_int(15) + 1; for (j = 0; j < len; j++) { random_fe_non_zero(&x[j]); } - secp256k1_fe_inv_all_var(len, xi, x); + secp256k1_fe_inv_all_var(xi, x, len); for (j = 0; j < len; j++) { CHECK(check_fe_inverse(&x[j], &xi[j])); } - secp256k1_fe_inv_all_var(len, xii, xi); + secp256k1_fe_inv_all_var(xii, xi, len); for (j = 0; j < len; j++) { CHECK(check_fe_equal(&x[j], &xii[j])); } @@ -1930,7 +1930,7 @@ void test_ge(void) { zs[i] = gej[i].z; } } - secp256k1_fe_inv_all_var(4 * runs + 1, zinv, zs); + secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1); free(zs); } @@ -2050,8 +2050,8 @@ void test_ge(void) { secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); } } - secp256k1_ge_set_table_gej_var(4 * runs + 1, ge_set_table, gej, zr); - secp256k1_ge_set_all_gej_var(4 * runs + 1, ge_set_all, gej, &ctx->error_callback); + secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1); + secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback); for (i = 0; i < 4 * runs + 1; i++) { secp256k1_fe s; random_fe_non_zero(&s); diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c new file mode 100644 index 0000000000..bda6ee475c --- /dev/null +++ b/src/secp256k1/src/tests_exhaustive.c @@ -0,0 +1,329 @@ +/*********************************************************************** + * Copyright (c) 2016 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#if defined HAVE_CONFIG_H +#include "libsecp256k1-config.h" +#endif + +#include <stdio.h> +#include <stdlib.h> + +#include <time.h> + +#undef USE_ECMULT_STATIC_PRECOMPUTATION + +#ifndef EXHAUSTIVE_TEST_ORDER +/* see group_impl.h for allowable values */ +#define EXHAUSTIVE_TEST_ORDER 13 +#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */ +#endif + +#include "include/secp256k1.h" +#include "group.h" +#include "secp256k1.c" +#include "testrand_impl.h" + +/** stolen from tests.c */ +void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { + CHECK(a->infinity == b->infinity); + if (a->infinity) { + return; + } + CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); + CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); +} + +void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { + secp256k1_fe z2s; + secp256k1_fe u1, u2, s1, s2; + CHECK(a->infinity == b->infinity); + if (a->infinity) { + return; + } + /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ + secp256k1_fe_sqr(&z2s, &b->z); + secp256k1_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; secp256k1_fe_normalize_weak(&u2); + secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); + s2 = b->y; secp256k1_fe_normalize_weak(&s2); + CHECK(secp256k1_fe_equal_var(&u1, &u2)); + CHECK(secp256k1_fe_equal_var(&s1, &s2)); +} + +void random_fe(secp256k1_fe *x) { + unsigned char bin[32]; + do { + secp256k1_rand256(bin); + if (secp256k1_fe_set_b32(x, bin)) { + return; + } + } while(1); +} +/** END stolen from tests.c */ + +int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, + const unsigned char *key32, const unsigned char *algo16, + void *data, unsigned int attempt) { + secp256k1_scalar s; + int *idata = data; + (void)msg32; + (void)key32; + (void)algo16; + /* Some nonces cannot be used because they'd cause s and/or r to be zero. 
+ * The signing function has retry logic here that just re-calls the nonce + * function with an increased `attempt`. So if attempt > 0 this means we + * need to change the nonce to avoid an infinite loop. */ + if (attempt > 0) { + (*idata)++; + } + secp256k1_scalar_set_int(&s, *idata); + secp256k1_scalar_get_b32(nonce32, &s); + return 1; +} + +#ifdef USE_ENDOMORPHISM +void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) { + int i; + for (i = 0; i < order; i++) { + secp256k1_ge res; + secp256k1_ge_mul_lambda(&res, &group[i]); + ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); + } +} +#endif + +void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { + int i, j; + + /* Sanity-check (and check infinity functions) */ + CHECK(secp256k1_ge_is_infinity(&group[0])); + CHECK(secp256k1_gej_is_infinity(&groupj[0])); + for (i = 1; i < order; i++) { + CHECK(!secp256k1_ge_is_infinity(&group[i])); + CHECK(!secp256k1_gej_is_infinity(&groupj[i])); + } + + /* Check all addition formulae */ + for (j = 0; j < order; j++) { + secp256k1_fe fe_inv; + secp256k1_fe_inv(&fe_inv, &groupj[j].z); + for (i = 0; i < order; i++) { + secp256k1_ge zless_gej; + secp256k1_gej tmp; + /* add_var */ + secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); + ge_equals_gej(&group[(i + j) % order], &tmp); + /* add_ge */ + if (j > 0) { + secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]); + ge_equals_gej(&group[(i + j) % order], &tmp); + } + /* add_ge_var */ + secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); + ge_equals_gej(&group[(i + j) % order], &tmp); + /* add_zinv_var */ + zless_gej.infinity = groupj[j].infinity; + zless_gej.x = groupj[j].x; + zless_gej.y = groupj[j].y; + secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); + ge_equals_gej(&group[(i + j) % order], &tmp); + } + } + + /* Check doubling */ + for (i = 0; i < order; i++) { + secp256k1_gej tmp; + if (i > 0) { + secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL); + ge_equals_gej(&group[(2 * i) % order], &tmp); + } + secp256k1_gej_double_var(&tmp, &groupj[i], NULL); + ge_equals_gej(&group[(2 * i) % order], &tmp); + } + + /* Check negation */ + for (i = 1; i < order; i++) { + secp256k1_ge tmp; + secp256k1_gej tmpj; + secp256k1_ge_neg(&tmp, &group[i]); + ge_equals_ge(&group[order - i], &tmp); + secp256k1_gej_neg(&tmpj, &groupj[i]); + ge_equals_gej(&group[order - i], &tmpj); + } +} + +void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { + int i, j, r_log; + for (r_log = 1; r_log < order; r_log++) { + for (j = 0; j < order; j++) { + for (i = 0; i < order; i++) { + secp256k1_gej tmp; + secp256k1_scalar na, ng; + secp256k1_scalar_set_int(&na, i); + secp256k1_scalar_set_int(&ng, j); + + secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); + ge_equals_gej(&group[(i * r_log + j) % order], &tmp); + + if (i > 0) { + secp256k1_ecmult_const(&tmp, &group[i], &ng); + ge_equals_gej(&group[(i * j) % order], &tmp); + } + } + } + } +} + +void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) { + secp256k1_fe x; + unsigned char x_bin[32]; + k %= EXHAUSTIVE_TEST_ORDER; + x = group[k].x; + secp256k1_fe_normalize(&x); + secp256k1_fe_get_b32(x_bin, &x); + secp256k1_scalar_set_b32(r, x_bin, NULL); +} + +void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { + int s, r, msg, key; + for (s = 1; s < order; s++) { + for (r = 1; r < 
order; r++) { + for (msg = 1; msg < order; msg++) { + for (key = 1; key < order; key++) { + secp256k1_ge nonconst_ge; + secp256k1_ecdsa_signature sig; + secp256k1_pubkey pk; + secp256k1_scalar sk_s, msg_s, r_s, s_s; + secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; + int k, should_verify; + unsigned char msg32[32]; + + secp256k1_scalar_set_int(&s_s, s); + secp256k1_scalar_set_int(&r_s, r); + secp256k1_scalar_set_int(&msg_s, msg); + secp256k1_scalar_set_int(&sk_s, key); + + /* Verify by hand */ + /* Run through every k value that gives us this r and check that *one* works. + * Note there could be none, there could be multiple, ECDSA is weird. */ + should_verify = 0; + for (k = 0; k < order; k++) { + secp256k1_scalar check_x_s; + r_from_k(&check_x_s, group, k); + if (r_s == check_x_s) { + secp256k1_scalar_set_int(&s_times_k_s, k); + secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + } + } + /* nb we have a "high s" rule */ + should_verify &= !secp256k1_scalar_is_high(&s_s); + + /* Verify by calling verify */ + secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s); + memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); + secp256k1_pubkey_save(&pk, &nonconst_ge); + secp256k1_scalar_get_b32(msg32, &msg_s); + CHECK(should_verify == + secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); + } + } + } + } +} + +void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { + int i, j, k; + + /* Loop */ + for (i = 1; i < order; i++) { /* message */ + for (j = 1; j < order; j++) { /* key */ + for (k = 1; k < order; k++) { /* nonce */ + secp256k1_ecdsa_signature sig; + secp256k1_scalar sk, msg, r, s, expected_r; + unsigned char sk32[32], msg32[32]; + secp256k1_scalar_set_int(&msg, i); + secp256k1_scalar_set_int(&sk, j); + secp256k1_scalar_get_b32(sk32, &sk); + secp256k1_scalar_get_b32(msg32, &msg); + + secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k); + + secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); + /* Note that we compute expected_r *after* signing -- this is important + * because our nonce-computing function function might change k during + * signing. */ + r_from_k(&expected_r, group, k); + CHECK(r == expected_r); + CHECK((k * s) % order == (i + r * j) % order || + (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); + } + } + } + + /* We would like to verify zero-knowledge here by counting how often every + * possible (s, r) tuple appears, but because the group order is larger + * than the field order, when coercing the x-values to scalar values, some + * appear more often than others, so we are actually not zero-knowledge. + * (This effect also appears in the real code, but the difference is on the + * order of 1/2^128th the field order, so the deviation is not useful to a + * computationally bounded attacker.) 
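The relation being brute-forced here is the standard ECDSA verification identity: for a nonce k whose R-point x-coordinate reduces to r, the signature (r, s) on message m under secret key x verifies iff s*k ≡ m + r*x (mod order), with the extra rule that high-s signatures are rejected. A plain-integer sketch of that check, using toy values rather than real curve data (toy_ecdsa_relation is invented for illustration and is not part of this diff):

    #include <cstdint>

    // True iff (r, s) satisfies s*k == m + r*x (mod order) -- the same relation
    // test_exhaustive_verify recomputes by hand for every candidate nonce k.
    static bool toy_ecdsa_relation(uint32_t order, uint32_t k, uint32_t r,
                                   uint32_t s, uint32_t m, uint32_t x)
    {
        uint64_t lhs = (uint64_t)s * k % order;
        uint64_t rhs = ((uint64_t)m + (uint64_t)r * x % order) % order;
        return lhs == rhs;
    }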
+ */ +} + +int main(void) { + int i; + secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER]; + secp256k1_ge group[EXHAUSTIVE_TEST_ORDER]; + + /* Build context */ + secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + + /* TODO set z = 1, then do num_tests runs with random z values */ + + /* Generate the entire group */ + secp256k1_gej_set_infinity(&groupj[0]); + secp256k1_ge_set_gej(&group[0], &groupj[0]); + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { + /* Set a different random z-value for each Jacobian point */ + secp256k1_fe z; + random_fe(&z); + + secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g); + secp256k1_ge_set_gej(&group[i], &groupj[i]); + secp256k1_gej_rescale(&groupj[i], &z); + + /* Verify against ecmult_gen */ + { + secp256k1_scalar scalar_i; + secp256k1_gej generatedj; + secp256k1_ge generated; + + secp256k1_scalar_set_int(&scalar_i, i); + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); + secp256k1_ge_set_gej(&generated, &generatedj); + + CHECK(group[i].infinity == 0); + CHECK(generated.infinity == 0); + CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x)); + CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y)); + } + } + + /* Run the tests */ +#ifdef USE_ENDOMORPHISM + test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER); +#endif + test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER); + test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER); + test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER); + test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER); + + return 0; +} + diff --git a/src/serialize.h b/src/serialize.h index 1f51da82ff..2be6f005e3 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -13,6 +13,7 @@ #include <ios> #include <limits> #include <map> +#include <memory> #include <set> #include <stdint.h> #include <string> @@ -25,6 +26,20 @@ static const unsigned int MAX_SIZE = 0x02000000; /** + * Dummy data type to identify deserializing constructors. + * + * By convention, a constructor of a type T with signature + * + * template <typename Stream> T::T(deserialize_type, Stream& s) + * + * is a deserializing constructor, which builds the type by + * deserializing it from s. If T contains const fields, this + * is likely the only way to do so. + */ +struct deserialize_type {}; +constexpr deserialize_type deserialize {}; + +/** * Used to bypass the rule against non-const reference to temporary * where it makes sense with wrappers such as CFlatData or CTxDB */ @@ -44,34 +59,6 @@ inline T* NCONST_PTR(const T* val) return const_cast<T*>(val); } -/** - * Important: Do not use the following functions in new code, but use v.data() - * and v.data() + v.size() respectively directly. They were once introduced to - * have a compatible, safe way to get the begin and end pointer of a vector. - * However with C++11 the language has built-in functionality for this and it's - * more readable to just use that. - */ -template <typename V> -inline typename V::value_type* begin_ptr(V& v) -{ - return v.data(); -} -template <typename V> -inline const typename V::value_type* begin_ptr(const V& v) -{ - return v.data(); -} -template <typename V> -inline typename V::value_type* end_ptr(V& v) -{ - return v.data() + v.size(); -} -template <typename V> -inline const typename V::value_type* end_ptr(const V& v) -{ - return v.data() + v.size(); -} - /* * Lowest-level serialization and conversion. 
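A concrete illustration of the deserialize_type convention documented above: a type with a const field can only be populated by constructing it straight from a stream. Example below is hypothetical and not part of this diff; ser_readdata32/ser_writedata32 are the existing low-level helpers.

    // Hypothetical type with a const member, built via a deserializing constructor.
    class Example
    {
    public:
        const uint32_t nValue;

        template <typename Stream>
        Example(deserialize_type, Stream& s) : nValue(ser_readdata32(s)) {}

        template <typename Stream>
        void Serialize(Stream& s) const { ser_writedata32(s, nValue); }
    };

This is also the constructor shape the new std::unique_ptr/std::shared_ptr overloads further down rely on, e.g. p.reset(new T(deserialize, is)).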
* @note Sizes of these types are verified in the tests @@ -151,6 +138,8 @@ inline float ser_uint32_to_float(uint32_t y) // i.e. anything that supports .read(char*, size_t) and .write(char*, size_t) // +class CSizeComputer; + enum { // primary actions @@ -159,7 +148,8 @@ enum SER_GETHASH = (1 << 2), }; -#define READWRITE(obj) (::SerReadWrite(s, (obj), nType, nVersion, ser_action)) +#define READWRITE(obj) (::SerReadWrite(s, (obj), ser_action)) +#define READWRITEMANY(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__)) /** * Implement three methods for serializable objects. These are actually wrappers over @@ -167,63 +157,42 @@ enum * code. Adding "ADD_SERIALIZE_METHODS" in the body of the class causes these wrappers to be * added as members. */ -#define ADD_SERIALIZE_METHODS \ - size_t GetSerializeSize(int nType, int nVersion) const { \ - CSizeComputer s(nType, nVersion); \ - NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize(), nType, nVersion);\ - return s.size(); \ - } \ - template<typename Stream> \ - void Serialize(Stream& s, int nType, int nVersion) const { \ - NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize(), nType, nVersion);\ - } \ - template<typename Stream> \ - void Unserialize(Stream& s, int nType, int nVersion) { \ - SerializationOp(s, CSerActionUnserialize(), nType, nVersion); \ +#define ADD_SERIALIZE_METHODS \ + template<typename Stream> \ + void Serialize(Stream& s) const { \ + NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \ + } \ + template<typename Stream> \ + void Unserialize(Stream& s) { \ + SerializationOp(s, CSerActionUnserialize()); \ } -/* - * Basic Types - */ -inline unsigned int GetSerializeSize(char a, int, int=0) { return 1; } -inline unsigned int GetSerializeSize(int8_t a, int, int=0) { return 1; } -inline unsigned int GetSerializeSize(uint8_t a, int, int=0) { return 1; } -inline unsigned int GetSerializeSize(int16_t a, int, int=0) { return 2; } -inline unsigned int GetSerializeSize(uint16_t a, int, int=0) { return 2; } -inline unsigned int GetSerializeSize(int32_t a, int, int=0) { return 4; } -inline unsigned int GetSerializeSize(uint32_t a, int, int=0) { return 4; } -inline unsigned int GetSerializeSize(int64_t a, int, int=0) { return 8; } -inline unsigned int GetSerializeSize(uint64_t a, int, int=0) { return 8; } -inline unsigned int GetSerializeSize(float a, int, int=0) { return 4; } -inline unsigned int GetSerializeSize(double a, int, int=0) { return 8; } - -template<typename Stream> inline void Serialize(Stream& s, char a, int, int=0) { ser_writedata8(s, a); } // TODO Get rid of bare char -template<typename Stream> inline void Serialize(Stream& s, int8_t a, int, int=0) { ser_writedata8(s, a); } -template<typename Stream> inline void Serialize(Stream& s, uint8_t a, int, int=0) { ser_writedata8(s, a); } -template<typename Stream> inline void Serialize(Stream& s, int16_t a, int, int=0) { ser_writedata16(s, a); } -template<typename Stream> inline void Serialize(Stream& s, uint16_t a, int, int=0) { ser_writedata16(s, a); } -template<typename Stream> inline void Serialize(Stream& s, int32_t a, int, int=0) { ser_writedata32(s, a); } -template<typename Stream> inline void Serialize(Stream& s, uint32_t a, int, int=0) { ser_writedata32(s, a); } -template<typename Stream> inline void Serialize(Stream& s, int64_t a, int, int=0) { ser_writedata64(s, a); } -template<typename Stream> inline void Serialize(Stream& s, uint64_t a, int, int=0) { ser_writedata64(s, a); } -template<typename Stream> inline void Serialize(Stream& s, float a, int, 
int=0) { ser_writedata32(s, ser_float_to_uint32(a)); } -template<typename Stream> inline void Serialize(Stream& s, double a, int, int=0) { ser_writedata64(s, ser_double_to_uint64(a)); } - -template<typename Stream> inline void Unserialize(Stream& s, char& a, int, int=0) { a = ser_readdata8(s); } // TODO Get rid of bare char -template<typename Stream> inline void Unserialize(Stream& s, int8_t& a, int, int=0) { a = ser_readdata8(s); } -template<typename Stream> inline void Unserialize(Stream& s, uint8_t& a, int, int=0) { a = ser_readdata8(s); } -template<typename Stream> inline void Unserialize(Stream& s, int16_t& a, int, int=0) { a = ser_readdata16(s); } -template<typename Stream> inline void Unserialize(Stream& s, uint16_t& a, int, int=0) { a = ser_readdata16(s); } -template<typename Stream> inline void Unserialize(Stream& s, int32_t& a, int, int=0) { a = ser_readdata32(s); } -template<typename Stream> inline void Unserialize(Stream& s, uint32_t& a, int, int=0) { a = ser_readdata32(s); } -template<typename Stream> inline void Unserialize(Stream& s, int64_t& a, int, int=0) { a = ser_readdata64(s); } -template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a, int, int=0) { a = ser_readdata64(s); } -template<typename Stream> inline void Unserialize(Stream& s, float& a, int, int=0) { a = ser_uint32_to_float(ser_readdata32(s)); } -template<typename Stream> inline void Unserialize(Stream& s, double& a, int, int=0) { a = ser_uint64_to_double(ser_readdata64(s)); } - -inline unsigned int GetSerializeSize(bool a, int, int=0) { return sizeof(char); } -template<typename Stream> inline void Serialize(Stream& s, bool a, int, int=0) { char f=a; ser_writedata8(s, f); } -template<typename Stream> inline void Unserialize(Stream& s, bool& a, int, int=0) { char f=ser_readdata8(s); a=f; } +template<typename Stream> inline void Serialize(Stream& s, char a ) { ser_writedata8(s, a); } // TODO Get rid of bare char +template<typename Stream> inline void Serialize(Stream& s, int8_t a ) { ser_writedata8(s, a); } +template<typename Stream> inline void Serialize(Stream& s, uint8_t a ) { ser_writedata8(s, a); } +template<typename Stream> inline void Serialize(Stream& s, int16_t a ) { ser_writedata16(s, a); } +template<typename Stream> inline void Serialize(Stream& s, uint16_t a) { ser_writedata16(s, a); } +template<typename Stream> inline void Serialize(Stream& s, int32_t a ) { ser_writedata32(s, a); } +template<typename Stream> inline void Serialize(Stream& s, uint32_t a) { ser_writedata32(s, a); } +template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_writedata64(s, a); } +template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); } +template<typename Stream> inline void Serialize(Stream& s, float a ) { ser_writedata32(s, ser_float_to_uint32(a)); } +template<typename Stream> inline void Serialize(Stream& s, double a ) { ser_writedata64(s, ser_double_to_uint64(a)); } + +template<typename Stream> inline void Unserialize(Stream& s, char& a ) { a = ser_readdata8(s); } // TODO Get rid of bare char +template<typename Stream> inline void Unserialize(Stream& s, int8_t& a ) { a = ser_readdata8(s); } +template<typename Stream> inline void Unserialize(Stream& s, uint8_t& a ) { a = ser_readdata8(s); } +template<typename Stream> inline void Unserialize(Stream& s, int16_t& a ) { a = ser_readdata16(s); } +template<typename Stream> inline void Unserialize(Stream& s, uint16_t& a) { a = ser_readdata16(s); } +template<typename Stream> inline void 
Unserialize(Stream& s, int32_t& a ) { a = ser_readdata32(s); } +template<typename Stream> inline void Unserialize(Stream& s, uint32_t& a) { a = ser_readdata32(s); } +template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a = ser_readdata64(s); } +template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); } +template<typename Stream> inline void Unserialize(Stream& s, float& a ) { a = ser_uint32_to_float(ser_readdata32(s)); } +template<typename Stream> inline void Unserialize(Stream& s, double& a ) { a = ser_uint64_to_double(ser_readdata64(s)); } + +template<typename Stream> inline void Serialize(Stream& s, bool a) { char f=a; ser_writedata8(s, f); } +template<typename Stream> inline void Unserialize(Stream& s, bool& a) { char f=ser_readdata8(s); a=f; } @@ -245,6 +214,8 @@ inline unsigned int GetSizeOfCompactSize(uint64_t nSize) else return sizeof(unsigned char) + sizeof(uint64_t); } +inline void WriteCompactSize(CSizeComputer& os, uint64_t nSize); + template<typename Stream> void WriteCompactSize(Stream& os, uint64_t nSize) { @@ -339,6 +310,9 @@ inline unsigned int GetSizeOfVarInt(I n) return nRet; } +template<typename I> +inline void WriteVarInt(CSizeComputer& os, I n); + template<typename Stream, typename I> void WriteVarInt(Stream& os, I n) { @@ -388,33 +362,28 @@ public: template <class T, class TAl> explicit CFlatData(std::vector<T,TAl> &v) { - pbegin = (char*)begin_ptr(v); - pend = (char*)end_ptr(v); + pbegin = (char*)v.data(); + pend = (char*)(v.data() + v.size()); } template <unsigned int N, typename T, typename S, typename D> explicit CFlatData(prevector<N, T, S, D> &v) { - pbegin = (char*)begin_ptr(v); - pend = (char*)end_ptr(v); + pbegin = (char*)v.data(); + pend = (char*)(v.data() + v.size()); } char* begin() { return pbegin; } const char* begin() const { return pbegin; } char* end() { return pend; } const char* end() const { return pend; } - unsigned int GetSerializeSize(int, int=0) const - { - return pend - pbegin; - } - template<typename Stream> - void Serialize(Stream& s, int, int=0) const + void Serialize(Stream& s) const { s.write(pbegin, pend - pbegin); } template<typename Stream> - void Unserialize(Stream& s, int, int=0) + void Unserialize(Stream& s) { s.read(pbegin, pend - pbegin); } @@ -428,17 +397,13 @@ protected: public: CVarInt(I& nIn) : n(nIn) { } - unsigned int GetSerializeSize(int, int) const { - return GetSizeOfVarInt<I>(n); - } - template<typename Stream> - void Serialize(Stream &s, int, int) const { + void Serialize(Stream &s) const { WriteVarInt<Stream,I>(s, n); } template<typename Stream> - void Unserialize(Stream& s, int, int) { + void Unserialize(Stream& s) { n = ReadVarInt<Stream,I>(s); } }; @@ -450,17 +415,13 @@ protected: public: CCompactSize(uint64_t& nIn) : n(nIn) { } - unsigned int GetSerializeSize(int, int) const { - return GetSizeOfCompactSize(n); - } - template<typename Stream> - void Serialize(Stream &s, int, int) const { + void Serialize(Stream &s) const { WriteCompactSize<Stream>(s, n); } template<typename Stream> - void Unserialize(Stream& s, int, int) { + void Unserialize(Stream& s) { n = ReadCompactSize<Stream>(s); } }; @@ -471,10 +432,10 @@ class LimitedString protected: std::string& string; public: - LimitedString(std::string& string) : string(string) {} + LimitedString(std::string& _string) : string(_string) {} template<typename Stream> - void Unserialize(Stream& s, int, int=0) + void Unserialize(Stream& s) { size_t size = ReadCompactSize(s); if (size > Limit) { @@ -486,17 
+447,12 @@ public: } template<typename Stream> - void Serialize(Stream& s, int, int=0) const + void Serialize(Stream& s) const { WriteCompactSize(s, string.size()); if (!string.empty()) s.write((char*)&string[0], string.size()); } - - unsigned int GetSerializeSize(int, int=0) const - { - return GetSizeOfCompactSize(string.size()) + string.size(); - } }; template<typename I> @@ -509,85 +465,76 @@ CVarInt<I> WrapVarInt(I& n) { return CVarInt<I>(n); } /** * string */ -template<typename C> unsigned int GetSerializeSize(const std::basic_string<C>& str, int, int=0); -template<typename Stream, typename C> void Serialize(Stream& os, const std::basic_string<C>& str, int, int=0); -template<typename Stream, typename C> void Unserialize(Stream& is, std::basic_string<C>& str, int, int=0); +template<typename Stream, typename C> void Serialize(Stream& os, const std::basic_string<C>& str); +template<typename Stream, typename C> void Unserialize(Stream& is, std::basic_string<C>& str); /** * prevector * prevectors of unsigned char are a special case and are intended to be serialized as a single opaque blob. */ -template<unsigned int N, typename T> unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const unsigned char&); -template<unsigned int N, typename T, typename V> unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const V&); -template<unsigned int N, typename T> inline unsigned int GetSerializeSize(const prevector<N, T>& v, int nType, int nVersion); -template<typename Stream, unsigned int N, typename T> void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const unsigned char&); -template<typename Stream, unsigned int N, typename T, typename V> void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const V&); -template<typename Stream, unsigned int N, typename T> inline void Serialize(Stream& os, const prevector<N, T>& v, int nType, int nVersion); -template<typename Stream, unsigned int N, typename T> void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const unsigned char&); -template<typename Stream, unsigned int N, typename T, typename V> void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const V&); -template<typename Stream, unsigned int N, typename T> inline void Unserialize(Stream& is, prevector<N, T>& v, int nType, int nVersion); +template<typename Stream, unsigned int N, typename T> void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&); +template<typename Stream, unsigned int N, typename T, typename V> void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&); +template<typename Stream, unsigned int N, typename T> inline void Serialize(Stream& os, const prevector<N, T>& v); +template<typename Stream, unsigned int N, typename T> void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&); +template<typename Stream, unsigned int N, typename T, typename V> void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&); +template<typename Stream, unsigned int N, typename T> inline void Unserialize(Stream& is, prevector<N, T>& v); /** * vector * vectors of unsigned char are a special case and are intended to be serialized as a single opaque blob. 
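With nType and nVersion dropped from these signatures, a class built on ADD_SERIALIZE_METHODS now implements a two-argument SerializationOp and reads s.GetVersion() from the stream when it needs version-dependent behaviour. A sketch of the caller side under the new macros (Foo and its members are invented for illustration):

    class Foo
    {
    public:
        int32_t nValue;
        std::vector<unsigned char> vData;

        ADD_SERIALIZE_METHODS;

        template <typename Stream, typename Operation>
        inline void SerializationOp(Stream& s, Operation ser_action)
        {
            READWRITE(nValue);
            READWRITE(vData);
            // or, equivalently, the new variadic form:
            // READWRITEMANY(nValue, vData);
        }
    };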
*/ -template<typename T, typename A> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&); -template<typename T, typename A, typename V> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const V&); -template<typename T, typename A> inline unsigned int GetSerializeSize(const std::vector<T, A>& v, int nType, int nVersion); -template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&); -template<typename Stream, typename T, typename A, typename V> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const V&); -template<typename Stream, typename T, typename A> inline void Serialize(Stream& os, const std::vector<T, A>& v, int nType, int nVersion); -template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const unsigned char&); -template<typename Stream, typename T, typename A, typename V> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const V&); -template<typename Stream, typename T, typename A> inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersion); +template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&); +template<typename Stream, typename T, typename A, typename V> void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&); +template<typename Stream, typename T, typename A> inline void Serialize(Stream& os, const std::vector<T, A>& v); +template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&); +template<typename Stream, typename T, typename A, typename V> void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&); +template<typename Stream, typename T, typename A> inline void Unserialize(Stream& is, std::vector<T, A>& v); /** * pair */ -template<typename K, typename T> unsigned int GetSerializeSize(const std::pair<K, T>& item, int nType, int nVersion); -template<typename Stream, typename K, typename T> void Serialize(Stream& os, const std::pair<K, T>& item, int nType, int nVersion); -template<typename Stream, typename K, typename T> void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion); +template<typename Stream, typename K, typename T> void Serialize(Stream& os, const std::pair<K, T>& item); +template<typename Stream, typename K, typename T> void Unserialize(Stream& is, std::pair<K, T>& item); /** * map */ -template<typename K, typename T, typename Pred, typename A> unsigned int GetSerializeSize(const std::map<K, T, Pred, A>& m, int nType, int nVersion); -template<typename Stream, typename K, typename T, typename Pred, typename A> void Serialize(Stream& os, const std::map<K, T, Pred, A>& m, int nType, int nVersion); -template<typename Stream, typename K, typename T, typename Pred, typename A> void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion); +template<typename Stream, typename K, typename T, typename Pred, typename A> void Serialize(Stream& os, const std::map<K, T, Pred, A>& m); +template<typename Stream, typename K, typename T, typename Pred, typename A> void Unserialize(Stream& is, std::map<K, T, Pred, A>& m); /** * set */ -template<typename K, typename Pred, typename A> unsigned int GetSerializeSize(const std::set<K, 
Pred, A>& m, int nType, int nVersion); -template<typename Stream, typename K, typename Pred, typename A> void Serialize(Stream& os, const std::set<K, Pred, A>& m, int nType, int nVersion); -template<typename Stream, typename K, typename Pred, typename A> void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion); +template<typename Stream, typename K, typename Pred, typename A> void Serialize(Stream& os, const std::set<K, Pred, A>& m); +template<typename Stream, typename K, typename Pred, typename A> void Unserialize(Stream& is, std::set<K, Pred, A>& m); +/** + * shared_ptr + */ +template<typename Stream, typename T> void Serialize(Stream& os, const std::shared_ptr<const T>& p); +template<typename Stream, typename T> void Unserialize(Stream& os, std::shared_ptr<const T>& p); +/** + * unique_ptr + */ +template<typename Stream, typename T> void Serialize(Stream& os, const std::unique_ptr<const T>& p); +template<typename Stream, typename T> void Unserialize(Stream& os, std::unique_ptr<const T>& p); /** * If none of the specialized versions above matched, default to calling member function. - * "int nType" is changed to "long nType" to keep from getting an ambiguous overload error. - * The compiler will only cast int to long if none of the other templates matched. - * Thanks to Boost serialization for this idea. */ -template<typename T> -inline unsigned int GetSerializeSize(const T& a, long nType, int nVersion) -{ - return a.GetSerializeSize((int)nType, nVersion); -} - template<typename Stream, typename T> -inline void Serialize(Stream& os, const T& a, long nType, int nVersion) +inline void Serialize(Stream& os, const T& a) { - a.Serialize(os, (int)nType, nVersion); + a.Serialize(os); } template<typename Stream, typename T> -inline void Unserialize(Stream& is, T& a, long nType, int nVersion) +inline void Unserialize(Stream& is, T& a) { - a.Unserialize(is, (int)nType, nVersion); + a.Unserialize(is); } @@ -597,14 +544,8 @@ inline void Unserialize(Stream& is, T& a, long nType, int nVersion) /** * string */ -template<typename C> -unsigned int GetSerializeSize(const std::basic_string<C>& str, int, int) -{ - return GetSizeOfCompactSize(str.size()) + str.size() * sizeof(str[0]); -} - template<typename Stream, typename C> -void Serialize(Stream& os, const std::basic_string<C>& str, int, int) +void Serialize(Stream& os, const std::basic_string<C>& str) { WriteCompactSize(os, str.size()); if (!str.empty()) @@ -612,7 +553,7 @@ void Serialize(Stream& os, const std::basic_string<C>& str, int, int) } template<typename Stream, typename C> -void Unserialize(Stream& is, std::basic_string<C>& str, int, int) +void Unserialize(Stream& is, std::basic_string<C>& str) { unsigned int nSize = ReadCompactSize(is); str.resize(nSize); @@ -625,30 +566,8 @@ void Unserialize(Stream& is, std::basic_string<C>& str, int, int) /** * prevector */ -template<unsigned int N, typename T> -unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const unsigned char&) -{ - return (GetSizeOfCompactSize(v.size()) + v.size() * sizeof(T)); -} - -template<unsigned int N, typename T, typename V> -unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const V&) -{ - unsigned int nSize = GetSizeOfCompactSize(v.size()); - for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi) - nSize += GetSerializeSize((*vi), nType, nVersion); - return nSize; -} - -template<unsigned int N, typename T> -inline unsigned int GetSerializeSize(const 
prevector<N, T>& v, int nType, int nVersion) -{ - return GetSerializeSize_impl(v, nType, nVersion, T()); -} - - template<typename Stream, unsigned int N, typename T> -void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const unsigned char&) +void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&) { WriteCompactSize(os, v.size()); if (!v.empty()) @@ -656,22 +575,22 @@ void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersio } template<typename Stream, unsigned int N, typename T, typename V> -void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const V&) +void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&) { WriteCompactSize(os, v.size()); for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi) - ::Serialize(os, (*vi), nType, nVersion); + ::Serialize(os, (*vi)); } template<typename Stream, unsigned int N, typename T> -inline void Serialize(Stream& os, const prevector<N, T>& v, int nType, int nVersion) +inline void Serialize(Stream& os, const prevector<N, T>& v) { - Serialize_impl(os, v, nType, nVersion, T()); + Serialize_impl(os, v, T()); } template<typename Stream, unsigned int N, typename T> -void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const unsigned char&) +void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&) { // Limit size per read so bogus size value won't cause out of memory v.clear(); @@ -687,7 +606,7 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, c } template<typename Stream, unsigned int N, typename T, typename V> -void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const V&) +void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&) { v.clear(); unsigned int nSize = ReadCompactSize(is); @@ -700,14 +619,14 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, c nMid = nSize; v.resize(nMid); for (; i < nMid; i++) - Unserialize(is, v[i], nType, nVersion); + Unserialize(is, v[i]); } } template<typename Stream, unsigned int N, typename T> -inline void Unserialize(Stream& is, prevector<N, T>& v, int nType, int nVersion) +inline void Unserialize(Stream& is, prevector<N, T>& v) { - Unserialize_impl(is, v, nType, nVersion, T()); + Unserialize_impl(is, v, T()); } @@ -715,30 +634,8 @@ inline void Unserialize(Stream& is, prevector<N, T>& v, int nType, int nVersion) /** * vector */ -template<typename T, typename A> -unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&) -{ - return (GetSizeOfCompactSize(v.size()) + v.size() * sizeof(T)); -} - -template<typename T, typename A, typename V> -unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const V&) -{ - unsigned int nSize = GetSizeOfCompactSize(v.size()); - for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi) - nSize += GetSerializeSize((*vi), nType, nVersion); - return nSize; -} - -template<typename T, typename A> -inline unsigned int GetSerializeSize(const std::vector<T, A>& v, int nType, int nVersion) -{ - return GetSerializeSize_impl(v, nType, nVersion, T()); -} - - template<typename Stream, typename T, typename A> -void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&) +void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&) { 
WriteCompactSize(os, v.size()); if (!v.empty()) @@ -746,22 +643,22 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVers } template<typename Stream, typename T, typename A, typename V> -void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const V&) +void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&) { WriteCompactSize(os, v.size()); for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi) - ::Serialize(os, (*vi), nType, nVersion); + ::Serialize(os, (*vi)); } template<typename Stream, typename T, typename A> -inline void Serialize(Stream& os, const std::vector<T, A>& v, int nType, int nVersion) +inline void Serialize(Stream& os, const std::vector<T, A>& v) { - Serialize_impl(os, v, nType, nVersion, T()); + Serialize_impl(os, v, T()); } template<typename Stream, typename T, typename A> -void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const unsigned char&) +void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&) { // Limit size per read so bogus size value won't cause out of memory v.clear(); @@ -777,7 +674,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, } template<typename Stream, typename T, typename A, typename V> -void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const V&) +void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&) { v.clear(); unsigned int nSize = ReadCompactSize(is); @@ -790,14 +687,14 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, nMid = nSize; v.resize(nMid); for (; i < nMid; i++) - Unserialize(is, v[i], nType, nVersion); + Unserialize(is, v[i]); } } template<typename Stream, typename T, typename A> -inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersion) +inline void Unserialize(Stream& is, std::vector<T, A>& v) { - Unserialize_impl(is, v, nType, nVersion, T()); + Unserialize_impl(is, v, T()); } @@ -805,24 +702,18 @@ inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersio /** * pair */ -template<typename K, typename T> -unsigned int GetSerializeSize(const std::pair<K, T>& item, int nType, int nVersion) -{ - return GetSerializeSize(item.first, nType, nVersion) + GetSerializeSize(item.second, nType, nVersion); -} - template<typename Stream, typename K, typename T> -void Serialize(Stream& os, const std::pair<K, T>& item, int nType, int nVersion) +void Serialize(Stream& os, const std::pair<K, T>& item) { - Serialize(os, item.first, nType, nVersion); - Serialize(os, item.second, nType, nVersion); + Serialize(os, item.first); + Serialize(os, item.second); } template<typename Stream, typename K, typename T> -void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion) +void Unserialize(Stream& is, std::pair<K, T>& item) { - Unserialize(is, item.first, nType, nVersion); - Unserialize(is, item.second, nType, nVersion); + Unserialize(is, item.first); + Unserialize(is, item.second); } @@ -830,25 +721,16 @@ void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion) /** * map */ -template<typename K, typename T, typename Pred, typename A> -unsigned int GetSerializeSize(const std::map<K, T, Pred, A>& m, int nType, int nVersion) -{ - unsigned int nSize = GetSizeOfCompactSize(m.size()); - for (typename std::map<K, T, Pred, A>::const_iterator mi = m.begin(); mi != m.end(); ++mi) - nSize += GetSerializeSize((*mi), nType, 
nVersion); - return nSize; -} - template<typename Stream, typename K, typename T, typename Pred, typename A> -void Serialize(Stream& os, const std::map<K, T, Pred, A>& m, int nType, int nVersion) +void Serialize(Stream& os, const std::map<K, T, Pred, A>& m) { WriteCompactSize(os, m.size()); for (typename std::map<K, T, Pred, A>::const_iterator mi = m.begin(); mi != m.end(); ++mi) - Serialize(os, (*mi), nType, nVersion); + Serialize(os, (*mi)); } template<typename Stream, typename K, typename T, typename Pred, typename A> -void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion) +void Unserialize(Stream& is, std::map<K, T, Pred, A>& m) { m.clear(); unsigned int nSize = ReadCompactSize(is); @@ -856,7 +738,7 @@ void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion for (unsigned int i = 0; i < nSize; i++) { std::pair<K, T> item; - Unserialize(is, item, nType, nVersion); + Unserialize(is, item); mi = m.insert(mi, item); } } @@ -866,25 +748,16 @@ void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion /** * set */ -template<typename K, typename Pred, typename A> -unsigned int GetSerializeSize(const std::set<K, Pred, A>& m, int nType, int nVersion) -{ - unsigned int nSize = GetSizeOfCompactSize(m.size()); - for (typename std::set<K, Pred, A>::const_iterator it = m.begin(); it != m.end(); ++it) - nSize += GetSerializeSize((*it), nType, nVersion); - return nSize; -} - template<typename Stream, typename K, typename Pred, typename A> -void Serialize(Stream& os, const std::set<K, Pred, A>& m, int nType, int nVersion) +void Serialize(Stream& os, const std::set<K, Pred, A>& m) { WriteCompactSize(os, m.size()); for (typename std::set<K, Pred, A>::const_iterator it = m.begin(); it != m.end(); ++it) - Serialize(os, (*it), nType, nVersion); + Serialize(os, (*it)); } template<typename Stream, typename K, typename Pred, typename A> -void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion) +void Unserialize(Stream& is, std::set<K, Pred, A>& m) { m.clear(); unsigned int nSize = ReadCompactSize(is); @@ -892,7 +765,7 @@ void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion) for (unsigned int i = 0; i < nSize; i++) { K key; - Unserialize(is, key, nType, nVersion); + Unserialize(is, key); it = m.insert(it, key); } } @@ -900,27 +773,61 @@ void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion) /** + * unique_ptr + */ +template<typename Stream, typename T> void +Serialize(Stream& os, const std::unique_ptr<const T>& p) +{ + Serialize(os, *p); +} + +template<typename Stream, typename T> +void Unserialize(Stream& is, std::unique_ptr<const T>& p) +{ + p.reset(new T(deserialize, is)); +} + + + +/** + * shared_ptr + */ +template<typename Stream, typename T> void +Serialize(Stream& os, const std::shared_ptr<const T>& p) +{ + Serialize(os, *p); +} + +template<typename Stream, typename T> +void Unserialize(Stream& is, std::shared_ptr<const T>& p) +{ + p = std::make_shared<const T>(deserialize, is); +} + + + +/** * Support for ADD_SERIALIZE_METHODS and READWRITE macro */ struct CSerActionSerialize { - bool ForRead() const { return false; } + constexpr bool ForRead() const { return false; } }; struct CSerActionUnserialize { - bool ForRead() const { return true; } + constexpr bool ForRead() const { return true; } }; template<typename Stream, typename T> -inline void SerReadWrite(Stream& s, const T& obj, int nType, int nVersion, CSerActionSerialize ser_action) +inline void 
SerReadWrite(Stream& s, const T& obj, CSerActionSerialize ser_action) { - ::Serialize(s, obj, nType, nVersion); + ::Serialize(s, obj); } template<typename Stream, typename T> -inline void SerReadWrite(Stream& s, T& obj, int nType, int nVersion, CSerActionUnserialize ser_action) +inline void SerReadWrite(Stream& s, T& obj, CSerActionUnserialize ser_action) { - ::Unserialize(s, obj, nType, nVersion); + ::Unserialize(s, obj); } @@ -931,33 +838,122 @@ inline void SerReadWrite(Stream& s, T& obj, int nType, int nVersion, CSerActionU +/* ::GetSerializeSize implementations + * + * Computing the serialized size of objects is done through a special stream + * object of type CSizeComputer, which only records the number of bytes written + * to it. + * + * If your Serialize or SerializationOp method has non-trivial overhead for + * serialization, it may be worthwhile to implement a specialized version for + * CSizeComputer, which uses the s.seek() method to record bytes that would + * be written instead. + */ class CSizeComputer { protected: size_t nSize; + const int nType; + const int nVersion; public: - int nType; - int nVersion; - CSizeComputer(int nTypeIn, int nVersionIn) : nSize(0), nType(nTypeIn), nVersion(nVersionIn) {} - CSizeComputer& write(const char *psz, size_t nSize) + void write(const char *psz, size_t _nSize) { - this->nSize += nSize; - return *this; + this->nSize += _nSize; + } + + /** Pretend _nSize bytes are written, without specifying them. */ + void seek(size_t _nSize) + { + this->nSize += _nSize; } template<typename T> CSizeComputer& operator<<(const T& obj) { - ::Serialize(*this, obj, nType, nVersion); + ::Serialize(*this, obj); return (*this); } size_t size() const { return nSize; } + + int GetVersion() const { return nVersion; } + int GetType() const { return nType; } }; +template<typename Stream> +void SerializeMany(Stream& s) +{ +} + +template<typename Stream, typename Arg> +void SerializeMany(Stream& s, Arg&& arg) +{ + ::Serialize(s, std::forward<Arg>(arg)); +} + +template<typename Stream, typename Arg, typename... Args> +void SerializeMany(Stream& s, Arg&& arg, Args&&... args) +{ + ::Serialize(s, std::forward<Arg>(arg)); + ::SerializeMany(s, std::forward<Args>(args)...); +} + +template<typename Stream> +inline void UnserializeMany(Stream& s) +{ +} + +template<typename Stream, typename Arg> +inline void UnserializeMany(Stream& s, Arg& arg) +{ + ::Unserialize(s, arg); +} + +template<typename Stream, typename Arg, typename... Args> +inline void UnserializeMany(Stream& s, Arg& arg, Args&... args) +{ + ::Unserialize(s, arg); + ::UnserializeMany(s, args...); +} + +template<typename Stream, typename... Args> +inline void SerReadWriteMany(Stream& s, CSerActionSerialize ser_action, Args&&... args) +{ + ::SerializeMany(s, std::forward<Args>(args)...); +} + +template<typename Stream, typename... Args> +inline void SerReadWriteMany(Stream& s, CSerActionUnserialize ser_action, Args&... 
args) +{ + ::UnserializeMany(s, args...); +} + +template<typename I> +inline void WriteVarInt(CSizeComputer &s, I n) +{ + s.seek(GetSizeOfVarInt<I>(n)); +} + +inline void WriteCompactSize(CSizeComputer &s, uint64_t nSize) +{ + s.seek(GetSizeOfCompactSize(nSize)); +} + +template <typename T> +size_t GetSerializeSize(const T& t, int nType, int nVersion = 0) +{ + return (CSizeComputer(nType, nVersion) << t).size(); +} + +template <typename S, typename T> +size_t GetSerializeSize(const S& s, const T& t) +{ + return (CSizeComputer(s.GetType(), s.GetVersion()) << t).size(); +} + #endif // BITCOIN_SERIALIZE_H diff --git a/src/streams.h b/src/streams.h index 7132364eb1..b508784238 100644 --- a/src/streams.h +++ b/src/streams.h @@ -26,17 +26,18 @@ template<typename Stream> class OverrideStream { Stream* stream; -public: + const int nType; const int nVersion; +public: OverrideStream(Stream* stream_, int nType_, int nVersion_) : stream(stream_), nType(nType_), nVersion(nVersion_) {} template<typename T> OverrideStream<Stream>& operator<<(const T& obj) { // Serialize to this stream - ::Serialize(*this->stream, obj, nType, nVersion); + ::Serialize(*this, obj); return (*this); } @@ -44,9 +45,22 @@ public: OverrideStream<Stream>& operator>>(T& obj) { // Unserialize from this stream - ::Unserialize(*this->stream, obj, nType, nVersion); + ::Unserialize(*this, obj); return (*this); } + + void write(const char* pch, size_t nSize) + { + stream->write(pch, nSize); + } + + void read(char* pch, size_t nSize) + { + stream->read(pch, nSize); + } + + int GetVersion() const { return nVersion; } + int GetType() const { return nType; } }; template<typename S> @@ -55,6 +69,75 @@ OverrideStream<S> WithOrVersion(S* s, int nVersionFlag) return OverrideStream<S>(s, s->GetType(), s->GetVersion() | nVersionFlag); } +/* Minimal stream for overwriting and/or appending to an existing byte vector + * + * The referenced vector will grow as necessary + */ +class CVectorWriter +{ + public: + +/* + * @param[in] nTypeIn Serialization Type + * @param[in] nVersionIn Serialization Version (including any flags) + * @param[in] vchDataIn Referenced byte vector to overwrite/append + * @param[in] nPosIn Starting position. Vector index where writes should start. The vector will initially + * grow as necessary to max(index, vec.size()). So to append, use vec.size(). +*/ + CVectorWriter(int nTypeIn, int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn) : nType(nTypeIn), nVersion(nVersionIn), vchData(vchDataIn), nPos(nPosIn) + { + if(nPos > vchData.size()) + vchData.resize(nPos); + } +/* + * (other params same as above) + * @param[in] args A list of items to serialize starting at nPos. +*/ + template <typename... Args> + CVectorWriter(int nTypeIn, int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn, Args&&... 
args) : CVectorWriter(nTypeIn, nVersionIn, vchDataIn, nPosIn) + { + ::SerializeMany(*this, std::forward<Args>(args)...); + } + void write(const char* pch, size_t nSize) + { + assert(nPos <= vchData.size()); + size_t nOverwrite = std::min(nSize, vchData.size() - nPos); + if (nOverwrite) { + memcpy(vchData.data() + nPos, reinterpret_cast<const unsigned char*>(pch), nOverwrite); + } + if (nOverwrite < nSize) { + vchData.insert(vchData.end(), reinterpret_cast<const unsigned char*>(pch) + nOverwrite, reinterpret_cast<const unsigned char*>(pch) + nSize); + } + nPos += nSize; + } + template<typename T> + CVectorWriter& operator<<(const T& obj) + { + // Serialize to this stream + ::Serialize(*this, obj); + return (*this); + } + int GetVersion() const + { + return nVersion; + } + int GetType() const + { + return nType; + } + void seek(size_t nSize) + { + nPos += nSize; + if(nPos > vchData.size()) + vchData.resize(nPos); + } +private: + const int nType; + const int nVersion; + std::vector<unsigned char>& vchData; + size_t nPos; +}; + /** Double ended buffer combining vector and stream-like interfaces. * * >> and << read and write unformatted data using the above serialization templates. @@ -66,9 +149,10 @@ protected: typedef CSerializeData vector_type; vector_type vch; unsigned int nReadPos; -public: + int nType; int nVersion; +public: typedef vector_type::allocator_type allocator_type; typedef vector_type::size_type size_type; @@ -112,6 +196,13 @@ public: Init(nTypeIn, nVersionIn); } + template <typename... Args> + CDataStream(int nTypeIn, int nVersionIn, Args&&... args) + { + Init(nTypeIn, nVersionIn); + ::SerializeMany(*this, std::forward<Args>(args)...); + } + void Init(int nTypeIn, int nVersionIn) { nReadPos = 0; @@ -244,13 +335,11 @@ public: int in_avail() { return size(); } void SetType(int n) { nType = n; } - int GetType() { return nType; } + int GetType() const { return nType; } void SetVersion(int n) { nVersion = n; } - int GetVersion() { return nVersion; } - void ReadVersion() { *this >> nVersion; } - void WriteVersion() { *this << nVersion; } + int GetVersion() const { return nVersion; } - CDataStream& read(char* pch, size_t nSize) + void read(char* pch, size_t nSize) { // Read from the beginning of the buffer unsigned int nReadPosNext = nReadPos + nSize; @@ -263,14 +352,13 @@ public: memcpy(pch, &vch[nReadPos], nSize); nReadPos = 0; vch.clear(); - return (*this); + return; } memcpy(pch, &vch[nReadPos], nSize); nReadPos = nReadPosNext; - return (*this); } - CDataStream& ignore(int nSize) + void ignore(int nSize) { // Ignore from the beginning of the buffer if (nSize < 0) { @@ -283,21 +371,19 @@ public: throw std::ios_base::failure("CDataStream::ignore(): end of data"); nReadPos = 0; vch.clear(); - return (*this); + return; } nReadPos = nReadPosNext; - return (*this); } - CDataStream& write(const char* pch, size_t nSize) + void write(const char* pch, size_t nSize) { // Write to the end of the buffer vch.insert(vch.end(), pch, pch + nSize); - return (*this); } template<typename Stream> - void Serialize(Stream& s, int nType, int nVersion) const + void Serialize(Stream& s) const { // Special case: stream << stream concatenates like stream += stream if (!vch.empty()) @@ -305,17 +391,10 @@ public: } template<typename T> - unsigned int GetSerializeSize(const T& obj) - { - // Tells the size of the object if serialized to this stream - return ::GetSerializeSize(obj, nType, nVersion); - } - - template<typename T> CDataStream& operator<<(const T& obj) { // Serialize to this stream - 
::Serialize(*this, obj, nType, nVersion); + ::Serialize(*this, obj); return (*this); } @@ -323,7 +402,7 @@ public: CDataStream& operator>>(T& obj) { // Unserialize from this stream - ::Unserialize(*this, obj, nType, nVersion); + ::Unserialize(*this, obj); return (*this); } @@ -378,17 +457,15 @@ private: CAutoFile(const CAutoFile&); CAutoFile& operator=(const CAutoFile&); - int nType; - int nVersion; - + const int nType; + const int nVersion; + FILE* file; public: - CAutoFile(FILE* filenew, int nTypeIn, int nVersionIn) + CAutoFile(FILE* filenew, int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) { file = filenew; - nType = nTypeIn; - nVersion = nVersionIn; } ~CAutoFile() @@ -423,23 +500,18 @@ public: // // Stream subset // - void SetType(int n) { nType = n; } - int GetType() { return nType; } - void SetVersion(int n) { nVersion = n; } - int GetVersion() { return nVersion; } - void ReadVersion() { *this >> nVersion; } - void WriteVersion() { *this << nVersion; } + int GetType() const { return nType; } + int GetVersion() const { return nVersion; } - CAutoFile& read(char* pch, size_t nSize) + void read(char* pch, size_t nSize) { if (!file) throw std::ios_base::failure("CAutoFile::read: file handle is NULL"); if (fread(pch, 1, nSize, file) != nSize) throw std::ios_base::failure(feof(file) ? "CAutoFile::read: end of file" : "CAutoFile::read: fread failed"); - return (*this); } - CAutoFile& ignore(size_t nSize) + void ignore(size_t nSize) { if (!file) throw std::ios_base::failure("CAutoFile::ignore: file handle is NULL"); @@ -450,23 +522,14 @@ public: throw std::ios_base::failure(feof(file) ? "CAutoFile::ignore: end of file" : "CAutoFile::read: fread failed"); nSize -= nNow; } - return (*this); } - CAutoFile& write(const char* pch, size_t nSize) + void write(const char* pch, size_t nSize) { if (!file) throw std::ios_base::failure("CAutoFile::write: file handle is NULL"); if (fwrite(pch, 1, nSize, file) != nSize) throw std::ios_base::failure("CAutoFile::write: write failed"); - return (*this); - } - - template<typename T> - unsigned int GetSerializeSize(const T& obj) - { - // Tells the size of the object if serialized to this stream - return ::GetSerializeSize(obj, nType, nVersion); } template<typename T> @@ -475,7 +538,7 @@ public: // Serialize to this stream if (!file) throw std::ios_base::failure("CAutoFile::operator<<: file handle is NULL"); - ::Serialize(*this, obj, nType, nVersion); + ::Serialize(*this, obj); return (*this); } @@ -485,7 +548,7 @@ public: // Unserialize from this stream if (!file) throw std::ios_base::failure("CAutoFile::operator>>: file handle is NULL"); - ::Unserialize(*this, obj, nType, nVersion); + ::Unserialize(*this, obj); return (*this); } }; @@ -503,8 +566,8 @@ private: CBufferedFile(const CBufferedFile&); CBufferedFile& operator=(const CBufferedFile&); - int nType; - int nVersion; + const int nType; + const int nVersion; FILE *src; // source file uint64_t nSrcPos; // how many bytes have been read from source @@ -534,11 +597,9 @@ protected: public: CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) : - nSrcPos(0), nReadPos(0), nReadLimit((uint64_t)(-1)), nRewind(nRewindIn), vchBuf(nBufSize, 0) + nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit((uint64_t)(-1)), nRewind(nRewindIn), vchBuf(nBufSize, 0) { src = fileIn; - nType = nTypeIn; - nVersion = nVersionIn; } ~CBufferedFile() @@ -546,6 +607,9 @@ public: fclose(); } + int GetVersion() const { return nVersion; } + int GetType() const 
{ return nType; } + void fclose() { if (src) { @@ -560,7 +624,7 @@ public: } // read a number of bytes - CBufferedFile& read(char *pch, size_t nSize) { + void read(char *pch, size_t nSize) { if (nSize + nReadPos > nReadLimit) throw std::ios_base::failure("Read attempted past buffer limit"); if (nSize + nRewind > vchBuf.size()) @@ -579,7 +643,6 @@ public: pch += nNow; nSize -= nNow; } - return (*this); } // return the current reading position @@ -625,7 +688,7 @@ public: template<typename T> CBufferedFile& operator>>(T& obj) { // Unserialize from this stream - ::Unserialize(*this, obj, nType, nVersion); + ::Unserialize(*this, obj); return (*this); } diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h index 1ec40fe830..67064314ef 100644 --- a/src/support/allocators/secure.h +++ b/src/support/allocators/secure.h @@ -6,7 +6,8 @@ #ifndef BITCOIN_SUPPORT_ALLOCATORS_SECURE_H #define BITCOIN_SUPPORT_ALLOCATORS_SECURE_H -#include "support/pagelocker.h" +#include "support/lockedpool.h" +#include "support/cleanse.h" #include <string> @@ -39,20 +40,15 @@ struct secure_allocator : public std::allocator<T> { T* allocate(std::size_t n, const void* hint = 0) { - T* p; - p = std::allocator<T>::allocate(n, hint); - if (p != NULL) - LockedPageManager::Instance().LockRange(p, sizeof(T) * n); - return p; + return static_cast<T*>(LockedPoolManager::Instance().alloc(sizeof(T) * n)); } void deallocate(T* p, std::size_t n) { if (p != NULL) { memory_cleanse(p, sizeof(T) * n); - LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n); } - std::allocator<T>::deallocate(p, n); + LockedPoolManager::Instance().free(p); } }; diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp new file mode 100644 index 0000000000..01273c9791 --- /dev/null +++ b/src/support/lockedpool.cpp @@ -0,0 +1,385 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
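The net effect of the secure.h change above is that every allocation made through secure_allocator is now served from one process-wide locked pool instead of locking individual heap pages. A minimal usage sketch (secure_buffer_demo is illustrative only, not part of this diff):

    #include <vector>
    #include "support/allocators/secure.h"

    void secure_buffer_demo()
    {
        // Backing storage comes from LockedPoolManager::Instance().alloc(), so
        // it sits in mlock'd/VirtualLock'd pages where the OS permits it.
        std::vector<unsigned char, secure_allocator<unsigned char> > vchKey(32);

        // ... fill vchKey with key material ...

        // On destruction, deallocate() runs memory_cleanse() over the bytes and
        // returns them to the locked pool.
    }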
+ +#include "support/lockedpool.h" +#include "support/cleanse.h" + +#if defined(HAVE_CONFIG_H) +#include "config/bitcoin-config.h" +#endif + +#ifdef WIN32 +#ifdef _WIN32_WINNT +#undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0501 +#define WIN32_LEAN_AND_MEAN 1 +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include <windows.h> +#else +#include <sys/mman.h> // for mmap +#include <sys/resource.h> // for getrlimit +#include <limits.h> // for PAGESIZE +#include <unistd.h> // for sysconf +#endif + +#include <algorithm> + +LockedPoolManager* LockedPoolManager::_instance = NULL; +std::once_flag LockedPoolManager::init_flag; + +/*******************************************************************************/ +// Utilities +// +/** Align up to power of 2 */ +static inline size_t align_up(size_t x, size_t align) +{ + return (x + align - 1) & ~(align - 1); +} + +/*******************************************************************************/ +// Implementation: Arena + +Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): + base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in) +{ + // Start with one free chunk that covers the entire arena + chunks_free.emplace(base, size_in); +} + +Arena::~Arena() +{ +} + +void* Arena::alloc(size_t size) +{ + // Round to next multiple of alignment + size = align_up(size, alignment); + + // Don't handle zero-sized chunks + if (size == 0) + return nullptr; + + // Pick a large enough free-chunk + auto it = std::find_if(chunks_free.begin(), chunks_free.end(), + [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; }); + if (it == chunks_free.end()) + return nullptr; + + // Create the used-chunk, taking its space from the end of the free-chunk + auto alloced = chunks_used.emplace(it->first + it->second - size, size).first; + if (!(it->second -= size)) + chunks_free.erase(it); + return reinterpret_cast<void*>(alloced->first); +} + +/* extend the Iterator if other begins at its end */ +template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) { + if (it->first + it->second == other.first) { + it->second += other.second; + return true; + } + return false; +} + +void Arena::free(void *ptr) +{ + // Freeing the NULL pointer is OK. + if (ptr == nullptr) { + return; + } + + // Remove chunk from used map + auto i = chunks_used.find(static_cast<char*>(ptr)); + if (i == chunks_used.end()) { + throw std::runtime_error("Arena: invalid or double free"); + } + auto freed = *i; + chunks_used.erase(i); + + // Add space to free map, coalescing contiguous chunks + auto next = chunks_free.upper_bound(freed.first); + auto prev = (next == chunks_free.begin()) ? 
chunks_free.end() : std::prev(next); + if (prev == chunks_free.end() || !extend(prev, freed)) + prev = chunks_free.emplace_hint(next, freed); + if (next != chunks_free.end() && extend(prev, *next)) + chunks_free.erase(next); +} + +Arena::Stats Arena::stats() const +{ + Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() }; + for (const auto& chunk: chunks_used) + r.used += chunk.second; + for (const auto& chunk: chunks_free) + r.free += chunk.second; + r.total = r.used + r.free; + return r; +} + +#ifdef ARENA_DEBUG +void printchunk(char* base, size_t sz, bool used) { + std::cout << + "0x" << std::hex << std::setw(16) << std::setfill('0') << base << + " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz << + " 0x" << used << std::endl; +} +void Arena::walk() const +{ + for (const auto& chunk: chunks_used) + printchunk(chunk.first, chunk.second, true); + std::cout << std::endl; + for (const auto& chunk: chunks_free) + printchunk(chunk.first, chunk.second, false); + std::cout << std::endl; +} +#endif + +/*******************************************************************************/ +// Implementation: Win32LockedPageAllocator + +#ifdef WIN32 +/** LockedPageAllocator specialized for Windows. + */ +class Win32LockedPageAllocator: public LockedPageAllocator +{ +public: + Win32LockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +Win32LockedPageAllocator::Win32LockedPageAllocator() +{ + // Determine system page size in bytes + SYSTEM_INFO sSysInfo; + GetSystemInfo(&sSysInfo); + page_size = sSysInfo.dwPageSize; +} +void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + len = align_up(len, page_size); + void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (addr) { + // VirtualLock is used to attempt to keep keying material out of swap. Note + // that it does not provide this as a guarantee, but, in practice, memory + // that has been VirtualLock'd almost never gets written to the pagefile + // except in rare circumstances where memory is extremely low. + *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0; + } + return addr; +} +void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + VirtualUnlock(const_cast<void*>(addr), len); +} + +size_t Win32LockedPageAllocator::GetLimit() +{ + // TODO is there a limit on windows, how to get it? + return std::numeric_limits<size_t>::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: PosixLockedPageAllocator + +#ifndef WIN32 +/** LockedPageAllocator specialized for OSes that don't try to be + * special snowflakes. 
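To make the Arena bookkeeping above concrete, a small exercise of its first-fit alloc() and coalescing free() on an ordinary heap buffer (arena_demo and the sizes are invented; inside LockedPool the backing memory is page-locked instead):

    #include <vector>
    #include "support/lockedpool.h"

    void arena_demo()
    {
        std::vector<char> backing(4096);
        Arena a(backing.data(), backing.size(), 16); // 16-byte alignment

        void* p1 = a.alloc(100); // rounded up to 112, carved from the end of the free chunk
        void* p2 = a.alloc(200); // rounded up to 208

        a.free(p1);
        a.free(p2);              // neighbouring free chunks coalesce again

        Arena::Stats s = a.stats();
        // s.used == 0 and s.chunks_free == 1 once everything has been returned
        (void)s;
    }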
+ */ +class PosixLockedPageAllocator: public LockedPageAllocator +{ +public: + PosixLockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +PosixLockedPageAllocator::PosixLockedPageAllocator() +{ + // Determine system page size in bytes +#if defined(PAGESIZE) // defined in limits.h + page_size = PAGESIZE; +#else // assume some POSIX OS + page_size = sysconf(_SC_PAGESIZE); +#endif +} + +// Some systems (at least OS X) do not define MAP_ANONYMOUS yet and define +// MAP_ANON which is deprecated +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + void *addr; + len = align_up(len, page_size); + addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (addr) { + *lockingSuccess = mlock(addr, len) == 0; + } + return addr; +} +void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + munlock(addr, len); + munmap(addr, len); +} +size_t PosixLockedPageAllocator::GetLimit() +{ +#ifdef RLIMIT_MEMLOCK + struct rlimit rlim; + if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) { + if (rlim.rlim_cur != RLIM_INFINITY) { + return rlim.rlim_cur; + } + } +#endif + return std::numeric_limits<size_t>::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: LockedPool + +LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in): + allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0) +{ +} + +LockedPool::~LockedPool() +{ +} +void* LockedPool::alloc(size_t size) +{ + std::lock_guard<std::mutex> lock(mutex); + + // Don't handle impossible sizes + if (size == 0 || size > ARENA_SIZE) + return nullptr; + + // Try allocating from each current arena + for (auto &arena: arenas) { + void *addr = arena.alloc(size); + if (addr) { + return addr; + } + } + // If that fails, create a new one + if (new_arena(ARENA_SIZE, ARENA_ALIGN)) { + return arenas.back().alloc(size); + } + return nullptr; +} + +void LockedPool::free(void *ptr) +{ + std::lock_guard<std::mutex> lock(mutex); + // TODO we can do better than this linear search by keeping a map of arena + // extents to arena, and looking up the address. + for (auto &arena: arenas) { + if (arena.addressInArena(ptr)) { + arena.free(ptr); + return; + } + } + throw std::runtime_error("LockedPool: invalid address not pointing to any arena"); +} + +LockedPool::Stats LockedPool::stats() const +{ + std::lock_guard<std::mutex> lock(mutex); + LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0}; + for (const auto &arena: arenas) { + Arena::Stats i = arena.stats(); + r.used += i.used; + r.free += i.free; + r.total += i.total; + r.chunks_used += i.chunks_used; + r.chunks_free += i.chunks_free; + } + return r; +} + +bool LockedPool::new_arena(size_t size, size_t align) +{ + bool locked; + // If this is the first arena, handle this specially: Cap the upper size + // by the process limit. This makes sure that the first arena will at least + // be locked. An exception to this is if the process limit is 0: + // in this case no memory can be locked at all so we'll skip past this logic. 
+ if (arenas.empty()) { + size_t limit = allocator->GetLimit(); + if (limit > 0) { + size = std::min(size, limit); + } + } + void *addr = allocator->AllocateLocked(size, &locked); + if (!addr) { + return false; + } + if (locked) { + cumulative_bytes_locked += size; + } else if (lf_cb) { // Call the locking-failed callback if locking failed + if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed. + allocator->FreeLocked(addr, size); + return false; + } + } + arenas.emplace_back(allocator.get(), addr, size, align); + return true; +} + +LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in): + Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in) +{ +} +LockedPool::LockedPageArena::~LockedPageArena() +{ + allocator->FreeLocked(base, size); +} + +/*******************************************************************************/ +// Implementation: LockedPoolManager +// +LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator): + LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed) +{ +} + +bool LockedPoolManager::LockingFailed() +{ + // TODO: log something but how? without including util.h + return true; +} + +void LockedPoolManager::CreateInstance() +{ + // Using a local static instance guarantees that the object is initialized + // when it's first needed and also deinitialized after all objects that use + // it are done with it. I can think of one unlikely scenario where we may + // have a static deinitialization order/problem, but the check in + // LockedPoolManagerBase's destructor helps us detect if that ever happens. +#ifdef WIN32 + std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator()); +#else + std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator()); +#endif + static LockedPoolManager instance(std::move(allocator)); + LockedPoolManager::_instance = &instance; +} diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h new file mode 100644 index 0000000000..f5212bc266 --- /dev/null +++ b/src/support/lockedpool.h @@ -0,0 +1,231 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H +#define BITCOIN_SUPPORT_LOCKEDPOOL_H + +#include <stdint.h> +#include <list> +#include <map> +#include <mutex> +#include <memory> + +/** + * OS-dependent allocation and deallocation of locked/pinned memory pages. + * Abstract base class. + */ +class LockedPageAllocator +{ +public: + virtual ~LockedPageAllocator() {} + /** Allocate and lock memory pages. + * If len is not a multiple of the system page size, it is rounded up. + * Returns 0 in case of allocation failure. + * + * If locking the memory pages could not be accomplished it will still + * return the memory, however the lockingSuccess flag will be false. + * lockingSuccess is undefined if the allocation fails. + */ + virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0; + + /** Unlock and free memory pages. + * Clear the memory before unlocking. + */ + virtual void FreeLocked(void* addr, size_t len) = 0; + + /** Get the total limit on the amount of memory that may be locked by this + * process, in bytes. Return size_t max if there is no limit or the limit + * is unknown. 
Return 0 if no memory can be locked at all. + */ + virtual size_t GetLimit() = 0; +}; + +/* An arena manages a contiguous region of memory by dividing it into + * chunks. + */ +class Arena +{ +public: + Arena(void *base, size_t size, size_t alignment); + virtual ~Arena(); + + /** Memory statistics. */ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t chunks_used; + size_t chunks_free; + }; + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get arena usage statistics */ + Stats stats() const; + +#ifdef ARENA_DEBUG + void walk() const; +#endif + + /** Return whether a pointer points inside this arena. + * This returns base <= ptr < (base+size) so only use it for (inclusive) + * chunk starting addresses. + */ + bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; } +private: + Arena(const Arena& other) = delete; // non construction-copyable + Arena& operator=(const Arena&) = delete; // non copyable + + /** Map of chunk address to chunk information. This class makes use of the + * sorted order to merge previous and next chunks during deallocation. + */ + std::map<char*, size_t> chunks_free; + std::map<char*, size_t> chunks_used; + /** Base address of arena */ + char* base; + /** End address of arena */ + char* end; + /** Minimum chunk alignment */ + size_t alignment; +}; + +/** Pool for locked memory chunks. + * + * To avoid sensitive key data from being swapped to disk, the memory in this pool + * is locked/pinned. + * + * An arena manages a contiguous region of memory. The pool starts out with one arena + * but can grow to multiple arenas if the need arises. + * + * Unlike a normal C heap, the administrative structures are separate from the managed + * memory. This has been done as the sizes and bases of objects are not in themselves sensitive + * information, as to conserve precious locked memory. In some operating systems + * the amount of memory that can be locked is small. + */ +class LockedPool +{ +public: + /** Size of one arena of locked memory. This is a compromise. + * Do not set this too low, as managing many arenas will increase + * allocation and deallocation overhead. Setting it too high allocates + * more locked memory from the OS than strictly necessary. + */ + static const size_t ARENA_SIZE = 256*1024; + /** Chunk alignment. Another compromise. Setting this too high will waste + * memory, setting it too low will facilitate fragmentation. + */ + static const size_t ARENA_ALIGN = 16; + + /** Callback when allocation succeeds but locking fails. + */ + typedef bool (*LockingFailed_Callback)(); + + /** Memory statistics. */ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t locked; + size_t chunks_used; + size_t chunks_free; + }; + + /** Create a new LockedPool. This takes ownership of the MemoryPageLocker, + * you can only instantiate this with LockedPool(std::move(...)). + * + * The second argument is an optional callback when locking a newly allocated arena failed. + * If this callback is provided and returns false, the allocation fails (hard fail), if + * it returns true the allocation proceeds, but it could warn. 
+ */ + LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0); + ~LockedPool(); + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get pool usage statistics */ + Stats stats() const; +private: + LockedPool(const LockedPool& other) = delete; // non construction-copyable + LockedPool& operator=(const LockedPool&) = delete; // non copyable + + std::unique_ptr<LockedPageAllocator> allocator; + + /** Create an arena from locked pages */ + class LockedPageArena: public Arena + { + public: + LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align); + ~LockedPageArena(); + private: + void *base; + size_t size; + LockedPageAllocator *allocator; + }; + + bool new_arena(size_t size, size_t align); + + std::list<LockedPageArena> arenas; + LockingFailed_Callback lf_cb; + size_t cumulative_bytes_locked; + /** Mutex protects access to this pool's data structures, including arenas. + */ + mutable std::mutex mutex; +}; + +/** + * Singleton class to keep track of locked (ie, non-swappable) memory, for use in + * std::allocator templates. + * + * Some implementations of the STL allocate memory in some constructors (i.e., see + * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.) + * Due to the unpredictable order of static initializers, we have to make sure the + * LockedPoolManager instance exists before any other STL-based objects that use + * secure_allocator are created. So instead of having LockedPoolManager also be + * static-initialized, it is created on demand. + */ +class LockedPoolManager : public LockedPool +{ +public: + /** Return the current instance, or create it once */ + static LockedPoolManager& Instance() + { + std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance); + return *LockedPoolManager::_instance; + } + +private: + LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator); + + /** Create a new LockedPoolManager specialized to the OS */ + static void CreateInstance(); + /** Called when locking fails, warn the user here */ + static bool LockingFailed(); + + static LockedPoolManager* _instance; + static std::once_flag init_flag; +}; + +#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H diff --git a/src/support/pagelocker.cpp b/src/support/pagelocker.cpp deleted file mode 100644 index 7cea2d88c5..0000000000 --- a/src/support/pagelocker.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2009-2015 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
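As a quick orientation to the lockedpool.h interface introduced above, here is a minimal, hypothetical usage sketch (not part of this commit): the helper name is invented, and the explicit memory_cleanse call stands in for what a secure allocator built on top of the pool would normally do before returning a chunk.

    #include "support/cleanse.h"
    #include "support/lockedpool.h"

    #include <cstring>

    // Hypothetical illustration only: take a small buffer of locked memory from
    // the process-wide pool, use it, wipe it, and hand it back.
    static void locked_buffer_example()
    {
        LockedPoolManager& pool = LockedPoolManager::Instance(); // created on first use
        void* key = pool.alloc(32);     // size is rounded up to LockedPool::ARENA_ALIGN
        if (key == nullptr)
            return;                     // zero-sized request, or pool/arena exhausted
        std::memcpy(key, "not-a-real-secret-32-byte-value!", 32);
        memory_cleanse(key, 32);        // wipe before release; LockedPool::free does not clear chunks
        pool.free(key);                 // chunk is coalesced back into its arena
    }

In the tree itself this pattern is wrapped by the secure allocator rather than being called directly from application code.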
- -#include "support/pagelocker.h" - -#if defined(HAVE_CONFIG_H) -#include "config/bitcoin-config.h" -#endif - -#ifdef WIN32 -#ifdef _WIN32_WINNT -#undef _WIN32_WINNT -#endif -#define _WIN32_WINNT 0x0501 -#define WIN32_LEAN_AND_MEAN 1 -#ifndef NOMINMAX -#define NOMINMAX -#endif -#include <windows.h> -// This is used to attempt to keep keying material out of swap -// Note that VirtualLock does not provide this as a guarantee on Windows, -// but, in practice, memory that has been VirtualLock'd almost never gets written to -// the pagefile except in rare circumstances where memory is extremely low. -#else -#include <sys/mman.h> -#include <limits.h> // for PAGESIZE -#include <unistd.h> // for sysconf -#endif - -LockedPageManager* LockedPageManager::_instance = NULL; -boost::once_flag LockedPageManager::init_flag = BOOST_ONCE_INIT; - -/** Determine system page size in bytes */ -static inline size_t GetSystemPageSize() -{ - size_t page_size; -#if defined(WIN32) - SYSTEM_INFO sSysInfo; - GetSystemInfo(&sSysInfo); - page_size = sSysInfo.dwPageSize; -#elif defined(PAGESIZE) // defined in limits.h - page_size = PAGESIZE; -#else // assume some POSIX OS - page_size = sysconf(_SC_PAGESIZE); -#endif - return page_size; -} - -bool MemoryPageLocker::Lock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualLock(const_cast<void*>(addr), len) != 0; -#else - return mlock(addr, len) == 0; -#endif -} - -bool MemoryPageLocker::Unlock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualUnlock(const_cast<void*>(addr), len) != 0; -#else - return munlock(addr, len) == 0; -#endif -} - -LockedPageManager::LockedPageManager() : LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize()) -{ -} diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h deleted file mode 100644 index 538bf39453..0000000000 --- a/src/support/pagelocker.h +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2015 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_SUPPORT_PAGELOCKER_H -#define BITCOIN_SUPPORT_PAGELOCKER_H - -#include "support/cleanse.h" - -#include <map> - -#include <boost/thread/mutex.hpp> -#include <boost/thread/once.hpp> - -/** - * Thread-safe class to keep track of locked (ie, non-swappable) memory pages. - * - * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock() - * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when - * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page. - * - * @note By using a map from each page base address to lock count, this class is optimized for - * small objects that span up to a few pages, mostly smaller than a page. To support large allocations, - * something like an interval tree would be the preferred data structure. 
- */ -template <class Locker> -class LockedPageManagerBase -{ -public: - LockedPageManagerBase(size_t _page_size) : page_size(_page_size) - { - // Determine bitmask for extracting page from address - assert(!(_page_size & (_page_size - 1))); // size must be power of two - page_mask = ~(_page_size - 1); - } - - ~LockedPageManagerBase() - { - } - - - // For all pages in affected range, increase lock count - void LockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast<size_t>(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - if (it == histogram.end()) // Newly locked page - { - locker.Lock(reinterpret_cast<void*>(page), page_size); - histogram.insert(std::make_pair(page, 1)); - } else // Page was already locked; increase counter - { - it->second += 1; - } - } - } - - // For all pages in affected range, decrease lock count - void UnlockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast<size_t>(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - assert(it != histogram.end()); // Cannot unlock an area that was not locked - // Decrease counter for page, when it is zero, the page will be unlocked - it->second -= 1; - if (it->second == 0) // Nothing on the page anymore that keeps it locked - { - // Unlock page and remove the count from histogram - locker.Unlock(reinterpret_cast<void*>(page), page_size); - histogram.erase(it); - } - } - } - - // Get number of locked pages for diagnostics - int GetLockedPageCount() - { - boost::mutex::scoped_lock lock(mutex); - return histogram.size(); - } - -private: - Locker locker; - boost::mutex mutex; - size_t page_size, page_mask; - // map of page base address to lock count - typedef std::map<size_t, int> Histogram; - Histogram histogram; -}; - - -/** - * OS-dependent memory page locking/unlocking. - * Defined as policy class to make stubbing for test possible. - */ -class MemoryPageLocker -{ -public: - /** Lock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Lock(const void* addr, size_t len); - /** Unlock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Unlock(const void* addr, size_t len); -}; - -/** - * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in - * std::allocator templates. - * - * Some implementations of the STL allocate memory in some constructors (i.e., see - * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.) - * Due to the unpredictable order of static initializers, we have to make sure the - * LockedPageManager instance exists before any other STL-based objects that use - * secure_allocator are created. So instead of having LockedPageManager also be - * static-initialized, it is created on demand. 
- */ -class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker> -{ -public: - static LockedPageManager& Instance() - { - boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag); - return *LockedPageManager::_instance; - } - -private: - LockedPageManager(); - - static void CreateInstance() - { - // Using a local static instance guarantees that the object is initialized - // when it's first needed and also deinitialized after all objects that use - // it are done with it. I can think of one unlikely scenario where we may - // have a static deinitialization order/problem, but the check in - // LockedPageManagerBase's destructor helps us detect if that ever happens. - static LockedPageManager instance; - LockedPageManager::_instance = &instance; - } - - static LockedPageManager* _instance; - static boost::once_flag init_flag; -}; - -// -// Functions for directly locking/unlocking memory objects. -// Intended for non-dynamically allocated structures. -// -template <typename T> -void LockObject(const T& t) -{ - LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T)); -} - -template <typename T> -void UnlockObject(const T& t) -{ - memory_cleanse((void*)(&t), sizeof(T)); - LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T)); -} - -#endif // BITCOIN_SUPPORT_PAGELOCKER_H diff --git a/src/test/Checkpoints_tests.cpp b/src/test/Checkpoints_tests.cpp deleted file mode 100644 index 1b7d368e13..0000000000 --- a/src/test/Checkpoints_tests.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2011-2015 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// -// Unit tests for block-chain checkpoints -// - -#include "checkpoints.h" - -#include "uint256.h" -#include "test/test_bitcoin.h" -#include "chainparams.h" - -#include <boost/test/unit_test.hpp> - -using namespace std; - -BOOST_FIXTURE_TEST_SUITE(Checkpoints_tests, BasicTestingSetup) - -BOOST_AUTO_TEST_CASE(sanity) -{ - const CCheckpointData& checkpoints = Params(CBaseChainParams::MAIN).Checkpoints(); - BOOST_CHECK(Checkpoints::GetTotalBlocksEstimate(checkpoints) >= 134444); -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp index 97abeb7211..131d667e0b 100644 --- a/src/test/DoS_tests.cpp +++ b/src/test/DoS_tests.cpp @@ -6,8 +6,8 @@ #include "chainparams.h" #include "keystore.h" -#include "main.h" #include "net.h" +#include "net_processing.h" #include "pow.h" #include "script/sign.h" #include "serialize.h" @@ -29,9 +29,9 @@ extern unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans); struct COrphanTx { CTransaction tx; NodeId fromPeer; + int64_t nTimeExpire; }; extern std::map<uint256, COrphanTx> mapOrphanTransactions; -extern std::map<uint256, std::set<uint256> > mapOrphanTransactionsByPrev; CService ip(uint32_t i) { @@ -48,8 +48,9 @@ BOOST_AUTO_TEST_CASE(DoS_banning) { connman->ClearBanned(); CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, "", true); - GetNodeSignals().InitializeNode(dummyNode1.GetId(), &dummyNode1); + CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, "", true); + dummyNode1.SetSendVersion(PROTOCOL_VERSION); + GetNodeSignals().InitializeNode(&dummyNode1, *connman); dummyNode1.nVersion = 1; Misbehaving(dummyNode1.GetId(), 100); // Should get banned SendMessages(&dummyNode1, *connman); @@ -57,8 +58,9 @@ 
BOOST_AUTO_TEST_CASE(DoS_banning) BOOST_CHECK(!connman->IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned CAddress addr2(ip(0xa0b0c002), NODE_NONE); - CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, "", true); - GetNodeSignals().InitializeNode(dummyNode2.GetId(), &dummyNode2); + CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, "", true); + dummyNode2.SetSendVersion(PROTOCOL_VERSION); + GetNodeSignals().InitializeNode(&dummyNode2, *connman); dummyNode2.nVersion = 1; Misbehaving(dummyNode2.GetId(), 50); SendMessages(&dummyNode2, *connman); @@ -74,8 +76,9 @@ BOOST_AUTO_TEST_CASE(DoS_banscore) connman->ClearBanned(); mapArgs["-banscore"] = "111"; // because 11 is my favorite number CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, "", true); - GetNodeSignals().InitializeNode(dummyNode1.GetId(), &dummyNode1); + CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, 1, "", true); + dummyNode1.SetSendVersion(PROTOCOL_VERSION); + GetNodeSignals().InitializeNode(&dummyNode1, *connman); dummyNode1.nVersion = 1; Misbehaving(dummyNode1.GetId(), 100); SendMessages(&dummyNode1, *connman); @@ -96,8 +99,9 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) SetMockTime(nStartTime); // Overrides future calls to GetTime() CAddress addr(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, "", true); - GetNodeSignals().InitializeNode(dummyNode.GetId(), &dummyNode); + CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, "", true); + dummyNode.SetSendVersion(PROTOCOL_VERSION); + GetNodeSignals().InitializeNode(&dummyNode, *connman); dummyNode.nVersion = 1; Misbehaving(dummyNode.GetId(), 100); @@ -198,7 +202,6 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) BOOST_CHECK(mapOrphanTransactions.size() <= 10); LimitOrphanTxSize(0); BOOST_CHECK(mapOrphanTransactions.empty()); - BOOST_CHECK(mapOrphanTransactionsByPrev.empty()); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/README.md b/src/test/README.md index 3afdefe5fc..8f99804e10 100644 --- a/src/test/README.md +++ b/src/test/README.md @@ -1,4 +1,36 @@ -# Notes +### Compiling/running unit tests + +Unit tests will be automatically compiled if dependencies were met in `./configure` +and tests weren't explicitly disabled. + +After configuring, they can be run with `make check`. + +To run the bitcoind tests manually, launch `src/test/test_bitcoin`. + +To add more bitcoind tests, add `BOOST_AUTO_TEST_CASE` functions to the existing +.cpp files in the `test/` directory or add new .cpp files that +implement new BOOST_AUTO_TEST_SUITE sections. + +To run the bitcoin-qt tests manually, launch `src/qt/test/test_bitcoin-qt` + +To add more bitcoin-qt tests, add them to the `src/qt/test/` directory and +the `src/qt/test/test_main.cpp` file. + +### Running individual tests + +test_bitcoin has some built-in command-line arguments; for +example, to run just the getarg_tests verbosely: + + test_bitcoin --log_level=all --run_test=getarg_tests + +... or to run just the doubledash test: + + test_bitcoin --run_test=getarg_tests/doubledash + +Run `test_bitcoin --help` for the full list. + +### Note on adding test cases + The sources in this directory are unit test cases. 
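As a concrete companion to the README instructions above on adding unit tests, a minimal skeleton might look like the following; the suite name is hypothetical and the fixture mirrors the ones used elsewhere in this directory.

    #include "test/test_bitcoin.h"

    #include <boost/test/unit_test.hpp>

    // Hypothetical example of the kind of case the README describes adding.
    BOOST_FIXTURE_TEST_SUITE(example_tests, BasicTestingSetup)

    BOOST_AUTO_TEST_CASE(two_plus_two)
    {
        BOOST_CHECK_EQUAL(2 + 2, 4);
    }

    BOOST_AUTO_TEST_SUITE_END()

It can then be run on its own with test_bitcoin --run_test=example_tests, as the README's "Running individual tests" section shows.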
Boost includes a unit testing framework, and since bitcoin already uses boost, it makes sense to simply use this framework rather than require developers to @@ -19,17 +51,6 @@ For further reading, I found the following website to be helpful in explaining how the boost unit test framework works: [http://www.alittlemadness.com/2009/03/31/c-unit-testing-with-boosttest/](http://www.alittlemadness.com/2009/03/31/c-unit-testing-with-boosttest/). -test_bitcoin has some built-in command-line arguments; for -example, to run just the getarg_tests verbosely: - - test_bitcoin --log_level=all --run_test=getarg_tests - -... or to run just the doubledash test: - - test_bitcoin --run_test=getarg_tests/doubledash - -Run `test_bitcoin --help` for the full list. - ### bitcoin-util-test.py The test directory also contains the bitcoin-util-test.py tool, which tests bitcoin utils (currently just bitcoin-tx). This test gets run automatically during the `make check` build process. It is also possible to run the test manually from the src directory: @@ -37,4 +58,4 @@ The test directory also contains the bitcoin-util-test.py tool, which tests bitc ``` test/bitcoin-util-test.py --srcdir=[current directory] -```
\ No newline at end of file +``` diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp index 613f6c12d7..77e9df5d82 100644 --- a/src/test/allocator_tests.cpp +++ b/src/test/allocator_tests.cpp @@ -11,110 +11,224 @@ BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup) -// Dummy memory page locker for platform independent tests -static const void *last_lock_addr, *last_unlock_addr; -static size_t last_lock_len, last_unlock_len; -class TestLocker +BOOST_AUTO_TEST_CASE(arena_tests) { -public: - bool Lock(const void *addr, size_t len) + // Fake memory base address for testing + // without actually using memory. + void *synth_base = reinterpret_cast<void*>(0x08000000); + const size_t synth_size = 1024*1024; + Arena b(synth_base, synth_size, 16); + void *chunk = b.alloc(1000); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(chunk != nullptr); + BOOST_CHECK(b.stats().used == 1008); // Aligned to 16 + BOOST_CHECK(b.stats().total == synth_size); // Nothing has disappeared? + b.free(chunk); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(b.stats().used == 0); + BOOST_CHECK(b.stats().free == synth_size); + try { // Test exception on double-free + b.free(chunk); + BOOST_CHECK(0); + } catch(std::runtime_error &) { - last_lock_addr = addr; - last_lock_len = len; - return true; } - bool Unlock(const void *addr, size_t len) - { - last_unlock_addr = addr; - last_unlock_len = len; - return true; + + void *a0 = b.alloc(128); + void *a1 = b.alloc(256); + void *a2 = b.alloc(512); + BOOST_CHECK(b.stats().used == 896); + BOOST_CHECK(b.stats().total == synth_size); +#ifdef ARENA_DEBUG + b.walk(); +#endif + b.free(a0); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(b.stats().used == 768); + b.free(a1); + BOOST_CHECK(b.stats().used == 512); + void *a3 = b.alloc(128); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(b.stats().used == 640); + b.free(a2); + BOOST_CHECK(b.stats().used == 128); + b.free(a3); + BOOST_CHECK(b.stats().used == 0); + BOOST_CHECK_EQUAL(b.stats().chunks_used, 0); + BOOST_CHECK(b.stats().total == synth_size); + BOOST_CHECK(b.stats().free == synth_size); + BOOST_CHECK_EQUAL(b.stats().chunks_free, 1); + + std::vector<void*> addr; + BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr +#ifdef ARENA_DEBUG + b.walk(); +#endif + // Sweeping allocate all memory + for (int x=0; x<1024; ++x) + addr.push_back(b.alloc(1024)); + BOOST_CHECK(b.stats().free == 0); + BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr + BOOST_CHECK(b.alloc(0) == nullptr); + for (int x=0; x<1024; ++x) + b.free(addr[x]); + addr.clear(); + BOOST_CHECK(b.stats().total == synth_size); + BOOST_CHECK(b.stats().free == synth_size); + + // Now in the other direction... + for (int x=0; x<1024; ++x) + addr.push_back(b.alloc(1024)); + for (int x=0; x<1024; ++x) + b.free(addr[1023-x]); + addr.clear(); + + // Now allocate in smaller unequal chunks, then deallocate haphazardly + // Not all the chunks will succeed allocating, but freeing nullptr is + // allowed so that is no problem. + for (int x=0; x<2048; ++x) + addr.push_back(b.alloc(x+1)); + for (int x=0; x<2048; ++x) + b.free(addr[((x*23)%2048)^242]); + addr.clear(); + + // Go entirely wild: free and alloc interleaved, + // generate targets and sizes using pseudo-randomness. 
+ for (int x=0; x<2048; ++x) + addr.push_back(0); + uint32_t s = 0x12345678; + for (int x=0; x<5000; ++x) { + int idx = s & (addr.size()-1); + if (s & 0x80000000) { + b.free(addr[idx]); + addr[idx] = 0; + } else if(!addr[idx]) { + addr[idx] = b.alloc((s >> 16) & 2047); + } + bool lsb = s & 1; + s >>= 1; + if (lsb) + s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0 } -}; + for (void *ptr: addr) + b.free(ptr); + addr.clear(); -BOOST_AUTO_TEST_CASE(test_LockedPageManagerBase) + BOOST_CHECK(b.stats().total == synth_size); + BOOST_CHECK(b.stats().free == synth_size); +} + +/** Mock LockedPageAllocator for testing */ +class TestLockedPageAllocator: public LockedPageAllocator { - const size_t test_page_size = 4096; - LockedPageManagerBase<TestLocker> lpm(test_page_size); - size_t addr; - last_lock_addr = last_unlock_addr = 0; - last_lock_len = last_unlock_len = 0; - - /* Try large number of small objects */ - addr = 0; - for(int i=0; i<1000; ++i) - { - lpm.LockRange(reinterpret_cast<void*>(addr), 33); - addr += 33; - } - /* Try small number of page-sized objects, straddling two pages */ - addr = test_page_size*100 + 53; - for(int i=0; i<100; ++i) - { - lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size); - addr += test_page_size; - } - /* Try small number of page-sized objects aligned to exactly one page */ - addr = test_page_size*300; - for(int i=0; i<100; ++i) - { - lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size); - addr += test_page_size; - } - /* one very large object, straddling pages */ - lpm.LockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500); - BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(600+500))); - /* one very large object, page aligned */ - lpm.LockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1); - BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(1200+500-1))); - - BOOST_CHECK(lpm.GetLockedPageCount() == ( - (1000*33+test_page_size-1)/test_page_size + // small objects - 101 + 100 + // page-sized objects - 501 + 500)); // large objects - BOOST_CHECK((last_lock_len & (test_page_size-1)) == 0); // always lock entire pages - BOOST_CHECK(last_unlock_len == 0); // nothing unlocked yet - - /* And unlock again */ - addr = 0; - for(int i=0; i<1000; ++i) +public: + TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {} + void* AllocateLocked(size_t len, bool *lockingSuccess) { - lpm.UnlockRange(reinterpret_cast<void*>(addr), 33); - addr += 33; + *lockingSuccess = false; + if (count > 0) { + --count; + + if (lockedcount > 0) { + --lockedcount; + *lockingSuccess = true; + } + + return reinterpret_cast<void*>(0x08000000 + (count<<24)); // Fake address, do not actually use this memory + } + return 0; } - addr = test_page_size*100 + 53; - for(int i=0; i<100; ++i) + void FreeLocked(void* addr, size_t len) { - lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size); - addr += test_page_size; } - addr = test_page_size*300; - for(int i=0; i<100; ++i) + size_t GetLimit() { - lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size); - addr += test_page_size; + return std::numeric_limits<size_t>::max(); } - lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500); - lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1); +private: + int count; + int lockedcount; +}; - /* Check that everything is released */ - BOOST_CHECK(lpm.GetLockedPageCount() == 0); 
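The randomized alloc/free loop above drives its decisions with a small Galois LFSR; pulled out on its own purely for illustration (same taps as in the test), the update step is:

    #include <stdint.h>

    // Single step of the Galois LFSR used by the randomized arena test above.
    // Taps are 0xf00f00f0; the test comment notes a period of 0xf7ffffe0.
    static uint32_t lfsr_step(uint32_t s)
    {
        bool lsb = s & 1;
        s >>= 1;
        if (lsb)
            s ^= 0xf00f00f0;
        return s;
    }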
+BOOST_AUTO_TEST_CASE(lockedpool_tests_mock) +{ + // Test over three virtual arenas, of which one will succeed being locked + std::unique_ptr<LockedPageAllocator> x(new TestLockedPageAllocator(3, 1)); + LockedPool pool(std::move(x)); + BOOST_CHECK(pool.stats().total == 0); + BOOST_CHECK(pool.stats().locked == 0); - /* A few and unlocks of size zero (should have no effect) */ - addr = 0; - for(int i=0; i<1000; ++i) - { - lpm.LockRange(reinterpret_cast<void*>(addr), 0); - addr += 1; - } - BOOST_CHECK(lpm.GetLockedPageCount() == 0); - addr = 0; - for(int i=0; i<1000; ++i) + // Ensure unreasonable requests are refused without allocating anything + void *invalid_toosmall = pool.alloc(0); + BOOST_CHECK(invalid_toosmall == nullptr); + BOOST_CHECK(pool.stats().used == 0); + BOOST_CHECK(pool.stats().free == 0); + void *invalid_toobig = pool.alloc(LockedPool::ARENA_SIZE+1); + BOOST_CHECK(invalid_toobig == nullptr); + BOOST_CHECK(pool.stats().used == 0); + BOOST_CHECK(pool.stats().free == 0); + + void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a0); + BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE); + void *a1 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a1); + void *a2 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a2); + void *a3 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a3); + void *a4 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a4); + void *a5 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a5); + // We've passed a count of three arenas, so this allocation should fail + void *a6 = pool.alloc(16); + BOOST_CHECK(!a6); + + pool.free(a0); + pool.free(a2); + pool.free(a4); + pool.free(a1); + pool.free(a3); + pool.free(a5); + BOOST_CHECK(pool.stats().total == 3*LockedPool::ARENA_SIZE); + BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE); + BOOST_CHECK(pool.stats().used == 0); +} + +// These tests used the live LockedPoolManager object, this is also used +// by other tests so the conditions are somewhat less controllable and thus the +// tests are somewhat more error-prone. 
+BOOST_AUTO_TEST_CASE(lockedpool_tests_live) +{ + LockedPoolManager &pool = LockedPoolManager::Instance(); + LockedPool::Stats initial = pool.stats(); + + void *a0 = pool.alloc(16); + BOOST_CHECK(a0); + // Test reading and writing the allocated memory + *((uint32_t*)a0) = 0x1234; + BOOST_CHECK(*((uint32_t*)a0) == 0x1234); + + pool.free(a0); + try { // Test exception on double-free + pool.free(a0); + BOOST_CHECK(0); + } catch(std::runtime_error &) { - lpm.UnlockRange(reinterpret_cast<void*>(addr), 0); - addr += 1; } - BOOST_CHECK(lpm.GetLockedPageCount() == 0); - BOOST_CHECK((last_unlock_len & (test_page_size-1)) == 0); // always unlock entire pages + // If more than one new arena was allocated for the above tests, something is wrong + BOOST_CHECK(pool.stats().total <= (initial.total + LockedPool::ARENA_SIZE)); + // Usage must be back to where it started + BOOST_CHECK(pool.stats().used == initial.used); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp index ac3ab4c83f..04a6506655 100644 --- a/src/test/base58_tests.cpp +++ b/src/test/base58_tests.cpp @@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(base58_EncodeBase58) std::vector<unsigned char> sourcedata = ParseHex(test[0].get_str()); std::string base58string = test[1].get_str(); BOOST_CHECK_MESSAGE( - EncodeBase58(begin_ptr(sourcedata), end_ptr(sourcedata)) == base58string, + EncodeBase58(sourcedata.data(), sourcedata.data() + sourcedata.size()) == base58string, strTest); } } diff --git a/src/test/bctest.py b/src/test/bctest.py index d801415c70..adc5d0e418 100644 --- a/src/test/bctest.py +++ b/src/test/bctest.py @@ -1,4 +1,5 @@ -# Copyright 2014 BitPay, Inc. +# Copyright 2014 BitPay Inc. +# Copyright 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from __future__ import division,print_function,unicode_literals @@ -6,56 +7,115 @@ import subprocess import os import json import sys +import binascii +import difflib +import logging + +def parse_output(a, fmt): + """Parse the output according to specified format. + + Raise an error if the output can't be parsed.""" + if fmt == 'json': # json: compare parsed data + return json.loads(a) + elif fmt == 'hex': # hex: parse and compare binary data + return binascii.a2b_hex(a.strip()) + else: + raise NotImplementedError("Don't know how to compare %s" % fmt) def bctest(testDir, testObj, exeext): + """Runs a single test, comparing output and RC to expected output and RC. + + Raises an error if input can't be read, executable fails, or output/RC + are not as expected. Error is caught by bctester() and reported. 
+ """ + # Get the exec names and arguments + execprog = testObj['exec'] + exeext + execargs = testObj['args'] + execrun = [execprog] + execargs + + # Read the input data (if there is any) + stdinCfg = None + inputData = None + if "input" in testObj: + filename = testDir + "/" + testObj['input'] + inputData = open(filename).read() + stdinCfg = subprocess.PIPE + + # Read the expected output data (if there is any) + outputFn = None + outputData = None + if "output_cmp" in testObj: + outputFn = testObj['output_cmp'] + outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare) + try: + outputData = open(testDir + "/" + outputFn).read() + except: + logging.error("Output file " + outputFn + " can not be opened") + raise + if not outputData: + logging.error("Output data missing for " + outputFn) + raise Exception + + # Run the test + proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE,universal_newlines=True) + try: + outs = proc.communicate(input=inputData) + except OSError: + logging.error("OSError, Failed to execute " + execprog) + raise + + if outputData: + # Parse command output and expected output + try: + a_parsed = parse_output(outs[0], outputType) + except Exception as e: + logging.error('Error parsing command output as %s: %s' % (outputType,e)) + raise + try: + b_parsed = parse_output(outputData, outputType) + except Exception as e: + logging.error('Error parsing expected output %s as %s: %s' % (outputFn,outputType,e)) + raise + # Compare data + if a_parsed != b_parsed: + logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")") + raise Exception + # Compare formatting + if outs[0] != outputData: + error_message = "Output formatting mismatch for " + outputFn + ":\n" + error_message += "".join(difflib.context_diff(outputData.splitlines(True), + outs[0].splitlines(True), + fromfile=outputFn, + tofile="returned")) + logging.error(error_message) + raise Exception + + # Compare the return code to the expected return code + wantRC = 0 + if "return_code" in testObj: + wantRC = testObj['return_code'] + if proc.returncode != wantRC: + logging.error("Return code mismatch for " + outputFn) + raise Exception + +def bctester(testDir, input_basename, buildenv): + """ Loads and parses the input file, runs all tests and reports results""" + input_filename = testDir + "/" + input_basename + raw_data = open(input_filename).read() + input_data = json.loads(raw_data) + + failed_testcases = [] - execprog = testObj['exec'] + exeext - execargs = testObj['args'] - execrun = [execprog] + execargs - stdinCfg = None - inputData = None - if "input" in testObj: - filename = testDir + "/" + testObj['input'] - inputData = open(filename).read() - stdinCfg = subprocess.PIPE - - outputFn = None - outputData = None - if "output_cmp" in testObj: - outputFn = testObj['output_cmp'] - outputData = open(testDir + "/" + outputFn).read() - if not outputData: - print("Output data missing for " + outputFn) - sys.exit(1) - proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE,universal_newlines=True) - try: - outs = proc.communicate(input=inputData) - except OSError: - print("OSError, Failed to execute " + execprog) - sys.exit(1) - - if outputData and (outs[0] != outputData): - print("Output data mismatch for " + outputFn) - sys.exit(1) - - wantRC = 0 - if "return_code" in testObj: - wantRC = testObj['return_code'] - if proc.returncode != wantRC: - print("Return code 
mismatch for " + outputFn) - sys.exit(1) - -def bctester(testDir, input_basename, buildenv, verbose = False): - input_filename = testDir + "/" + input_basename - raw_data = open(input_filename).read() - input_data = json.loads(raw_data) - - for testObj in input_data: - if verbose and "description" in testObj: - print ("Testing: " + testObj["description"]) - bctest(testDir, testObj, buildenv.exeext) - if verbose and "description" in testObj: - print ("PASS") - - sys.exit(0) + for testObj in input_data: + try: + bctest(testDir, testObj, buildenv.exeext) + logging.info("PASSED: " + testObj["description"]) + except: + logging.info("FAILED: " + testObj["description"]) + failed_testcases.append(testObj["description"]) + if failed_testcases: + logging.error("FAILED TESTCASES: [" + ", ".join(failed_testcases) + "]") + sys.exit(1) + else: + sys.exit(0) diff --git a/src/test/bitcoin-util-test.py b/src/test/bitcoin-util-test.py index 3099506d6d..1c090b3f3f 100755 --- a/src/test/bitcoin-util-test.py +++ b/src/test/bitcoin-util-test.py @@ -1,27 +1,32 @@ #!/usr/bin/env python -# Copyright 2014 BitPay, Inc. +# Copyright 2014 BitPay Inc. +# Copyright 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from __future__ import division,print_function,unicode_literals import os +import sys import bctest import buildenv import argparse +import logging help_text="""Test framework for bitcoin utils. Runs automatically during `make check`. -Can also be run manually from the src directory by specifiying the source directory: +Can also be run manually from the src directory by specifying the source directory: -test/bitcoin-util-test.py --src=[srcdir] +test/bitcoin-util-test.py --srcdir='srcdir' [--verbose] """ - if __name__ == '__main__': - verbose = False + # Try to get the source directory from the environment variables. This will + # be set for `make check` automated runs. If environment variable is not set, + # then get the source directory from command line args. 
try: srcdir = os.environ["srcdir"] + verbose = False except: parser = argparse.ArgumentParser(description=help_text) parser.add_argument('-s', '--srcdir') @@ -29,4 +34,13 @@ if __name__ == '__main__': args = parser.parse_args() srcdir = args.srcdir verbose = args.verbose - bctest.bctester(srcdir + "/test/data", "bitcoin-util-test.json", buildenv, verbose = verbose) + + if verbose: + level = logging.DEBUG + else: + level = logging.ERROR + formatter = '%(asctime)s - %(levelname)s - %(message)s' + # Add the format/level to the logger + logging.basicConfig(format = formatter, level=level) + + bctest.bctester(srcdir + "/test/data", "bitcoin-util-test.json", buildenv) diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index 7530b013bd..7478758f73 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -26,21 +26,21 @@ static CBlock BuildBlockTestCase() { tx.vout[0].nValue = 42; block.vtx.resize(3); - block.vtx[0] = tx; + block.vtx[0] = MakeTransactionRef(tx); block.nVersion = 42; block.hashPrevBlock = GetRandHash(); block.nBits = 0x207fffff; tx.vin[0].prevout.hash = GetRandHash(); tx.vin[0].prevout.n = 0; - block.vtx[1] = tx; + block.vtx[1] = MakeTransactionRef(tx); tx.vin.resize(10); for (size_t i = 0; i < tx.vin.size(); i++) { tx.vin[i].prevout.hash = GetRandHash(); tx.vin[i].prevout.n = 0; } - block.vtx[2] = tx; + block.vtx[2] = MakeTransactionRef(tx); bool mutated; block.hashMerkleRoot = BlockMerkleRoot(block, &mutated); @@ -59,8 +59,8 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); - pool.addUnchecked(block.vtx[2].GetHash(), entry.FromTx(block.vtx[2])); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); + pool.addUnchecked(block.vtx[2]->GetHash(), entry.FromTx(*block.vtx[2])); + BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); // Do a simple ShortTxIDs RT { @@ -78,14 +78,14 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) BOOST_CHECK(!partialBlock.IsTxAvailable(1)); BOOST_CHECK( partialBlock.IsTxAvailable(2)); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); + BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); - std::list<CTransaction> removed; - pool.removeRecursive(block.vtx[2], removed); - BOOST_CHECK_EQUAL(removed.size(), 1); + size_t poolSize = pool.size(); + pool.removeRecursive(*block.vtx[2]); + BOOST_CHECK_EQUAL(pool.size(), poolSize - 1); CBlock block2; - std::vector<CTransaction> vtx_missing; + std::vector<CTransactionRef> vtx_missing; BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_INVALID); // No transactions vtx_missing.push_back(block.vtx[2]); // Wrong transaction @@ -129,7 +129,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(header); READWRITE(nonce); size_t shorttxids_size = shorttxids.size(); @@ -152,8 +152,10 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); - pool.addUnchecked(block.vtx[2].GetHash(), entry.FromTx(block.vtx[2])); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), 
SHARED_TX_OFFSET + 0); + pool.addUnchecked(block.vtx[2]->GetHash(), entry.FromTx(*block.vtx[2])); + BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); + + uint256 txhash; // Test with pre-forwarding tx 1, but not coinbase { @@ -161,8 +163,8 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) shortIDs.prefilledtxn.resize(1); shortIDs.prefilledtxn[0] = {1, block.vtx[1]}; shortIDs.shorttxids.resize(2); - shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[0].GetHash()); - shortIDs.shorttxids[1] = shortIDs.GetShortID(block.vtx[2].GetHash()); + shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[0]->GetHash()); + shortIDs.shorttxids[1] = shortIDs.GetShortID(block.vtx[2]->GetHash()); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; @@ -176,10 +178,10 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) BOOST_CHECK( partialBlock.IsTxAvailable(1)); BOOST_CHECK( partialBlock.IsTxAvailable(2)); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); + BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); CBlock block2; - std::vector<CTransaction> vtx_missing; + std::vector<CTransactionRef> vtx_missing; BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_INVALID); // No transactions vtx_missing.push_back(block.vtx[1]); // Wrong transaction @@ -194,9 +196,13 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); + txhash = block.vtx[2]->GetHash(); + block.vtx.clear(); + block2.vtx.clear(); + block3.vtx.clear(); + BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); } - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); + BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); } BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) @@ -205,8 +211,10 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); - pool.addUnchecked(block.vtx[1].GetHash(), entry.FromTx(block.vtx[1])); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); + pool.addUnchecked(block.vtx[1]->GetHash(), entry.FromTx(*block.vtx[1])); + BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); + + uint256 txhash; // Test with pre-forwarding coinbase + tx 2 with tx 1 in mempool { @@ -215,7 +223,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) shortIDs.prefilledtxn[0] = {0, block.vtx[0]}; shortIDs.prefilledtxn[1] = {1, block.vtx[2]}; // id == 1 as it is 1 after index 1 shortIDs.shorttxids.resize(1); - shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[1].GetHash()); + shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[1]->GetHash()); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; @@ -229,19 +237,22 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) BOOST_CHECK( partialBlock.IsTxAvailable(1)); BOOST_CHECK( partialBlock.IsTxAvailable(2)); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); + 
BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); CBlock block2; - std::vector<CTransaction> vtx_missing; + std::vector<CTransactionRef> vtx_missing; BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); bool mutated; BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); + txhash = block.vtx[1]->GetHash(); + block.vtx.clear(); + block2.vtx.clear(); + BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); } - BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); + BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); } BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) @@ -255,7 +266,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) CBlock block; block.vtx.resize(1); - block.vtx[0] = coinbase; + block.vtx[0] = MakeTransactionRef(std::move(coinbase)); block.nVersion = 42; block.hashPrevBlock = GetRandHash(); block.nBits = 0x207fffff; @@ -280,7 +291,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) BOOST_CHECK(partialBlock.IsTxAvailable(0)); CBlock block2; - std::vector<CTransaction> vtx_missing; + std::vector<CTransactionRef> vtx_missing; BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp index 042fad42da..b15ff9e44b 100644 --- a/src/test/bloom_tests.cpp +++ b/src/test/bloom_tests.cpp @@ -30,18 +30,18 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize) CBloomFilter filter(3, 0.01, 0, BLOOM_UPDATE_ALL); filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")); - BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter doesn't contain just-inserted object!"); + BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!"); // One bit different in first byte - BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter contains something it shouldn't!"); + BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter contains something it shouldn't!"); filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")); - BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "BloomFilter doesn't contain just-inserted object (2)!"); + BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "Bloom filter doesn't contain just-inserted object (2)!"); filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")); - BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "BloomFilter doesn't contain just-inserted object (3)!"); + BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!"); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); - 
filter.Serialize(stream, SER_NETWORK, PROTOCOL_VERSION); + stream << filter; vector<unsigned char> vch = ParseHex("03614e9b050000000000000001"); vector<char> expected(vch.size()); @@ -51,9 +51,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize) BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end()); - BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter doesn't contain just-inserted object!"); + BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!"); filter.clear(); - BOOST_CHECK_MESSAGE( !filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter should be empty!"); + BOOST_CHECK_MESSAGE( !filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter should be empty!"); } BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak) @@ -62,18 +62,18 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak) CBloomFilter filter(3, 0.01, 2147483649UL, BLOOM_UPDATE_ALL); filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")); - BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter doesn't contain just-inserted object!"); + BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!"); // One bit different in first byte - BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter contains something it shouldn't!"); + BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter contains something it shouldn't!"); filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")); - BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "BloomFilter doesn't contain just-inserted object (2)!"); + BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "Bloom filter doesn't contain just-inserted object (2)!"); filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")); - BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "BloomFilter doesn't contain just-inserted object (3)!"); + BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!"); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); - filter.Serialize(stream, SER_NETWORK, PROTOCOL_VERSION); + stream << filter; vector<unsigned char> vch = ParseHex("03ce4299050000000100008001"); vector<char> expected(vch.size()); @@ -100,7 +100,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key) filter.insert(vector<unsigned char>(hash.begin(), hash.end())); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); - filter.Serialize(stream, SER_NETWORK, PROTOCOL_VERSION); + stream << filter; vector<unsigned char> vch = ParseHex("038fc16b080000000000000001"); vector<char> expected(vch.size()); @@ -114,16 +114,14 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key) BOOST_AUTO_TEST_CASE(bloom_match) { // Random real transaction (b4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b) - CTransaction tx; CDataStream 
stream(ParseHex("01000000010b26e9b7735eb6aabdf358bab62f9816a21ba9ebdb719d5299e88607d722c190000000008b4830450220070aca44506c5cef3a16ed519d7c3c39f8aab192c4e1c90d065f37b8a4af6141022100a8e160b856c2d43d27d8fba71e5aef6405b8643ac4cb7cb3c462aced7f14711a0141046d11fee51b0e60666d5049a9101a72741df480b96ee26488a4d3466b95c9a40ac5eeef87e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe76036c339ffffffff021bff3d11000000001976a91404943fdd508053c75000106d3bc6e2754dbcff1988ac2f15de00000000001976a914a266436d2965547608b9e15d9032a7b9d64fa43188ac00000000"), SER_DISK, CLIENT_VERSION); - stream >> tx; + CTransaction tx(deserialize, stream); // and one which spends it (e2769b09e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436) unsigned char ch[] = {0x01, 0x00, 0x00, 0x00, 0x01, 0x6b, 0xff, 0x7f, 0xcd, 0x4f, 0x85, 0x65, 0xef, 0x40, 0x6d, 0xd5, 0xd6, 0x3d, 0x4f, 0xf9, 0x4f, 0x31, 0x8f, 0xe8, 0x20, 0x27, 0xfd, 0x4d, 0xc4, 0x51, 0xb0, 0x44, 0x74, 0x01, 0x9f, 0x74, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xda, 0x0d, 0xc6, 0xae, 0xce, 0xfe, 0x1e, 0x06, 0xef, 0xdf, 0x05, 0x77, 0x37, 0x57, 0xde, 0xb1, 0x68, 0x82, 0x09, 0x30, 0xe3, 0xb0, 0xd0, 0x3f, 0x46, 0xf5, 0xfc, 0xf1, 0x50, 0xbf, 0x99, 0x0c, 0x02, 0x21, 0x00, 0xd2, 0x5b, 0x5c, 0x87, 0x04, 0x00, 0x76, 0xe4, 0xf2, 0x53, 0xf8, 0x26, 0x2e, 0x76, 0x3e, 0x2d, 0xd5, 0x1e, 0x7f, 0xf0, 0xbe, 0x15, 0x77, 0x27, 0xc4, 0xbc, 0x42, 0x80, 0x7f, 0x17, 0xbd, 0x39, 0x01, 0x41, 0x04, 0xe6, 0xc2, 0x6e, 0xf6, 0x7d, 0xc6, 0x10, 0xd2, 0xcd, 0x19, 0x24, 0x84, 0x78, 0x9a, 0x6c, 0xf9, 0xae, 0xa9, 0x93, 0x0b, 0x94, 0x4b, 0x7e, 0x2d, 0xb5, 0x34, 0x2b, 0x9d, 0x9e, 0x5b, 0x9f, 0xf7, 0x9a, 0xff, 0x9a, 0x2e, 0xe1, 0x97, 0x8d, 0xd7, 0xfd, 0x01, 0xdf, 0xc5, 0x22, 0xee, 0x02, 0x28, 0x3d, 0x3b, 0x06, 0xa9, 0xd0, 0x3a, 0xcf, 0x80, 0x96, 0x96, 0x8d, 0x7d, 0xbb, 0x0f, 0x91, 0x78, 0xff, 0xff, 0xff, 0xff, 0x02, 0x8b, 0xa7, 0x94, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xba, 0xde, 0xec, 0xfd, 0xef, 0x05, 0x07, 0x24, 0x7f, 0xc8, 0xf7, 0x42, 0x41, 0xd7, 0x3b, 0xc0, 0x39, 0x97, 0x2d, 0x7b, 0x88, 0xac, 0x40, 0x94, 0xa8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xc1, 0x09, 0x32, 0x48, 0x3f, 0xec, 0x93, 0xed, 0x51, 0xf5, 0xfe, 0x95, 0xe7, 0x25, 0x59, 0xf2, 0xcc, 0x70, 0x43, 0xf9, 0x88, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00}; vector<unsigned char> vch(ch, ch + sizeof(ch) -1); CDataStream spendStream(vch, SER_DISK, CLIENT_VERSION); - CTransaction spendingTx; - spendStream >> spendingTx; + CTransaction spendingTx(deserialize, spendStream); CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL); filter.insert(uint256S("0xb4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b")); diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp index b487686136..79dea00e46 100644 --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -3,12 +3,12 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "coins.h" -#include "test_random.h" #include "script/standard.h" #include "uint256.h" #include "utilstrencodings.h" #include "test/test_bitcoin.h" -#include "main.h" +#include "test/test_random.h" +#include "validation.h" #include "consensus/validation.h" #include <vector> diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp index c7b4fb240c..7dcd548edf 100644 --- a/src/test/crypto_tests.cpp +++ b/src/test/crypto_tests.cpp @@ -9,9 +9,9 @@ #include "crypto/sha512.h" #include "crypto/hmac_sha256.h" #include "crypto/hmac_sha512.h" -#include "test_random.h" #include "utilstrencodings.h" #include "test/test_bitcoin.h" +#include "test/test_random.h" #include <vector> diff --git a/src/test/data/bitcoin-util-test.json b/src/test/data/bitcoin-util-test.json index de95044597..98be75919b 100644 --- a/src/test/data/bitcoin-util-test.json +++ b/src/test/data/bitcoin-util-test.json @@ -103,6 +103,16 @@ "description": "Creates a new transaction with a single empty output script (output in json)" }, { "exec": "./bitcoin-tx", + "args": ["01000000000100000000000000000000000000"], + "output_cmp": "txcreate2.hex", + "description": "Parses a transation with no inputs and a single output script" + }, + { "exec": "./bitcoin-tx", + "args": ["-json", "01000000000100000000000000000000000000"], + "output_cmp": "txcreate2.json", + "description": "Parses a transation with no inputs and a single output script (output in json)" + }, + { "exec": "./bitcoin-tx", "args": ["-create", "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0", diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json index 2f299aa5fe..a3f47fcee2 100644 --- a/src/test/data/tx_valid.json +++ b/src/test/data/tx_valid.json @@ -53,7 +53,7 @@ ["The following tests for the presence of a bug in the handling of SIGHASH_SINGLE"], ["It results in signing the constant 1, instead of something generated based on the transaction,"], ["when the input doing the signing has an index greater than the maximum output index"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0xe52b482f2faa8ecbf0db344f93c84ac908557f33 EQUALVERIFY CHECKSIG"], ["0000000000000000000000000000000000000000000000000000000000000200", 0, "1"]], +[[["0000000000000000000000000000000000000000000000000000000000000200", 0, "1"], ["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0xe52b482f2faa8ecbf0db344f93c84ac908557f33 EQUALVERIFY CHECKSIG"]], "01000000020002000000000000000000000000000000000000000000000000000000000000000000000151ffffffff0001000000000000000000000000000000000000000000000000000000000000000000006b483045022100c9cdd08798a28af9d1baf44a6c77bcc7e279f47dc487c8c899911bc48feaffcc0220503c5c50ae3998a733263c5c0f7061b483e2b56c4c41b456e7d2f5a78a74c077032102d5c25adb51b61339d2b05315791e21bbe80ea470a49db0135720983c905aace0ffffffff010000000000000000015100000000", "P2SH"], ["An invalid P2SH Transaction"], @@ -334,9 +334,9 @@ "0100000000010100010000000000000000000000000000000000000000000000000000000000000000000023220020ff25429251b5a84f452230a3c75fd886b7fc5a7865ce4a7bb7a9d7c5be6da3dbffffffff01e8030000000000001976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac02483045022100aa5d8aa40a90f23ce2c3d11bc845ca4a12acd99cbea37de6b9f6d86edebba8cb022022dedc2aa0a255f74d04c0b76ece2d7c691f9dd11a64a8ac49f62a99c3a05f9d01232103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac00000000", "P2SH,WITNESS"], ["Witness with SigHash Single|AnyoneCanPay"], 
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100], +[[["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100], ["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 2000], -["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100], +["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100], ["0000000000000000000000000000000000000000000000000000000000000100", 3, "0x51", 4100]], "0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff05540b0000000000000151d0070000000000000151840300000000000001513c0f00000000000001512c010000000000000151000248304502210092f4777a0f17bf5aeb8ae768dec5f2c14feabf9d1fe2c89c78dfed0f13fdb86902206da90a86042e252bcd1e80a168c719e4a1ddcc3cebea24b9812c5453c79107e9832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71000000000000", "P2SH,WITNESS"], @@ -359,9 +359,9 @@ "0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b000000000000015100024730440220699e6b0cfe015b64ca3283e6551440a34f901ba62dd4c72fe1cb815afb2e6761022021cc5e84db498b1479de14efda49093219441adc6c543e5534979605e273d80b032103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"], ["Witness with SigHash None|AnyoneCanPay"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100], +[[["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100], +["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100], ["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 2000], -["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100], ["0000000000000000000000000000000000000000000000000000000000000100", 3, "0x51", 4100]], "0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff04b60300000000000001519e070000000000000151860b00000000000001009600000000000000015100000248304502210091b32274295c2a3fa02f5bce92fb2789e3fc6ea947fbe1a76e52ea3f4ef2381a022079ad72aefa3837a2e0c033a8652a59731da05fa4a813f4fc48e87c075037256b822103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"], @@ -390,9 +390,9 @@ 
"01000000000103000100000000000000000000000000000000000000000000000000000000000000000000000200000000010000000000000000000000000000000000000000000000000000000000000100000000ffffffff000100000000000000000000000000000000000000000000000000000000000002000000000200000003e8030000000000000151d0070000000000000151b80b00000000000001510002473044022022fceb54f62f8feea77faac7083c3b56c4676a78f93745adc8a35800bc36adfa022026927df9abcf0a8777829bcfcce3ff0a385fa54c3f9df577405e3ef24ee56479022103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"], ["Witness with SigHash All|AnyoneCanPay"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100], +[[["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100], +["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100], ["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 2000], -["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100], ["0000000000000000000000000000000000000000000000000000000000000100", 3, "0x51", 4100]], "0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff03e8030000000000000151d0070000000000000151b80b0000000000000151000002483045022100a3cec69b52cba2d2de623eeef89e0ba1606184ea55476c0f8189fda231bc9cbb022003181ad597f7c380a7d1c740286b1d022b8b04ded028b833282e055e03b8efef812103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"], @@ -458,8 +458,8 @@ "0100000000010200010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff02e8030000000000000151e90300000000000001510247304402206d59682663faab5e4cb733c562e22cdae59294895929ec38d7c016621ff90da0022063ef0af5f970afe8a45ea836e3509b8847ed39463253106ac17d19c437d3d56b832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710248304502210085001a820bfcbc9f9de0298af714493f8a37b3b354bfd21a7097c3e009f2018c022050a8b4dbc8155d4d04da2f5cdd575dcf8dd0108de8bec759bd897ea01ecb3af7832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000", "P2SH,WITNESS"], ["Witness Single|AnyoneCanPay does not hash input's position (permutation)"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1000], -["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1001]], +[[["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1001], +["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1000]], 
"0100000000010200010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff02e9030000000000000151e80300000000000001510248304502210085001a820bfcbc9f9de0298af714493f8a37b3b354bfd21a7097c3e009f2018c022050a8b4dbc8155d4d04da2f5cdd575dcf8dd0108de8bec759bd897ea01ecb3af7832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710247304402206d59682663faab5e4cb733c562e22cdae59294895929ec38d7c016621ff90da0022063ef0af5f970afe8a45ea836e3509b8847ed39463253106ac17d19c437d3d56b832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000", "P2SH,WITNESS"], ["Non witness Single|AnyoneCanPay hash input's position"], @@ -478,7 +478,7 @@ ["1b2a9a426ba603ba357ce7773cb5805cb9c7c2b386d100d1fc9263513188e680", 0, "0x00 0x20 0xd9bbfbe56af7c4b7f960a70d7ea107156913d9e5a26b0a71429df5e097ca6537", 16777215]], "01000000000102e9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff80e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffff0280969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac80969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS"], -["BIP143 example: Same as the previous example with input-output paris swapped"], +["BIP143 example: Same as the previous example with input-output pairs swapped"], [[["1b2a9a426ba603ba357ce7773cb5805cb9c7c2b386d100d1fc9263513188e680", 0, "0x00 0x20 0xd9bbfbe56af7c4b7f960a70d7ea107156913d9e5a26b0a71429df5e097ca6537", 16777215], ["01c0cf7fba650638e55eb91261b183251fbb466f90dff17f10086817c542b5e9", 0, "0x00 0x20 0xba468eea561b26301e4cf69fa34bde4ad60c81e70f059f045ca9a79931004a4d", 16777215]], "0100000000010280e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffffe9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff0280969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac80969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS"], diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index d4d825d199..2d791ee18d 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -254,7 +254,7 @@ struct StringContentsSerializer { ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { if (ser_action.ForRead()) { str.clear(); char c = 0; diff --git 
a/src/test/limitedmap_tests.cpp b/src/test/limitedmap_tests.cpp index faaddffad8..e3531486aa 100644 --- a/src/test/limitedmap_tests.cpp +++ b/src/test/limitedmap_tests.cpp @@ -47,7 +47,7 @@ BOOST_AUTO_TEST_CASE(limitedmap_test) // make sure the item is present BOOST_CHECK(map.count(i) == 1); - // use the iterator to check for the expected key adn value + // use the iterator to check for the expected key and value BOOST_CHECK(it->first == i); BOOST_CHECK(it->second == i + 1); diff --git a/src/test/main_tests.cpp b/src/test/main_tests.cpp index dbfbdd934f..697fbf792c 100644 --- a/src/test/main_tests.cpp +++ b/src/test/main_tests.cpp @@ -3,7 +3,8 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "chainparams.h" -#include "main.h" +#include "validation.h" +#include "net.h" #include "test/test_bitcoin.h" diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp index 033a50f94f..bdd6eaf538 100644 --- a/src/test/mempool_tests.cpp +++ b/src/test/mempool_tests.cpp @@ -55,17 +55,17 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest) CTxMemPool testPool(CFeeRate(0)); - std::list<CTransaction> removed; // Nothing in pool, remove should do nothing: - testPool.removeRecursive(txParent, removed); - BOOST_CHECK_EQUAL(removed.size(), 0); + unsigned int poolSize = testPool.size(); + testPool.removeRecursive(txParent); + BOOST_CHECK_EQUAL(testPool.size(), poolSize); // Just the parent: testPool.addUnchecked(txParent.GetHash(), entry.FromTx(txParent)); - testPool.removeRecursive(txParent, removed); - BOOST_CHECK_EQUAL(removed.size(), 1); - removed.clear(); + poolSize = testPool.size(); + testPool.removeRecursive(txParent); + BOOST_CHECK_EQUAL(testPool.size(), poolSize - 1); // Parent, children, grandchildren: testPool.addUnchecked(txParent.GetHash(), entry.FromTx(txParent)); @@ -75,19 +75,21 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest) testPool.addUnchecked(txGrandChild[i].GetHash(), entry.FromTx(txGrandChild[i])); } // Remove Child[0], GrandChild[0] should be removed: - testPool.removeRecursive(txChild[0], removed); - BOOST_CHECK_EQUAL(removed.size(), 2); - removed.clear(); + poolSize = testPool.size(); + testPool.removeRecursive(txChild[0]); + BOOST_CHECK_EQUAL(testPool.size(), poolSize - 2); // ... 
make sure grandchild and child are gone: - testPool.removeRecursive(txGrandChild[0], removed); - BOOST_CHECK_EQUAL(removed.size(), 0); - testPool.removeRecursive(txChild[0], removed); - BOOST_CHECK_EQUAL(removed.size(), 0); + poolSize = testPool.size(); + testPool.removeRecursive(txGrandChild[0]); + BOOST_CHECK_EQUAL(testPool.size(), poolSize); + poolSize = testPool.size(); + testPool.removeRecursive(txChild[0]); + BOOST_CHECK_EQUAL(testPool.size(), poolSize); // Remove parent, all children/grandchildren should go: - testPool.removeRecursive(txParent, removed); - BOOST_CHECK_EQUAL(removed.size(), 5); + poolSize = testPool.size(); + testPool.removeRecursive(txParent); + BOOST_CHECK_EQUAL(testPool.size(), poolSize - 5); BOOST_CHECK_EQUAL(testPool.size(), 0); - removed.clear(); // Add children and grandchildren, but NOT the parent (simulate the parent being in a block) for (int i = 0; i < 3; i++) @@ -97,10 +99,10 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest) } // Now remove the parent, as might happen if a block-re-org occurs but the parent cannot be // put into the mempool (maybe because it is non-standard): - testPool.removeRecursive(txParent, removed); - BOOST_CHECK_EQUAL(removed.size(), 6); + poolSize = testPool.size(); + testPool.removeRecursive(txParent); + BOOST_CHECK_EQUAL(testPool.size(), poolSize - 6); BOOST_CHECK_EQUAL(testPool.size(), 0); - removed.clear(); } template<typename name> @@ -281,12 +283,11 @@ BOOST_AUTO_TEST_CASE(MempoolIndexingTest) BOOST_CHECK_EQUAL(pool.size(), 10); // Now try removing tx10 and verify the sort order returns to normal - std::list<CTransaction> removed; - pool.removeRecursive(pool.mapTx.find(tx10.GetHash())->GetTx(), removed); + pool.removeRecursive(pool.mapTx.find(tx10.GetHash())->GetTx()); CheckSort<descendant_score>(pool, snapshotOrder); - pool.removeRecursive(pool.mapTx.find(tx9.GetHash())->GetTx(), removed); - pool.removeRecursive(pool.mapTx.find(tx8.GetHash())->GetTx(), removed); + pool.removeRecursive(pool.mapTx.find(tx9.GetHash())->GetTx()); + pool.removeRecursive(pool.mapTx.find(tx8.GetHash())->GetTx()); /* Now check the sort on the mining score index. * Final order should be: * @@ -411,10 +412,9 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) CheckSort<ancestor_score>(pool, sortedOrder); /* after tx6 is mined, tx7 should move up in the sort */ - std::vector<CTransaction> vtx; - vtx.push_back(tx6); - std::list<CTransaction> dummy; - pool.removeForBlock(vtx, 1, dummy, false); + std::vector<CTransactionRef> vtx; + vtx.push_back(MakeTransactionRef(tx6)); + pool.removeForBlock(vtx, 1); sortedOrder.erase(sortedOrder.begin()+1); sortedOrder.pop_back(); @@ -548,13 +548,12 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest) pool.addUnchecked(tx5.GetHash(), entry.Fee(1000LL).FromTx(tx5, &pool)); pool.addUnchecked(tx7.GetHash(), entry.Fee(9000LL).FromTx(tx7, &pool)); - std::vector<CTransaction> vtx; - std::list<CTransaction> conflicts; + std::vector<CTransactionRef> vtx; SetMockTime(42); SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE); BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + 1000); // ... we should keep the same min fee until we get a block - pool.removeForBlock(vtx, 1, conflicts); + pool.removeForBlock(vtx, 1); SetMockTime(42 + 2*CTxMemPool::ROLLING_FEE_HALFLIFE); BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), (maxFeeRateRemoved.GetFeePerK() + 1000)/2); // ... 
then feerate should drop 1/2 each halflife diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp index 66ca381ea7..55e6852a15 100644 --- a/src/test/merkle_tests.cpp +++ b/src/test/merkle_tests.cpp @@ -4,7 +4,7 @@ #include "consensus/merkle.h" #include "test/test_bitcoin.h" -#include "test_random.h" +#include "test/test_random.h" #include <boost/test/unit_test.hpp> @@ -15,8 +15,8 @@ static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::ve { vMerkleTree.clear(); vMerkleTree.reserve(block.vtx.size() * 2 + 16); // Safe upper bound for the number of total nodes. - for (std::vector<CTransaction>::const_iterator it(block.vtx.begin()); it != block.vtx.end(); ++it) - vMerkleTree.push_back(it->GetHash()); + for (std::vector<CTransactionRef>::const_iterator it(block.vtx.begin()); it != block.vtx.end(); ++it) + vMerkleTree.push_back((*it)->GetHash()); int j = 0; bool mutated = false; for (int nSize = block.vtx.size(); nSize > 1; nSize = (nSize + 1) / 2) @@ -86,7 +86,7 @@ BOOST_AUTO_TEST_CASE(merkle_test) for (int j = 0; j < ntx; j++) { CMutableTransaction mtx; mtx.nLockTime = j; - block.vtx[j] = mtx; + block.vtx[j] = MakeTransactionRef(std::move(mtx)); } // Compute the root of the block before mutating it. bool unmutatedMutated = false; @@ -126,7 +126,7 @@ BOOST_AUTO_TEST_CASE(merkle_test) std::vector<uint256> newBranch = BlockMerkleBranch(block, mtx); std::vector<uint256> oldBranch = BlockGetMerkleBranch(block, merkleTree, mtx); BOOST_CHECK(oldBranch == newBranch); - BOOST_CHECK(ComputeMerkleRootFromBranch(block.vtx[mtx].GetHash(), newBranch, mtx) == oldRoot); + BOOST_CHECK(ComputeMerkleRootFromBranch(block.vtx[mtx]->GetHash(), newBranch, mtx) == oldRoot); } } } diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index d6d7b5716e..892e731a7a 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -7,7 +7,7 @@ #include "consensus/consensus.h" #include "consensus/merkle.h" #include "consensus/validation.h" -#include "main.h" +#include "validation.h" #include "miner.h" #include "pubkey.h" #include "script/standard.h" @@ -77,7 +77,7 @@ bool TestSequenceLocks(const CTransaction &tx, int flags) // Implemented as an additional function, rather than a separate test case, // to allow reusing the blockchain created in CreateNewBlock_validity. // Note that this test assumes blockprioritysize is 0. -void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, std::vector<CTransaction *>& txFirst) +void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, std::vector<CTransactionRef>& txFirst) { // Test the ancestor feerate transaction selection. 
TestMemPoolEntryHelper entry; @@ -108,9 +108,9 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, mempool.addUnchecked(hashHighFeeTx, entry.Fee(50000).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey); - BOOST_CHECK(pblocktemplate->block.vtx[1].GetHash() == hashParentTx); - BOOST_CHECK(pblocktemplate->block.vtx[2].GetHash() == hashHighFeeTx); - BOOST_CHECK(pblocktemplate->block.vtx[3].GetHash() == hashMediumFeeTx); + BOOST_CHECK(pblocktemplate->block.vtx[1]->GetHash() == hashParentTx); + BOOST_CHECK(pblocktemplate->block.vtx[2]->GetHash() == hashHighFeeTx); + BOOST_CHECK(pblocktemplate->block.vtx[3]->GetHash() == hashMediumFeeTx); // Test that a package below the min relay fee doesn't get included tx.vin[0].prevout.hash = hashHighFeeTx; @@ -130,21 +130,20 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey); // Verify that the free tx and the low fee tx didn't get selected for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) { - BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashFreeTx); - BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashLowFeeTx); + BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashFreeTx); + BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashLowFeeTx); } // Test that packages above the min relay fee do get included, even if one // of the transactions is below the min relay fee // Remove the low fee transaction and replace with a higher fee transaction - std::list<CTransaction> dummy; - mempool.removeRecursive(tx, dummy); + mempool.removeRecursive(tx); tx.vout[0].nValue -= 2; // Now we should be just over the min relay fee hashLowFeeTx = tx.GetHash(); mempool.addUnchecked(hashLowFeeTx, entry.Fee(feeToUse+2).FromTx(tx)); pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey); - BOOST_CHECK(pblocktemplate->block.vtx[4].GetHash() == hashFreeTx); - BOOST_CHECK(pblocktemplate->block.vtx[5].GetHash() == hashLowFeeTx); + BOOST_CHECK(pblocktemplate->block.vtx[4]->GetHash() == hashFreeTx); + BOOST_CHECK(pblocktemplate->block.vtx[5]->GetHash() == hashLowFeeTx); // Test that transaction selection properly updates ancestor fee // calculations as ancestor transactions get included in a block. @@ -167,8 +166,8 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, // Verify that this tx isn't selected. for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) { - BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashFreeTx2); - BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashLowFeeTx2); + BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashFreeTx2); + BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashLowFeeTx2); } // This tx will be mineable, and should cause hashLowFeeTx2 to be selected @@ -177,7 +176,7 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, tx.vout[0].nValue = 100000000 - 10000; // 10k satoshi fee mempool.addUnchecked(tx.GetHash(), entry.Fee(10000).FromTx(tx)); pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey); - BOOST_CHECK(pblocktemplate->block.vtx[8].GetHash() == hashLowFeeTx2); + BOOST_CHECK(pblocktemplate->block.vtx[8]->GetHash() == hashLowFeeTx2); } // NOTE: These tests rely on CreateNewBlock doing its own self-validation! 
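The change running through these miner and mempool test hunks is the move from value-type CTransaction to the shared-pointer alias CTransactionRef: block.vtx and txFirst now hold refs, members are reached with -> instead of ., and ProcessNewBlock takes a shared const CBlock rather than a CBlock* plus a CValidationState out-parameter. A rough sketch of the post-change idiom follows; the function and variable names are illustrative, while MakeTransactionRef, MakeTransactionRef(std::move(...)), ->GetHash(), std::make_shared<const CBlock>, and the ProcessNewBlock(chainparams, shared_pblock, true, NULL) call are the ones used in the hunks above.

    #include "chainparams.h"
    #include "primitives/block.h"
    #include "primitives/transaction.h"
    #include "validation.h"

    static void SubmitOneTxBlock(const CChainParams& chainparams, CBlock block, CMutableTransaction mtx)
    {
        // Blocks now store CTransactionRef (std::shared_ptr<const CTransaction>).
        block.vtx.push_back(MakeTransactionRef(std::move(mtx)));
        // Transaction members are reached through the ref, not a value.
        uint256 txid = block.vtx.back()->GetHash();
        (void)txid;
        // ProcessNewBlock takes a shared const block; no CValidationState out-param.
        std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block);
        ProcessNewBlock(chainparams, shared_pblock, true, NULL);
    }
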
@@ -204,28 +203,28 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) // We can't make transactions until we have inputs // Therefore, load 100 blocks :) int baseheight = 0; - std::vector<CTransaction*>txFirst; + std::vector<CTransactionRef> txFirst; for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i) { CBlock *pblock = &pblocktemplate->block; // pointer for convenience pblock->nVersion = 1; pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1; - CMutableTransaction txCoinbase(pblock->vtx[0]); + CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; txCoinbase.vin[0].scriptSig = CScript(); txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce); txCoinbase.vin[0].scriptSig.push_back(chainActive.Height()); + txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this) txCoinbase.vout[0].scriptPubKey = CScript(); - pblock->vtx[0] = CTransaction(txCoinbase); + pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); if (txFirst.size() == 0) baseheight = chainActive.Height(); if (txFirst.size() < 4) - txFirst.push_back(new CTransaction(pblock->vtx[0])); + txFirst.push_back(pblock->vtx[0]); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); pblock->nNonce = blockinfo[i].nonce; - CValidationState state; - BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL)); - BOOST_CHECK(state.IsValid()); + std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock); + BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, NULL)); pblock->hashPrevBlock = pblock->GetHash(); } @@ -488,9 +487,6 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) TestPackageSelection(chainparams, scriptPubKey, txFirst); - BOOST_FOREACH(CTransaction *_tx, txFirst) - delete _tx; - fCheckpointsEnabled = true; } diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index f4b5768693..87cb38daac 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -17,7 +17,7 @@ using namespace std; class CAddrManSerializationMock : public CAddrMan { public: - virtual void Serialize(CDataStream& s, int nType, int nVersionDummy) const = 0; + virtual void Serialize(CDataStream& s) const = 0; //! Ensure that bucket placement is always the same for testing purposes. void MakeDeterministic() @@ -30,16 +30,16 @@ public: class CAddrManUncorrupted : public CAddrManSerializationMock { public: - void Serialize(CDataStream& s, int nType, int nVersionDummy) const + void Serialize(CDataStream& s) const { - CAddrMan::Serialize(s, nType, nVersionDummy); + CAddrMan::Serialize(s); } }; class CAddrManCorrupted : public CAddrManSerializationMock { public: - void Serialize(CDataStream& s, int nType, int nVersionDummy) const + void Serialize(CDataStream& s) const { // Produces corrupt output that claims addrman has 20 addrs when it only has one addr. unsigned char nVersion = 1; @@ -164,12 +164,12 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) bool fInboundIn = false; // Test that fFeeler is false by default. 
- CNode* pnode1 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 0, pszDest, fInboundIn); + CNode* pnode1 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, pszDest, fInboundIn); BOOST_CHECK(pnode1->fInbound == false); BOOST_CHECK(pnode1->fFeeler == false); fInboundIn = true; - CNode* pnode2 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 1, pszDest, fInboundIn); + CNode* pnode2 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, pszDest, fInboundIn); BOOST_CHECK(pnode2->fInbound == true); BOOST_CHECK(pnode2->fFeeler == false); } diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp index b7f83d38f0..e6b689bc6c 100644 --- a/src/test/pmt_tests.cpp +++ b/src/test/pmt_tests.cpp @@ -9,8 +9,8 @@ #include "uint256.h" #include "arith_uint256.h" #include "version.h" -#include "test_random.h" #include "test/test_bitcoin.h" +#include "test/test_random.h" #include <vector> @@ -45,14 +45,14 @@ BOOST_AUTO_TEST_CASE(pmt_test1) for (unsigned int j=0; j<nTx; j++) { CMutableTransaction tx; tx.nLockTime = j; // actual transaction data doesn't matter; just make the nLockTime's unique - block.vtx.push_back(CTransaction(tx)); + block.vtx.push_back(MakeTransactionRef(std::move(tx))); } // calculate actual merkle root and height uint256 merkleRoot1 = BlockMerkleRoot(block); std::vector<uint256> vTxid(nTx, uint256()); for (unsigned int j=0; j<nTx; j++) - vTxid[j] = block.vtx[j].GetHash(); + vTxid[j] = block.vtx[j]->GetHash(); int nHeight = 1, nTx_ = nTx; while (nTx_ > 1) { nTx_ = (nTx_+1)/2; diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp index 5c902387f1..c57feaec90 100644 --- a/src/test/policyestimator_tests.cpp +++ b/src/test/policyestimator_tests.cpp @@ -19,26 +19,18 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates) CTxMemPool mpool(CFeeRate(1000)); TestMemPoolEntryHelper entry; CAmount basefee(2000); - double basepri = 1e6; CAmount deltaFee(100); - double deltaPri=5e5; - std::vector<CAmount> feeV[2]; - std::vector<double> priV[2]; + std::vector<CAmount> feeV; - // Populate vectors of increasing fees or priorities + // Populate vectors of increasing fees for (int j = 0; j < 10; j++) { - //V[0] is for fee transactions - feeV[0].push_back(basefee * (j+1)); - priV[0].push_back(0); - //V[1] is for priority transactions - feeV[1].push_back(CAmount(0)); - priV[1].push_back(basepri * pow(10, j+1)); + feeV.push_back(basefee * (j+1)); } // Store the hashes of transactions that have been - // added to the mempool by their associate fee/pri + // added to the mempool by their associate fee // txHashes[j] is populated with transactions either of - // fee = basefee * (j+1) OR pri = 10^6 * 10^(j+1) + // fee = basefee * (j+1) std::vector<uint256> txHashes[10]; // Create a transaction template @@ -46,7 +38,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates) for (unsigned int i = 0; i < 128; i++) garbage.push_back('X'); CMutableTransaction tx; - std::list<CTransaction> dummyConflicted; tx.vin.resize(1); tx.vin[0].scriptSig = garbage; tx.vout.resize(1); @@ -54,34 +45,34 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates) CFeeRate baseRate(basefee, GetVirtualTransactionSize(tx)); // Create a fake block - std::vector<CTransaction> block; + std::vector<CTransactionRef> block; int blocknum = 0; // Loop through 200 blocks // At a decay .998 and 4 fee transactions per block // This makes the tx count about 1.33 per bucket, above the 1 threshold while (blocknum < 200) { - for (int j = 0; j < 10; j++) { // For each fee/pri multiple - for (int k = 0; k < 5; k++) { // add 4 
fee txs for every priority tx + for (int j = 0; j < 10; j++) { // For each fee + for (int k = 0; k < 4; k++) { // add 4 fee txs tx.vin[0].prevout.n = 10000*blocknum+100*j+k; // make transaction unique uint256 hash = tx.GetHash(); - mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool)); + mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool)); txHashes[j].push_back(hash); } } - //Create blocks where higher fee/pri txs are included more often + //Create blocks where higher fee txs are included more often for (int h = 0; h <= blocknum%10; h++) { - // 10/10 blocks add highest fee/pri transactions + // 10/10 blocks add highest fee transactions // 9/10 blocks add 2nd highest and so on until ... - // 1/10 blocks add lowest fee/pri transactions + // 1/10 blocks add lowest fee transactions while (txHashes[9-h].size()) { - std::shared_ptr<const CTransaction> ptx = mpool.get(txHashes[9-h].back()); + CTransactionRef ptx = mpool.get(txHashes[9-h].back()); if (ptx) - block.push_back(*ptx); + block.push_back(ptx); txHashes[9-h].pop_back(); } } - mpool.removeForBlock(block, ++blocknum, dummyConflicted); + mpool.removeForBlock(block, ++blocknum); block.clear(); if (blocknum == 30) { // At this point we should need to combine 5 buckets to get enough data points @@ -101,106 +92,104 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates) } std::vector<CAmount> origFeeEst; - std::vector<double> origPriEst; // Highest feerate is 10*baseRate and gets in all blocks, // second highest feerate is 9*baseRate and gets in 9/10 blocks = 90%, // third highest feerate is 8*base rate, and gets in 8/10 blocks = 80%, - // so estimateFee(1) should return 10*baseRate. + // so estimateFee(1) would return 10*baseRate but is hardcoded to return failure // Second highest feerate has 100% chance of being included by 2 blocks, // so estimateFee(2) should return 9*baseRate etc... 
for (int i = 1; i < 10;i++) { origFeeEst.push_back(mpool.estimateFee(i).GetFeePerK()); - origPriEst.push_back(mpool.estimatePriority(i)); - if (i > 1) { // Fee estimates should be monotonically decreasing + if (i > 2) { // Fee estimates should be monotonically decreasing BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]); - BOOST_CHECK(origPriEst[i-1] <= origPriEst[i-2]); } int mult = 11-i; - BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee); - BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee); - BOOST_CHECK(origPriEst[i-1] < pow(10,mult) * basepri + deltaPri); - BOOST_CHECK(origPriEst[i-1] > pow(10,mult) * basepri - deltaPri); + if (i > 1) { + BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee); + BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee); + } + else { + BOOST_CHECK(origFeeEst[i-1] == CFeeRate(0).GetFeePerK()); + } } // Mine 50 more blocks with no transactions happening, estimates shouldn't change // We haven't decayed the moving average enough so we still have enough data points in every bucket while (blocknum < 250) - mpool.removeForBlock(block, ++blocknum, dummyConflicted); + mpool.removeForBlock(block, ++blocknum); - for (int i = 1; i < 10;i++) { + BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0)); + for (int i = 2; i < 10;i++) { BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee); BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee); - BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] + deltaPri); - BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri); } // Mine 15 more blocks with lots of transactions happening and not getting mined // Estimates should go up while (blocknum < 265) { - for (int j = 0; j < 10; j++) { // For each fee/pri multiple - for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx + for (int j = 0; j < 10; j++) { // For each fee multiple + for (int k = 0; k < 4; k++) { // add 4 fee txs tx.vin[0].prevout.n = 10000*blocknum+100*j+k; uint256 hash = tx.GetHash(); - mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool)); + mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool)); txHashes[j].push_back(hash); } } - mpool.removeForBlock(block, ++blocknum, dummyConflicted); + mpool.removeForBlock(block, ++blocknum); } int answerFound; for (int i = 1; i < 10;i++) { BOOST_CHECK(mpool.estimateFee(i) == CFeeRate(0) || mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee); BOOST_CHECK(mpool.estimateSmartFee(i, &answerFound).GetFeePerK() > origFeeEst[answerFound-1] - deltaFee); - BOOST_CHECK(mpool.estimatePriority(i) == -1 || mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri); - BOOST_CHECK(mpool.estimateSmartPriority(i, &answerFound) > origPriEst[answerFound-1] - deltaPri); } // Mine all those transactions // Estimates should still not be below original for (int j = 0; j < 10; j++) { while(txHashes[j].size()) { - std::shared_ptr<const CTransaction> ptx = mpool.get(txHashes[j].back()); + CTransactionRef ptx = mpool.get(txHashes[j].back()); if (ptx) - block.push_back(*ptx); + block.push_back(ptx); txHashes[j].pop_back(); } } - mpool.removeForBlock(block, 265, dummyConflicted); + mpool.removeForBlock(block, 265); block.clear(); - for (int i = 1; i < 10;i++) { + BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0)); + for (int i = 2; i < 10;i++) { BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > 
origFeeEst[i-1] - deltaFee); - BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri); } // Mine 200 more blocks where everything is mined every block // Estimates should be below original estimates while (blocknum < 465) { - for (int j = 0; j < 10; j++) { // For each fee/pri multiple - for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx + for (int j = 0; j < 10; j++) { // For each fee multiple + for (int k = 0; k < 4; k++) { // add 4 fee txs tx.vin[0].prevout.n = 10000*blocknum+100*j+k; uint256 hash = tx.GetHash(); - mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool)); - std::shared_ptr<const CTransaction> ptx = mpool.get(hash); + mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool)); + CTransactionRef ptx = mpool.get(hash); if (ptx) - block.push_back(*ptx); + block.push_back(ptx); + } } - mpool.removeForBlock(block, ++blocknum, dummyConflicted); + mpool.removeForBlock(block, ++blocknum); block.clear(); } - for (int i = 1; i < 10; i++) { + BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0)); + for (int i = 2; i < 10; i++) { BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee); - BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] - deltaPri); } // Test that if the mempool is limited, estimateSmartFee won't return a value below the mempool min fee // and that estimateSmartPriority returns essentially an infinite value - mpool.addUnchecked(tx.GetHash(), entry.Fee(feeV[0][5]).Time(GetTime()).Priority(priV[1][5]).Height(blocknum).FromTx(tx, &mpool)); - // evict that transaction which should set a mempool min fee of minRelayTxFee + feeV[0][5] + mpool.addUnchecked(tx.GetHash(), entry.Fee(feeV[5]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool)); + // evict that transaction which should set a mempool min fee of minRelayTxFee + feeV[5] mpool.TrimToSize(1); - BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[0][5]); + BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[5]); for (int i = 1; i < 10; i++) { BOOST_CHECK(mpool.estimateSmartFee(i).GetFeePerK() >= mpool.estimateFee(i).GetFeePerK()); BOOST_CHECK(mpool.estimateSmartFee(i).GetFeePerK() >= mpool.GetMinFee(1).GetFeePerK()); diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp index 6cad02e738..1e5de2021c 100644 --- a/src/test/prevector_tests.cpp +++ b/src/test/prevector_tests.cpp @@ -4,12 +4,12 @@ #include <vector> #include "prevector.h" -#include "test_random.h" #include "serialize.h" #include "streams.h" #include "test/test_bitcoin.h" +#include "test/test_random.h" #include <boost/test/unit_test.hpp> diff --git a/src/test/rpc_tests.cpp b/src/test/rpc_tests.cpp index a3d1a25589..a359598ddc 100644 --- a/src/test/rpc_tests.cpp +++ b/src/test/rpc_tests.cpp @@ -84,6 +84,28 @@ BOOST_AUTO_TEST_CASE(rpc_rawparams) BOOST_CHECK_THROW(CallRPC(string("sendrawtransaction ")+rawtx+" extra"), runtime_error); } +BOOST_AUTO_TEST_CASE(rpc_togglenetwork) +{ + UniValue r; + + r = CallRPC("getnetworkinfo"); + bool netState = find_value(r.get_obj(), "networkactive").get_bool(); + BOOST_CHECK_EQUAL(netState, true); + + BOOST_CHECK_NO_THROW(CallRPC("setnetworkactive false")); + r = CallRPC("getnetworkinfo"); + int numConnection = find_value(r.get_obj(), "connections").get_int(); + BOOST_CHECK_EQUAL(numConnection, 0); + + netState = find_value(r.get_obj(), "networkactive").get_bool(); + BOOST_CHECK_EQUAL(netState, false); + + 
BOOST_CHECK_NO_THROW(CallRPC("setnetworkactive true")); + r = CallRPC("getnetworkinfo"); + netState = find_value(r.get_obj(), "networkactive").get_bool(); + BOOST_CHECK_EQUAL(netState, true); +} + BOOST_AUTO_TEST_CASE(rpc_rawsign) { UniValue r; diff --git a/src/test/script_P2SH_tests.cpp b/src/test/script_P2SH_tests.cpp index 1a01593a8e..789cc70d7f 100644 --- a/src/test/script_P2SH_tests.cpp +++ b/src/test/script_P2SH_tests.cpp @@ -5,7 +5,7 @@ #include "core_io.h" #include "key.h" #include "keystore.h" -#include "main.h" +#include "validation.h" #include "policy/policy.h" #include "script/script.h" #include "script/script_error.h" diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index 561adb8ea2..d7f3a3a657 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -173,11 +173,14 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript #if defined(HAVE_CONSENSUS_LIB) CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << tx2; - if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) { - BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, flags, NULL) == expect, message); - } else { - BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), 0, (const unsigned char*)&stream[0], stream.size(), 0, flags, NULL) == expect, message); - BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(begin_ptr(scriptPubKey), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), 0, flags, NULL) == expect,message); + int libconsensus_flags = flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL; + if (libconsensus_flags == flags) { + if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) { + BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message); + } else { + BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message); + BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect,message); + } } #endif } @@ -273,7 +276,7 @@ private: //! The Witness embedded script CScript witscript; CScriptWitness scriptWitness; - CTransaction creditTx; + CTransactionRef creditTx; CMutableTransaction spendTx; bool havePush; std::vector<unsigned char> push; @@ -316,8 +319,8 @@ public: redeemscript = scriptPubKey; scriptPubKey = CScript() << OP_HASH160 << ToByteVector(CScriptID(redeemscript)) << OP_EQUAL; } - creditTx = BuildCreditingTransaction(scriptPubKey, nValue); - spendTx = BuildSpendingTransaction(CScript(), CScriptWitness(), creditTx); + creditTx = MakeTransactionRef(BuildCreditingTransaction(scriptPubKey, nValue)); + spendTx = BuildSpendingTransaction(CScript(), CScriptWitness(), *creditTx); } TestBuilder& ScriptError(ScriptError_t err) @@ -418,7 +421,7 @@ public: { TestBuilder copy = *this; // Make a copy so we can rollback the push. 
DoPush(); - DoTest(creditTx.vout[0].scriptPubKey, spendTx.vin[0].scriptSig, scriptWitness, flags, comment, scriptError, nValue); + DoTest(creditTx->vout[0].scriptPubKey, spendTx.vin[0].scriptSig, scriptWitness, flags, comment, scriptError, nValue); *this = copy; return *this; } @@ -444,7 +447,7 @@ public: array.push_back(wit); } array.push_back(FormatScript(spendTx.vin[0].scriptSig)); - array.push_back(FormatScript(creditTx.vout[0].scriptPubKey)); + array.push_back(FormatScript(creditTx->vout[0].scriptPubKey)); array.push_back(FormatScriptFlags(flags)); array.push_back(FormatScriptError((ScriptError_t)scriptError)); array.push_back(comment); @@ -458,7 +461,7 @@ public: const CScript& GetScriptPubKey() { - return creditTx.vout[0].scriptPubKey; + return creditTx->vout[0].scriptPubKey; } }; diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index bec2c7459d..1dc86eb116 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -10,11 +10,54 @@ #include <stdint.h> #include <boost/test/unit_test.hpp> - using namespace std; BOOST_FIXTURE_TEST_SUITE(serialize_tests, BasicTestingSetup) +class CSerializeMethodsTestSingle +{ +protected: + int intval; + bool boolval; + std::string stringval; + const char* charstrval; + CTransactionRef txval; +public: + CSerializeMethodsTestSingle() = default; + CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), charstrval(charstrvalin), txval(MakeTransactionRef(txvalin)){} + ADD_SERIALIZE_METHODS; + + template <typename Stream, typename Operation> + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(intval); + READWRITE(boolval); + READWRITE(stringval); + READWRITE(FLATDATA(charstrval)); + READWRITE(txval); + } + + bool operator==(const CSerializeMethodsTestSingle& rhs) + { + return intval == rhs.intval && \ + boolval == rhs.boolval && \ + stringval == rhs.stringval && \ + strcmp(charstrval, rhs.charstrval) == 0 && \ + *txval == *rhs.txval; + } +}; + +class CSerializeMethodsTestMany : public CSerializeMethodsTestSingle +{ +public: + using CSerializeMethodsTestSingle::CSerializeMethodsTestSingle; + ADD_SERIALIZE_METHODS; + + template <typename Stream, typename Operation> + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITEMANY(intval, boolval, stringval, FLATDATA(charstrval), txval); + } +}; + BOOST_AUTO_TEST_CASE(sizes) { BOOST_CHECK_EQUAL(sizeof(char), GetSerializeSize(char(0), 0)); @@ -297,4 +340,30 @@ BOOST_AUTO_TEST_CASE(insert_delete) BOOST_CHECK_EQUAL(ss.size(), 0); } +BOOST_AUTO_TEST_CASE(class_methods) +{ + int intval(100); + bool boolval(true); + std::string stringval("testing"); + const char* charstrval("testing charstr"); + CMutableTransaction txval; + CSerializeMethodsTestSingle methodtest1(intval, boolval, stringval, charstrval, txval); + CSerializeMethodsTestMany methodtest2(intval, boolval, stringval, charstrval, txval); + CSerializeMethodsTestSingle methodtest3; + CSerializeMethodsTestMany methodtest4; + CDataStream ss(SER_DISK, PROTOCOL_VERSION); + BOOST_CHECK(methodtest1 == methodtest2); + ss << methodtest1; + ss >> methodtest4; + ss << methodtest2; + ss >> methodtest3; + BOOST_CHECK(methodtest1 == methodtest2); + BOOST_CHECK(methodtest2 == methodtest3); + BOOST_CHECK(methodtest3 == methodtest4); + + CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, FLATDATA(charstrval), txval); + ss2 
>> methodtest3; + BOOST_CHECK(methodtest3 == methodtest4); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index 0b1050d020..a524f5b946 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -5,13 +5,13 @@ #include "consensus/validation.h" #include "data/sighash.json.h" #include "hash.h" -#include "main.h" // For CheckTransaction -#include "test_random.h" +#include "validation.h" // For CheckTransaction #include "script/interpreter.h" #include "script/script.h" #include "serialize.h" #include "streams.h" #include "test/test_bitcoin.h" +#include "test/test_random.h" #include "util.h" #include "utilstrencodings.h" #include "version.h" @@ -184,7 +184,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) std::string raw_tx, raw_script, sigHashHex; int nIn, nHashType; uint256 sh; - CTransaction tx; + CTransactionRef tx; CScript scriptCode = CScript(); try { @@ -199,7 +199,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) stream >> tx; CValidationState state; - BOOST_CHECK_MESSAGE(CheckTransaction(tx, state), strTest); + BOOST_CHECK_MESSAGE(CheckTransaction(*tx, state), strTest); BOOST_CHECK(state.IsValid()); std::vector<unsigned char> raw = ParseHex(raw_script); @@ -209,7 +209,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) continue; } - sh = SignatureHash(scriptCode, tx, nIn, nHashType, 0, SIGVERSION_BASE); + sh = SignatureHash(scriptCode, *tx, nIn, nHashType, 0, SIGVERSION_BASE); BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest); } } diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp index e8a63ae60c..2e974cc612 100644 --- a/src/test/sigopcount_tests.cpp +++ b/src/test/sigopcount_tests.cpp @@ -2,7 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include "main.h" +#include "validation.h" #include "pubkey.h" #include "key.h" #include "script/script.h" diff --git a/src/test/skiplist_tests.cpp b/src/test/skiplist_tests.cpp index b19f8fbffb..d6835df71f 100644 --- a/src/test/skiplist_tests.cpp +++ b/src/test/skiplist_tests.cpp @@ -3,9 +3,9 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "chain.h" -#include "test_random.h" #include "util.h" #include "test/test_bitcoin.h" +#include "test/test_random.h" #include <vector> diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 34f501e867..8b715ce93e 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -15,6 +15,64 @@ using namespace boost::assign; // bring 'operator+=()' into scope BOOST_FIXTURE_TEST_SUITE(streams_tests, BasicTestingSetup) +BOOST_AUTO_TEST_CASE(streams_vector_writer) +{ + unsigned char a(1); + unsigned char b(2); + unsigned char bytes[] = { 3, 4, 5, 6 }; + std::vector<unsigned char> vch; + + // Each test runs twice. Serializing a second time at the same starting + // point should yield the same results, even if the first test grew the + // vector. 
+ + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{1, 2}})); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{1, 2}})); + vch.clear(); + + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2}})); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2}})); + vch.clear(); + + vch.resize(5, 0); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2, 0}})); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2, 0}})); + vch.clear(); + + vch.resize(4, 0); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 1, 2}})); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 1, 2}})); + vch.clear(); + + vch.resize(4, 0); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}})); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b); + BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}})); + vch.clear(); + + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes)); + BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes)); + BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); + vch.clear(); + + vch.resize(4, 8); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b); + BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b); + BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); + vch.clear(); +} + BOOST_AUTO_TEST_CASE(streams_serializedata_xor) { std::vector<char> in; diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp index b7a2a854d3..2a5a78de02 100644 --- a/src/test/test_bitcoin.cpp +++ b/src/test/test_bitcoin.cpp @@ -10,8 +10,9 @@ #include "consensus/consensus.h" #include "consensus/validation.h" #include "key.h" -#include "main.h" +#include "validation.h" #include "miner.h" +#include "net_processing.h" #include "pubkey.h" #include "random.h" #include "txdb.h" @@ -103,7 +104,7 @@ TestChain100Setup::TestChain100Setup() : TestingSetup(CBaseChainParams::REGTEST) { std::vector<CMutableTransaction> noTxns; CBlock b = CreateAndProcessBlock(noTxns, scriptPubKey); - coinbaseTxns.push_back(b.vtx[0]); + coinbaseTxns.push_back(*b.vtx[0]); } } @@ -121,15 +122,15 @@ TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>& // Replace mempool-selected txns with just coinbase plus passed-in txns: block.vtx.resize(1); BOOST_FOREACH(const CMutableTransaction& tx, txns) - block.vtx.push_back(tx); + block.vtx.push_back(MakeTransactionRef(tx)); // IncrementExtraNonce creates a valid coinbase and merkleRoot unsigned int extraNonce = 0; IncrementExtraNonce(&block, chainActive.Tip(), extraNonce); while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce; - CValidationState state; - ProcessNewBlock(state, chainparams, NULL, &block, true, NULL); + 
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block); + ProcessNewBlock(chainparams, shared_pblock, true, NULL); CBlock result = block; return result; @@ -140,12 +141,12 @@ TestChain100Setup::~TestChain100Setup() } -CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(CMutableTransaction &tx, CTxMemPool *pool) { +CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction &tx, CTxMemPool *pool) { CTransaction txn(tx); return FromTx(txn, pool); } -CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(CTransaction &txn, CTxMemPool *pool) { +CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CTransaction &txn, CTxMemPool *pool) { bool hasNoDependencies = pool ? pool->HasNoInputsOf(txn) : hadNoDependencies; // Hack to assume either its completely dependent on other mempool txs or not at all CAmount inChainValue = hasNoDependencies ? txn.GetValueOut() : 0; diff --git a/src/test/test_bitcoin.h b/src/test/test_bitcoin.h index 9819a7097d..3dea20445d 100644 --- a/src/test/test_bitcoin.h +++ b/src/test/test_bitcoin.h @@ -79,8 +79,8 @@ struct TestMemPoolEntryHelper nFee(0), nTime(0), dPriority(0.0), nHeight(1), hadNoDependencies(false), spendsCoinbase(false), sigOpCost(4) { } - CTxMemPoolEntry FromTx(CMutableTransaction &tx, CTxMemPool *pool = NULL); - CTxMemPoolEntry FromTx(CTransaction &tx, CTxMemPool *pool = NULL); + CTxMemPoolEntry FromTx(const CMutableTransaction &tx, CTxMemPool *pool = NULL); + CTxMemPoolEntry FromTx(const CTransaction &tx, CTxMemPool *pool = NULL); // Change the default value TestMemPoolEntryHelper &Fee(CAmount _fee) { nFee = _fee; return *this; } diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 34d9547f3d..ae5406ef7e 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -12,7 +12,7 @@ #include "core_io.h" #include "key.h" #include "keystore.h" -#include "main.h" // For CheckTransaction +#include "validation.h" // For CheckTransaction #include "policy/policy.h" #include "script/script.h" #include "script/sign.h" @@ -150,8 +150,7 @@ BOOST_AUTO_TEST_CASE(tx_valid) string transaction = test[1].get_str(); CDataStream stream(ParseHex(transaction), SER_NETWORK, PROTOCOL_VERSION); - CTransaction tx; - stream >> tx; + CTransaction tx(deserialize, stream); CValidationState state; BOOST_CHECK_MESSAGE(CheckTransaction(tx, state), strTest); @@ -236,8 +235,7 @@ BOOST_AUTO_TEST_CASE(tx_invalid) string transaction = test[1].get_str(); CDataStream stream(ParseHex(transaction), SER_NETWORK, PROTOCOL_VERSION ); - CTransaction tx; - stream >> tx; + CTransaction tx(deserialize, stream); CValidationState state; fValid = CheckTransaction(tx, state) && state.IsValid(); @@ -346,7 +344,7 @@ BOOST_AUTO_TEST_CASE(test_Get) BOOST_CHECK_EQUAL(coins.GetValueIn(t1), (50+21+22)*CENT); } -void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, CTransaction& output, CMutableTransaction& input, bool success = true) +void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, CTransactionRef& output, CMutableTransaction& input, bool success = true) { CMutableTransaction outputm; outputm.nVersion = 1; @@ -360,22 +358,22 @@ void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, C CDataStream ssout(SER_NETWORK, PROTOCOL_VERSION); ssout << outputm; ssout >> output; - assert(output.vin.size() == 1); - assert(output.vin[0] == outputm.vin[0]); - assert(output.vout.size() == 1); - assert(output.vout[0] == outputm.vout[0]); - assert(output.wit.vtxinwit.size() 
== 0); + assert(output->vin.size() == 1); + assert(output->vin[0] == outputm.vin[0]); + assert(output->vout.size() == 1); + assert(output->vout[0] == outputm.vout[0]); + assert(output->wit.vtxinwit.size() == 0); CMutableTransaction inputm; inputm.nVersion = 1; inputm.vin.resize(1); - inputm.vin[0].prevout.hash = output.GetHash(); + inputm.vin[0].prevout.hash = output->GetHash(); inputm.vin[0].prevout.n = 0; inputm.wit.vtxinwit.resize(1); inputm.vout.resize(1); inputm.vout[0].nValue = 1; inputm.vout[0].scriptPubKey = CScript(); - bool ret = SignSignature(keystore, output, inputm, 0, SIGHASH_ALL); + bool ret = SignSignature(keystore, *output, inputm, 0, SIGHASH_ALL); assert(ret == success); CDataStream ssin(SER_NETWORK, PROTOCOL_VERSION); ssin << inputm; @@ -393,11 +391,11 @@ void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, C } } -void CheckWithFlag(const CTransaction& output, const CMutableTransaction& input, int flags, bool success) +void CheckWithFlag(const CTransactionRef& output, const CMutableTransaction& input, int flags, bool success) { ScriptError error; CTransaction inputi(input); - bool ret = VerifyScript(inputi.vin[0].scriptSig, output.vout[0].scriptPubKey, inputi.wit.vtxinwit.size() > 0 ? &inputi.wit.vtxinwit[0].scriptWitness : NULL, flags, TransactionSignatureChecker(&inputi, 0, output.vout[0].nValue), &error); + bool ret = VerifyScript(inputi.vin[0].scriptSig, output->vout[0].scriptPubKey, inputi.wit.vtxinwit.size() > 0 ? &inputi.wit.vtxinwit[0].scriptWitness : NULL, flags, TransactionSignatureChecker(&inputi, 0, output->vout[0].nValue), &error); assert(ret == success); } @@ -466,10 +464,10 @@ BOOST_AUTO_TEST_CASE(test_big_witness_transaction) { assert(hashSigned); } - CTransaction tx; CDataStream ssout(SER_NETWORK, PROTOCOL_VERSION); - WithOrVersion(&ssout, 0) << mtx; - WithOrVersion(&ssout, 0) >> tx; + auto vstream = WithOrVersion(&ssout, 0); + vstream << mtx; + CTransaction tx(deserialize, vstream); // check all inputs concurrently, with the cache PrecomputedTransactionData txdata(tx); @@ -547,7 +545,7 @@ BOOST_AUTO_TEST_CASE(test_witness) keystore2.AddCScript(GetScriptForWitness(scriptMulti)); keystore2.AddKeyPubKey(key3, pubkey3); - CTransaction output1, output2; + CTransactionRef output1, output2; CMutableTransaction input1, input2; SignatureData sigdata; @@ -639,8 +637,8 @@ BOOST_AUTO_TEST_CASE(test_witness) CheckWithFlag(output1, input1, 0, false); CreateCreditAndSpend(keystore2, scriptMulti, output2, input2, false); CheckWithFlag(output2, input2, 0, false); - BOOST_CHECK(output1 == output2); - UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); + BOOST_CHECK(*output1 == *output2); + UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); // P2SH 2-of-2 multisig @@ -650,8 +648,8 @@ BOOST_AUTO_TEST_CASE(test_witness) CreateCreditAndSpend(keystore2, GetScriptForDestination(CScriptID(scriptMulti)), output2, input2, false); CheckWithFlag(output2, input2, 0, true); CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, false); - BOOST_CHECK(output1 == output2); - UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, 
MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); + BOOST_CHECK(*output1 == *output2); + UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); @@ -662,8 +660,8 @@ BOOST_AUTO_TEST_CASE(test_witness) CreateCreditAndSpend(keystore2, GetScriptForWitness(scriptMulti), output2, input2, false); CheckWithFlag(output2, input2, 0, true); CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false); - BOOST_CHECK(output1 == output2); - UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); + BOOST_CHECK(*output1 == *output2); + UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); @@ -674,8 +672,8 @@ BOOST_AUTO_TEST_CASE(test_witness) CreateCreditAndSpend(keystore2, GetScriptForDestination(CScriptID(GetScriptForWitness(scriptMulti))), output2, input2, false); CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, true); CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false); - BOOST_CHECK(output1 == output2); - UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); + BOOST_CHECK(*output1 == *output2); + UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); } diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 76e4e7a4be..55b4d28fb2 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -4,7 +4,7 @@ #include "consensus/validation.h" #include "key.h" -#include "main.h" +#include "validation.h" #include "miner.h" #include "pubkey.h" #include "txmempool.h" diff --git a/src/test/uint256_tests.cpp b/src/test/uint256_tests.cpp index da0a3d73e0..2732948060 100644 --- a/src/test/uint256_tests.cpp +++ b/src/test/uint256_tests.cpp @@ -184,25 +184,25 @@ BOOST_AUTO_TEST_CASE( methods ) // GetHex SetHex begin() end() size() GetLow64 G BOOST_CHECK(OneL.begin() + 32 == OneL.end()); BOOST_CHECK(MaxL.begin() + 32 == MaxL.end()); BOOST_CHECK(TmpL.begin() + 32 == TmpL.end()); - BOOST_CHECK(R1L.GetSerializeSize(0,PROTOCOL_VERSION) == 32); - BOOST_CHECK(ZeroL.GetSerializeSize(0,PROTOCOL_VERSION) == 32); + BOOST_CHECK(GetSerializeSize(R1L, 0, PROTOCOL_VERSION) == 32); + BOOST_CHECK(GetSerializeSize(ZeroL, 0, PROTOCOL_VERSION) == 32); - std::stringstream ss; - R1L.Serialize(ss,0,PROTOCOL_VERSION); + 
CDataStream ss(0, PROTOCOL_VERSION); + ss << R1L; BOOST_CHECK(ss.str() == std::string(R1Array,R1Array+32)); - TmpL.Unserialize(ss,0,PROTOCOL_VERSION); + ss >> TmpL; BOOST_CHECK(R1L == TmpL); - ss.str(""); - ZeroL.Serialize(ss,0,PROTOCOL_VERSION); + ss.clear(); + ss << ZeroL; BOOST_CHECK(ss.str() == std::string(ZeroArray,ZeroArray+32)); - TmpL.Unserialize(ss,0,PROTOCOL_VERSION); + ss >> TmpL; BOOST_CHECK(ZeroL == TmpL); - ss.str(""); - MaxL.Serialize(ss,0,PROTOCOL_VERSION); + ss.clear(); + ss << MaxL; BOOST_CHECK(ss.str() == std::string(MaxArray,MaxArray+32)); - TmpL.Unserialize(ss,0,PROTOCOL_VERSION); + ss >> TmpL; BOOST_CHECK(MaxL == TmpL); - ss.str(""); + ss.clear(); BOOST_CHECK(R1S.GetHex() == R1S.ToString()); BOOST_CHECK(R2S.GetHex() == R2S.ToString()); @@ -230,24 +230,24 @@ BOOST_AUTO_TEST_CASE( methods ) // GetHex SetHex begin() end() size() GetLow64 G BOOST_CHECK(OneS.begin() + 20 == OneS.end()); BOOST_CHECK(MaxS.begin() + 20 == MaxS.end()); BOOST_CHECK(TmpS.begin() + 20 == TmpS.end()); - BOOST_CHECK(R1S.GetSerializeSize(0,PROTOCOL_VERSION) == 20); - BOOST_CHECK(ZeroS.GetSerializeSize(0,PROTOCOL_VERSION) == 20); + BOOST_CHECK(GetSerializeSize(R1S, 0, PROTOCOL_VERSION) == 20); + BOOST_CHECK(GetSerializeSize(ZeroS, 0, PROTOCOL_VERSION) == 20); - R1S.Serialize(ss,0,PROTOCOL_VERSION); + ss << R1S; BOOST_CHECK(ss.str() == std::string(R1Array,R1Array+20)); - TmpS.Unserialize(ss,0,PROTOCOL_VERSION); + ss >> TmpS; BOOST_CHECK(R1S == TmpS); - ss.str(""); - ZeroS.Serialize(ss,0,PROTOCOL_VERSION); + ss.clear(); + ss << ZeroS; BOOST_CHECK(ss.str() == std::string(ZeroArray,ZeroArray+20)); - TmpS.Unserialize(ss,0,PROTOCOL_VERSION); + ss >> TmpS; BOOST_CHECK(ZeroS == TmpS); - ss.str(""); - MaxS.Serialize(ss,0,PROTOCOL_VERSION); + ss.clear(); + ss << MaxS; BOOST_CHECK(ss.str() == std::string(MaxArray,MaxArray+20)); - TmpS.Unserialize(ss,0,PROTOCOL_VERSION); + ss >> TmpS; BOOST_CHECK(MaxS == TmpS); - ss.str(""); + ss.clear(); } BOOST_AUTO_TEST_CASE( conversion ) diff --git a/src/test/univalue_tests.cpp b/src/test/univalue_tests.cpp index 45d480c816..7f794fcbe9 100644 --- a/src/test/univalue_tests.cpp +++ b/src/test/univalue_tests.cpp @@ -1,4 +1,4 @@ -// Copyright 2014 BitPay, Inc. +// Copyright (c) 2014 BitPay Inc. // Copyright (c) 2014-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 0f1c7ab222..bad72ffc0f 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -6,11 +6,11 @@ #include "clientversion.h" #include "primitives/transaction.h" -#include "test_random.h" #include "sync.h" #include "utilstrencodings.h" #include "utilmoneystr.h" #include "test/test_bitcoin.h" +#include "test/test_random.h" #include <stdint.h> #include <vector> diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp index 784e796998..bae0eff7e5 100644 --- a/src/test/versionbits_tests.cpp +++ b/src/test/versionbits_tests.cpp @@ -3,11 +3,11 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "chain.h" -#include "test_random.h" #include "versionbits.h" #include "test/test_bitcoin.h" +#include "test/test_random.h" #include "chainparams.h" -#include "main.h" +#include "validation.h" #include "consensus/params.h" #include <boost/test/unit_test.hpp> diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 1ca6b46566..67a69363f0 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -470,7 +470,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& // Finally - now create the service if (private_key.empty()) // No private key, generate one - private_key = "NEW:BEST"; + private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214 // Request hidden service, redirect port. // Note that the 'virtual' port doesn't have to be the same as our internal port, but this is just a convenient // choice. TODO; refactor the shutdown sequence some day. @@ -501,10 +501,10 @@ static std::vector<uint8_t> ComputeResponse(const std::string &key, const std::v { CHMAC_SHA256 computeHash((const uint8_t*)key.data(), key.size()); std::vector<uint8_t> computedHash(CHMAC_SHA256::OUTPUT_SIZE, 0); - computeHash.Write(begin_ptr(cookie), cookie.size()); - computeHash.Write(begin_ptr(clientNonce), clientNonce.size()); - computeHash.Write(begin_ptr(serverNonce), serverNonce.size()); - computeHash.Finalize(begin_ptr(computedHash)); + computeHash.Write(cookie.data(), cookie.size()); + computeHash.Write(clientNonce.data(), clientNonce.size()); + computeHash.Write(serverNonce.data(), serverNonce.size()); + computeHash.Finalize(computedHash.data()); return computedHash; } diff --git a/src/txdb.h b/src/txdb.h index adb3f66327..687c686775 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -43,7 +43,7 @@ struct CDiskTxPos : public CDiskBlockPos ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(*(CDiskBlockPos*)this); READWRITE(VARINT(nTxOffset)); } diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 0f1c166abc..4334ebde6d 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -8,7 +8,7 @@ #include "clientversion.h" #include "consensus/consensus.h" #include "consensus/validation.h" -#include "main.h" +#include "validation.h" #include "policy/policy.h" #include "policy/fees.h" #include "streams.h" @@ -24,7 +24,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransaction& _tx, const CAmount& _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, bool poolHasNoInputsOf, CAmount _inChainInputValue, bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp): - tx(std::make_shared<CTransaction>(_tx)), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight), + tx(MakeTransactionRef(_tx)), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight), hadNoDependencies(poolHasNoInputsOf), inChainInputValue(_inChainInputValue), spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp) { @@ -503,7 +503,7 @@ void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants } } -void CTxMemPool::removeRecursive(const CTransaction &origTx, std::list<CTransaction>& removed) +void CTxMemPool::removeRecursive(const CTransaction &origTx) { // Remove transaction from memory pool { @@ -530,9 +530,6 @@ void CTxMemPool::removeRecursive(const CTransaction &origTx, std::list<CTransact BOOST_FOREACH(txiter it, 
txToRemove) { CalculateDescendants(it, setAllRemoves); } - BOOST_FOREACH(txiter it, setAllRemoves) { - removed.push_back(it->GetTx()); - } RemoveStaged(setAllRemoves, false); } } @@ -541,7 +538,7 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem { // Remove transactions spending a coinbase which are now immature and no-longer-final transactions LOCK(cs); - list<CTransaction> transactionsToRemove; + setEntries txToRemove; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction& tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); @@ -549,16 +546,16 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem if (!CheckFinalTx(tx, flags) || !CheckSequenceLocks(tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be invalid // So it's critical that we remove the tx and not depend on the LockPoints. - transactionsToRemove.push_back(tx); + txToRemove.insert(it); } else if (it->GetSpendsCoinbase()) { BOOST_FOREACH(const CTxIn& txin, tx.vin) { indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash); if (it2 != mapTx.end()) continue; const CCoins *coins = pcoins->AccessCoins(txin.prevout.hash); - if (nCheckFrequency != 0) assert(coins); + if (nCheckFrequency != 0) assert(coins); if (!coins || (coins->IsCoinBase() && ((signed long)nMemPoolHeight) - coins->nHeight < COINBASE_MATURITY)) { - transactionsToRemove.push_back(tx); + txToRemove.insert(it); break; } } @@ -567,13 +564,14 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem mapTx.modify(it, update_lock_points(lp)); } } - BOOST_FOREACH(const CTransaction& tx, transactionsToRemove) { - list<CTransaction> removed; - removeRecursive(tx, removed); + setEntries setAllRemoves; + for (txiter it : txToRemove) { + CalculateDescendants(it, setAllRemoves); } + RemoveStaged(setAllRemoves, false); } -void CTxMemPool::removeConflicts(const CTransaction &tx, std::list<CTransaction>& removed) +void CTxMemPool::removeConflicts(const CTransaction &tx) { // Remove transactions which depend on inputs of tx, recursively LOCK(cs); @@ -583,7 +581,7 @@ void CTxMemPool::removeConflicts(const CTransaction &tx, std::list<CTransaction> const CTransaction &txConflict = *it->second; if (txConflict != tx) { - removeRecursive(txConflict, removed); + removeRecursive(txConflict); ClearPrioritisation(txConflict.GetHash()); } } @@ -593,29 +591,29 @@ void CTxMemPool::removeConflicts(const CTransaction &tx, std::list<CTransaction> /** * Called when a block is connected. Removes from mempool and updates the miner fee estimator. 
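Illustrative sketch (standalone C++, names invented): the reworked removeRecursive and removeForReorg above first expand the entries slated for removal into the full set of in-pool descendants via CalculateDescendants and only then call RemoveStaged once on that closure, so no entry is left behind whose ancestor has just been erased; the toy version below shows the same collect-then-erase pattern on a plain dependency map.

#include <cassert>
#include <map>
#include <set>
#include <vector>

// Hypothetical dependency index: entry id -> ids of entries that depend on it.
using Children = std::map<int, std::vector<int>>;

// Collect 'seed' and all of its transitive descendants into 'out'
// (the role CalculateDescendants plays in the code above).
static void CollectDescendants(int seed, const Children& children, std::set<int>& out)
{
    if (!out.insert(seed).second)
        return; // already visited
    auto it = children.find(seed);
    if (it == children.end())
        return;
    for (int child : it->second)
        CollectDescendants(child, children, out);
}

int main()
{
    // 1 is depended on by 2 and 3; 3 is depended on by 4.
    Children children{{1, {2, 3}}, {3, {4}}};
    std::set<int> pool{1, 2, 3, 4, 5};

    // Removing 1 must also drop 2, 3 and 4, but leave 5 alone.
    std::set<int> toRemove;
    CollectDescendants(1, children, toRemove);
    for (int id : toRemove)
        pool.erase(id); // analogous to one RemoveStaged call on the full closure

    assert((pool == std::set<int>{5}));
    return 0;
}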
*/ -void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight, - std::list<CTransaction>& conflicts, bool fCurrentEstimate) +void CTxMemPool::removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight, + bool fCurrentEstimate) { LOCK(cs); std::vector<CTxMemPoolEntry> entries; - BOOST_FOREACH(const CTransaction& tx, vtx) + for (const auto& tx : vtx) { - uint256 hash = tx.GetHash(); + uint256 hash = tx->GetHash(); indexed_transaction_set::iterator i = mapTx.find(hash); if (i != mapTx.end()) entries.push_back(*i); } - BOOST_FOREACH(const CTransaction& tx, vtx) + for (const auto& tx : vtx) { - txiter it = mapTx.find(tx.GetHash()); + txiter it = mapTx.find(tx->GetHash()); if (it != mapTx.end()) { setEntries stage; stage.insert(it); RemoveStaged(stage, true); } - removeConflicts(tx, conflicts); - ClearPrioritisation(tx.GetHash()); + removeConflicts(*tx); + ClearPrioritisation(tx->GetHash()); } // After the txs in the new block have been removed from the mempool, update policy estimates minerPolicyEstimator->processBlock(nBlockHeight, entries, fCurrentEstimate); @@ -830,6 +828,10 @@ void CTxMemPool::queryHashes(vector<uint256>& vtxid) } } +static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) { + return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxSize()), it->GetModifiedFee() - it->GetFee()}; +} + std::vector<TxMempoolInfo> CTxMemPool::infoAll() const { LOCK(cs); @@ -838,13 +840,13 @@ std::vector<TxMempoolInfo> CTxMemPool::infoAll() const std::vector<TxMempoolInfo> ret; ret.reserve(mapTx.size()); for (auto it : iters) { - ret.push_back(TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxSize())}); + ret.push_back(GetInfo(it)); } return ret; } -std::shared_ptr<const CTransaction> CTxMemPool::get(const uint256& hash) const +CTransactionRef CTxMemPool::get(const uint256& hash) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(hash); @@ -859,7 +861,7 @@ TxMempoolInfo CTxMemPool::info(const uint256& hash) const indexed_transaction_set::const_iterator i = mapTx.find(hash); if (i == mapTx.end()) return TxMempoolInfo(); - return TxMempoolInfo{i->GetSharedTx(), i->GetTime(), CFeeRate(i->GetFee(), i->GetTxSize())}; + return GetInfo(i); } CFeeRate CTxMemPool::estimateFee(int nBlocks) const @@ -888,7 +890,7 @@ CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const { try { LOCK(cs); - fileout << 109900; // version required to read: 0.10.99 or later + fileout << 139900; // version required to read: 0.13.99 or later fileout << CLIENT_VERSION; // version that wrote the file minerPolicyEstimator->Write(fileout); } @@ -907,9 +909,8 @@ CTxMemPool::ReadFeeEstimates(CAutoFile& filein) filein >> nVersionRequired >> nVersionThatWrote; if (nVersionRequired > CLIENT_VERSION) return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee estimate file", nVersionRequired); - LOCK(cs); - minerPolicyEstimator->Read(filein); + minerPolicyEstimator->Read(filein, nVersionThatWrote); } catch (const std::exception&) { LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy estimator data (non-fatal)\n"); @@ -972,7 +973,7 @@ bool CCoinsViewMemPool::GetCoins(const uint256 &txid, CCoins &coins) const { // If an entry in the mempool exists, always return that one, as it's guaranteed to never // conflict with the underlying cache, and it cannot have pruned entries (as it contains full) // transactions. 
First checking the underlying cache risks returning a pruned entry instead. - shared_ptr<const CTransaction> ptx = mempool.get(txid); + CTransactionRef ptx = mempool.get(txid); if (ptx) { coins = CCoins(*ptx, MEMPOOL_HEIGHT); return true; diff --git a/src/txmempool.h b/src/txmempool.h index 1763930ba0..8a935391de 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -6,9 +6,12 @@ #ifndef BITCOIN_TXMEMPOOL_H #define BITCOIN_TXMEMPOOL_H -#include <list> #include <memory> #include <set> +#include <map> +#include <vector> +#include <utility> +#include <string> #include "amount.h" #include "coins.h" @@ -59,7 +62,7 @@ class CTxMemPool; /** \class CTxMemPoolEntry * - * CTxMemPoolEntry stores data about the correponding transaction, as well + * CTxMemPoolEntry stores data about the corresponding transaction, as well * as data about all in-mempool transactions that depend on the transaction * ("descendant" transactions). * @@ -77,7 +80,7 @@ class CTxMemPool; class CTxMemPoolEntry { private: - std::shared_ptr<const CTransaction> tx; + CTransactionRef tx; CAmount nFee; //!< Cached to avoid expensive parent-transaction lookups size_t nTxWeight; //!< ... and avoid recomputing tx weight (also used for GetTxSize()) size_t nModSize; //!< ... and modified size for priority @@ -115,7 +118,7 @@ public: CTxMemPoolEntry(const CTxMemPoolEntry& other); const CTransaction& GetTx() const { return *this->tx; } - std::shared_ptr<const CTransaction> GetSharedTx() const { return this->tx; } + CTransactionRef GetSharedTx() const { return this->tx; } /** * Fast calculation of lower bound of current priority as update * from entry priority. Only inputs that were originally in-chain will age. @@ -319,13 +322,16 @@ class CBlockPolicyEstimator; struct TxMempoolInfo { /** The transaction itself */ - std::shared_ptr<const CTransaction> tx; + CTransactionRef tx; /** Time the transaction entered the mempool. */ int64_t nTime; /** Feerate of the transaction. */ CFeeRate feeRate; + + /** The fee delta. */ + int64_t nFeeDelta; }; /** @@ -521,11 +527,11 @@ public: bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, bool fCurrentEstimate = true); bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool fCurrentEstimate = true); - void removeRecursive(const CTransaction &tx, std::list<CTransaction>& removed); + void removeRecursive(const CTransaction &tx); void removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags); - void removeConflicts(const CTransaction &tx, std::list<CTransaction>& removed); - void removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight, - std::list<CTransaction>& conflicts, bool fCurrentEstimate = true); + void removeConflicts(const CTransaction &tx); + void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight, + bool fCurrentEstimate = true); void clear(); void _clear(); //lock free bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb); @@ -617,7 +623,7 @@ public: return (mapTx.count(hash) != 0); } - std::shared_ptr<const CTransaction> get(const uint256& hash) const; + CTransactionRef get(const uint256& hash) const; TxMempoolInfo info(const uint256& hash) const; std::vector<TxMempoolInfo> infoAll() const; diff --git a/src/ui_interface.h b/src/ui_interface.h index 177ff238db..15b9614f63 100644 --- a/src/ui_interface.h +++ b/src/ui_interface.h @@ -85,6 +85,9 @@ public: /** Number of network connections changed. 
*/ boost::signals2::signal<void (int newNumConnections)> NotifyNumConnectionsChanged; + /** Network activity state changed. */ + boost::signals2::signal<void (bool networkActive)> NotifyNetworkActiveChanged; + /** * Status bar alerts changed. */ diff --git a/src/uint256.h b/src/uint256.h index dd8432d74c..86e7c0b6c6 100644 --- a/src/uint256.h +++ b/src/uint256.h @@ -78,11 +78,6 @@ public: return sizeof(data); } - unsigned int GetSerializeSize(int nType, int nVersion) const - { - return sizeof(data); - } - uint64_t GetUint64(int pos) const { const uint8_t* ptr = data + pos * 8; @@ -97,13 +92,13 @@ public: } template<typename Stream> - void Serialize(Stream& s, int nType, int nVersion) const + void Serialize(Stream& s) const { s.write((char*)data, sizeof(data)); } template<typename Stream> - void Unserialize(Stream& s, int nType, int nVersion) + void Unserialize(Stream& s) { s.read((char*)data, sizeof(data)); } diff --git a/src/undo.h b/src/undo.h index d4fc84c90c..a5d276e7f3 100644 --- a/src/undo.h +++ b/src/undo.h @@ -27,29 +27,23 @@ public: CTxInUndo() : txout(), fCoinBase(false), nHeight(0), nVersion(0) {} CTxInUndo(const CTxOut &txoutIn, bool fCoinBaseIn = false, unsigned int nHeightIn = 0, int nVersionIn = 0) : txout(txoutIn), fCoinBase(fCoinBaseIn), nHeight(nHeightIn), nVersion(nVersionIn) { } - unsigned int GetSerializeSize(int nType, int nVersion) const { - return ::GetSerializeSize(VARINT(nHeight*2+(fCoinBase ? 1 : 0)), nType, nVersion) + - (nHeight > 0 ? ::GetSerializeSize(VARINT(this->nVersion), nType, nVersion) : 0) + - ::GetSerializeSize(CTxOutCompressor(REF(txout)), nType, nVersion); - } - template<typename Stream> - void Serialize(Stream &s, int nType, int nVersion) const { - ::Serialize(s, VARINT(nHeight*2+(fCoinBase ? 1 : 0)), nType, nVersion); + void Serialize(Stream &s) const { + ::Serialize(s, VARINT(nHeight*2+(fCoinBase ? 
1 : 0))); if (nHeight > 0) - ::Serialize(s, VARINT(this->nVersion), nType, nVersion); - ::Serialize(s, CTxOutCompressor(REF(txout)), nType, nVersion); + ::Serialize(s, VARINT(this->nVersion)); + ::Serialize(s, CTxOutCompressor(REF(txout))); } template<typename Stream> - void Unserialize(Stream &s, int nType, int nVersion) { + void Unserialize(Stream &s) { unsigned int nCode = 0; - ::Unserialize(s, VARINT(nCode), nType, nVersion); + ::Unserialize(s, VARINT(nCode)); nHeight = nCode / 2; fCoinBase = nCode & 1; if (nHeight > 0) - ::Unserialize(s, VARINT(this->nVersion), nType, nVersion); - ::Unserialize(s, REF(CTxOutCompressor(REF(txout))), nType, nVersion); + ::Unserialize(s, VARINT(this->nVersion)); + ::Unserialize(s, REF(CTxOutCompressor(REF(txout)))); } }; @@ -63,7 +57,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(vprevout); } }; @@ -77,7 +71,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(vtxundo); } }; diff --git a/src/util.cpp b/src/util.cpp index c20ede6221..60701e7949 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -107,7 +107,6 @@ map<string, vector<string> > mapMultiArgs; bool fDebug = false; bool fPrintToConsole = false; bool fPrintToDebugLog = true; -bool fServer = false; string strMiscWarning; bool fLogTimestamps = DEFAULT_LOGTIMESTAMPS; bool fLogTimeMicros = DEFAULT_LOGTIMEMICROS; @@ -259,7 +258,7 @@ bool LogAcceptCategory(const char* category) * suppress printing of the timestamp when multiple calls are made that don't * end in a newline. Initialize it to true, and hold it, in the calling context. 
*/ -static std::string LogTimestampStr(const std::string &str, bool *fStartedNewLine) +static std::string LogTimestampStr(const std::string &str, std::atomic_bool *fStartedNewLine) { string strStamped; @@ -286,7 +285,7 @@ static std::string LogTimestampStr(const std::string &str, bool *fStartedNewLine int LogPrintStr(const std::string &str) { int ret = 0; // Returns total number of characters written - static bool fStartedNewLine = true; + static std::atomic_bool fStartedNewLine(true); string strTimestamped = LogTimestampStr(str, &fStartedNewLine); @@ -705,13 +704,13 @@ void ShrinkDebugFile() // Restart the file with some of the end std::vector <char> vch(200000,0); fseek(file, -((long)vch.size()), SEEK_END); - int nBytes = fread(begin_ptr(vch), 1, vch.size(), file); + int nBytes = fread(vch.data(), 1, vch.size(), file); fclose(file); file = fopen(pathLog.string().c_str(), "w"); if (file) { - fwrite(begin_ptr(vch), 1, nBytes, file); + fwrite(vch.data(), 1, nBytes, file); fclose(file); } } diff --git a/src/util.h b/src/util.h index bbb9b5db82..e8aa266f28 100644 --- a/src/util.h +++ b/src/util.h @@ -46,7 +46,6 @@ extern std::map<std::string, std::vector<std::string> > mapMultiArgs; extern bool fDebug; extern bool fPrintToConsole; extern bool fPrintToDebugLog; -extern bool fServer; extern std::string strMiscWarning; extern bool fLogTimestamps; extern bool fLogTimeMicros; diff --git a/src/utiltime.cpp b/src/utiltime.cpp index da590f8889..51d545ef8a 100644 --- a/src/utiltime.cpp +++ b/src/utiltime.cpp @@ -74,8 +74,9 @@ void MilliSleep(int64_t n) std::string DateTimeStrFormat(const char* pszFormat, int64_t nTime) { + static std::locale classic(std::locale::classic()); // std::locale takes ownership of the pointer - std::locale loc(std::locale::classic(), new boost::posix_time::time_facet(pszFormat)); + std::locale loc(classic, new boost::posix_time::time_facet(pszFormat)); std::stringstream ss; ss.imbue(loc); ss << boost::posix_time::from_time_t(nTime); diff --git a/src/main.cpp b/src/validation.cpp index 50158b4687..02c047f1d4 100644 --- a/src/main.cpp +++ b/src/validation.cpp @@ -3,11 +3,9 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-#include "main.h" +#include "validation.h" -#include "addrman.h" #include "arith_uint256.h" -#include "blockencodings.h" #include "chainparams.h" #include "checkpoints.h" #include "checkqueue.h" @@ -16,8 +14,6 @@ #include "consensus/validation.h" #include "hash.h" #include "init.h" -#include "merkleblock.h" -#include "net.h" #include "policy/fees.h" #include "policy/policy.h" #include "pow.h" @@ -27,6 +23,7 @@ #include "script/script.h" #include "script/sigcache.h" #include "script/standard.h" +#include "timedata.h" #include "tinyformat.h" #include "txdb.h" #include "txmempool.h" @@ -63,11 +60,10 @@ CCriticalSection cs_main; BlockMap mapBlockIndex; CChain chainActive; CBlockIndex *pindexBestHeader = NULL; -int64_t nTimeBestReceived = 0; CWaitableCriticalSection csBestBlock; CConditionVariable cvBlockChange; int nScriptCheckThreads = 0; -bool fImporting = false; +std::atomic_bool fImporting(false); bool fReindex = false; bool fTxIndex = false; bool fHavePruned = false; @@ -86,25 +82,6 @@ CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE; CTxMemPool mempool(::minRelayTxFee); -FeeFilterRounder filterRounder(::minRelayTxFee); - -struct IteratorComparator -{ - template<typename I> - bool operator()(const I& a, const I& b) - { - return &(*a) < &(*b); - } -}; - -struct COrphanTx { - CTransaction tx; - NodeId fromPeer; - int64_t nTimeExpire; -}; -map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main); -map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main); -void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); static void CheckBlockIndex(const Consensus::Params& consensusParams); @@ -113,8 +90,6 @@ CScript COINBASE_FLAGS; const string strMessageMagic = "Bitcoin Signed Message:\n"; -static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8] - // Internal stuff namespace { @@ -147,8 +122,6 @@ namespace { * missing the data for the block. */ set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates; - /** Number of nodes with fSyncStarted. */ - int nSyncStarted = 0; /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions. * Pruned nodes may have entries where B is missing data. */ @@ -175,501 +148,13 @@ namespace { /** chainwork for the last block that preciousblock has been applied to. */ arith_uint256 nLastPreciousChainwork = 0; - /** - * Sources of received blocks, saved to be able to send them reject - * messages or ban them when processing happens afterwards. Protected by - * cs_main. - */ - map<uint256, NodeId> mapBlockSource; - - /** - * Filter for transactions that were recently rejected by - * AcceptToMemoryPool. These are not rerequested until the chain tip - * changes, at which point the entire filter is reset. Protected by - * cs_main. - * - * Without this filter we'd be re-requesting txs from each of our peers, - * increasing bandwidth consumption considerably. For instance, with 100 - * peers, half of which relay a tx we don't accept, that might be a 50x - * bandwidth increase. A flooding attacker attempting to roll-over the - * filter using minimum-sized, 60byte, transactions might manage to send - * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a - * two minute window to send invs to us. 
- * - * Decreasing the false positive rate is fairly cheap, so we pick one in a - * million to make it highly unlikely for users to have issues with this - * filter. - * - * Memory used: 1.3 MB - */ - std::unique_ptr<CRollingBloomFilter> recentRejects; - uint256 hashRecentRejectsChainTip; - - /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */ - struct QueuedBlock { - uint256 hash; - CBlockIndex* pindex; //!< Optional. - bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. - std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads - }; - map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight; - - /** Stack of nodes which we have set to announce using compact blocks */ - list<NodeId> lNodesAnnouncingHeaderAndIDs; - - /** Number of preferable block download peers. */ - int nPreferredDownload = 0; - /** Dirty block index entries. */ set<CBlockIndex*> setDirtyBlockIndex; /** Dirty block file entries. */ set<int> setDirtyFileInfo; - - /** Number of peers from which we're downloading blocks. */ - int nPeersWithValidatedDownloads = 0; - - /** Relay map, protected by cs_main. */ - typedef std::map<uint256, std::shared_ptr<const CTransaction>> MapRelay; - MapRelay mapRelay; - /** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main). */ - std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration; -} // anon namespace - -////////////////////////////////////////////////////////////////////////////// -// -// Registration of network node signals. -// - -namespace { - -struct CBlockReject { - unsigned char chRejectCode; - string strRejectReason; - uint256 hashBlock; -}; - -/** - * Maintain validation-specific state about nodes, protected by cs_main, instead - * by CNode's own locks. This simplifies asynchronous operation, where - * processing of incoming data is done after the ProcessMessage call returns, - * and we're no longer holding the node's locks. - */ -struct CNodeState { - //! The peer's address - CService address; - //! Whether we have a fully established connection. - bool fCurrentlyConnected; - //! Accumulated misbehaviour score for this peer. - int nMisbehavior; - //! Whether this peer should be disconnected and banned (unless whitelisted). - bool fShouldBan; - //! String name of this peer (debugging/logging purposes). - std::string name; - //! List of asynchronously-determined block rejections to notify this peer about. - std::vector<CBlockReject> rejects; - //! The best known block we know this peer has announced. - CBlockIndex *pindexBestKnownBlock; - //! The hash of the last unknown block this peer has announced. - uint256 hashLastUnknownBlock; - //! The last full block we both have. - CBlockIndex *pindexLastCommonBlock; - //! The best header we have sent our peer. - CBlockIndex *pindexBestHeaderSent; - //! Length of current-streak of unconnecting headers announcements - int nUnconnectingHeaders; - //! Whether we've started headers synchronization with this peer. - bool fSyncStarted; - //! Since when we're stalling block download progress (in microseconds), or 0. - int64_t nStallingSince; - list<QueuedBlock> vBlocksInFlight; - //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. - int64_t nDownloadingSince; - int nBlocksInFlight; - int nBlocksInFlightValidHeaders; - //! Whether we consider this a preferred download peer. 
- bool fPreferredDownload; - //! Whether this peer wants invs or headers (when possible) for block announcements. - bool fPreferHeaders; - //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements. - bool fPreferHeaderAndIDs; - /** - * Whether this peer will send us cmpctblocks if we request them. - * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion, - * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send. - */ - bool fProvidesHeaderAndIDs; - //! Whether this peer can give us witnesses - bool fHaveWitness; - //! Whether this peer wants witnesses in cmpctblocks/blocktxns - bool fWantsCmpctWitness; - /** - * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns, - * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns. - */ - bool fSupportsDesiredCmpctVersion; - - CNodeState() { - fCurrentlyConnected = false; - nMisbehavior = 0; - fShouldBan = false; - pindexBestKnownBlock = NULL; - hashLastUnknownBlock.SetNull(); - pindexLastCommonBlock = NULL; - pindexBestHeaderSent = NULL; - nUnconnectingHeaders = 0; - fSyncStarted = false; - nStallingSince = 0; - nDownloadingSince = 0; - nBlocksInFlight = 0; - nBlocksInFlightValidHeaders = 0; - fPreferredDownload = false; - fPreferHeaders = false; - fPreferHeaderAndIDs = false; - fProvidesHeaderAndIDs = false; - fHaveWitness = false; - fWantsCmpctWitness = false; - fSupportsDesiredCmpctVersion = false; - } -}; - -/** Map maintaining per-node state. Requires cs_main. */ -map<NodeId, CNodeState> mapNodeState; - -// Requires cs_main. -CNodeState *State(NodeId pnode) { - map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode); - if (it == mapNodeState.end()) - return NULL; - return &it->second; -} - -void UpdatePreferredDownload(CNode* node, CNodeState* state) -{ - nPreferredDownload -= state->fPreferredDownload; - - // Whether this node should be marked as a preferred download node. - state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient; - - nPreferredDownload += state->fPreferredDownload; -} - -void InitializeNode(NodeId nodeid, const CNode *pnode) { - LOCK(cs_main); - CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second; - state.name = pnode->addrName; - state.address = pnode->addr; -} - -void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) { - fUpdateConnectionTime = false; - LOCK(cs_main); - CNodeState *state = State(nodeid); - - if (state->fSyncStarted) - nSyncStarted--; - - if (state->nMisbehavior == 0 && state->fCurrentlyConnected) { - fUpdateConnectionTime = true; - } - - BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) { - mapBlocksInFlight.erase(entry.hash); - } - EraseOrphansFor(nodeid); - nPreferredDownload -= state->fPreferredDownload; - nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); - assert(nPeersWithValidatedDownloads >= 0); - - mapNodeState.erase(nodeid); - - if (mapNodeState.empty()) { - // Do a consistency check after the last peer is removed. - assert(mapBlocksInFlight.empty()); - assert(nPreferredDownload == 0); - assert(nPeersWithValidatedDownloads == 0); - } -} - -// Requires cs_main. -// Returns a bool indicating whether we requested this block. 
-// Also used if a block was /not/ received and timed out or started with another peer -bool MarkBlockAsReceived(const uint256& hash) { - map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); - if (itInFlight != mapBlocksInFlight.end()) { - CNodeState *state = State(itInFlight->second.first); - state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; - if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { - // Last validated block on the queue was received. - nPeersWithValidatedDownloads--; - } - if (state->vBlocksInFlight.begin() == itInFlight->second.second) { - // First block on the queue was received, update the start download time for the next one - state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros()); - } - state->vBlocksInFlight.erase(itInFlight->second.second); - state->nBlocksInFlight--; - state->nStallingSince = 0; - mapBlocksInFlight.erase(itInFlight); - return true; - } - return false; -} - -// Requires cs_main. -// returns false, still setting pit, if the block was already in flight from the same peer -// pit will only be valid as long as the same cs_main lock is being held -bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) { - CNodeState *state = State(nodeid); - assert(state != NULL); - - // Short-circuit most stuff in case its from the same node - map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); - if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { - *pit = &itInFlight->second.second; - return false; - } - - // Make sure it's not listed somewhere already. - MarkBlockAsReceived(hash); - - list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), - {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)}); - state->nBlocksInFlight++; - state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; - if (state->nBlocksInFlight == 1) { - // We're starting a block download (batch) from this peer. - state->nDownloadingSince = GetTimeMicros(); - } - if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) { - nPeersWithValidatedDownloads++; - } - itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first; - if (pit) - *pit = &itInFlight->second.second; - return true; -} - -/** Check whether the last unknown block a peer advertised is not yet known. */ -void ProcessBlockAvailability(NodeId nodeid) { - CNodeState *state = State(nodeid); - assert(state != NULL); - - if (!state->hashLastUnknownBlock.IsNull()) { - BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock); - if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) { - if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) - state->pindexBestKnownBlock = itOld->second; - state->hashLastUnknownBlock.SetNull(); - } - } -} - -/** Update tracking information about which blocks a peer is assumed to have. 
*/ -void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { - CNodeState *state = State(nodeid); - assert(state != NULL); - - ProcessBlockAvailability(nodeid); - - BlockMap::iterator it = mapBlockIndex.find(hash); - if (it != mapBlockIndex.end() && it->second->nChainWork > 0) { - // An actually better block was announced. - if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) - state->pindexBestKnownBlock = it->second; - } else { - // An unknown block was announced; just assume that the latest one is the best one. - state->hashLastUnknownBlock = hash; - } -} - -void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) { - if (!nodestate->fSupportsDesiredCmpctVersion) { - // Never ask from peers who can't provide witnesses. - return; - } - if (nodestate->fProvidesHeaderAndIDs) { - for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { - if (*it == pfrom->GetId()) { - lNodesAnnouncingHeaderAndIDs.erase(it); - lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); - return; - } - } - bool fAnnounceUsingCMPCTBLOCK = false; - uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1; - if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { - // As per BIP152, we only get 3 of our peers to announce - // blocks using compact encodings. - bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){ - pnodeStop->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion); - return true; - }); - if(found) - lNodesAnnouncingHeaderAndIDs.pop_front(); - } - fAnnounceUsingCMPCTBLOCK = true; - pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion); - lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); - } -} - -// Requires cs_main -bool CanDirectFetch(const Consensus::Params &consensusParams) -{ - return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20; -} - -// Requires cs_main -bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex) -{ - if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) - return true; - if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) - return true; - return false; -} - -/** Find the last common ancestor two blocks have. - * Both pa and pb must be non-NULL. */ -CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) { - if (pa->nHeight > pb->nHeight) { - pa = pa->GetAncestor(pb->nHeight); - } else if (pb->nHeight > pa->nHeight) { - pb = pb->GetAncestor(pa->nHeight); - } - - while (pa != pb && pa && pb) { - pa = pa->pprev; - pb = pb->pprev; - } - - // Eventually all chain branches meet at the genesis block. - assert(pa == pb); - return pa; -} - -/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has - * at most count entries. */ -void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) { - if (count == 0) - return; - - vBlocks.reserve(vBlocks.size() + count); - CNodeState *state = State(nodeid); - assert(state != NULL); - - // Make sure pindexBestKnownBlock is up to date, we'll need it. 
- ProcessBlockAvailability(nodeid); - - if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) { - // This peer has nothing interesting. - return; - } - - if (state->pindexLastCommonBlock == NULL) { - // Bootstrap quickly by guessing a parent of our best tip is the forking point. - // Guessing wrong in either direction is not a problem. - state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())]; - } - - // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor - // of its current tip anymore. Go back enough to fix that. - state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock); - if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) - return; - - std::vector<CBlockIndex*> vToFetch; - CBlockIndex *pindexWalk = state->pindexLastCommonBlock; - // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last - // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to - // download that next block if the window were 1 larger. - int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; - int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); - NodeId waitingfor = -1; - while (pindexWalk->nHeight < nMaxHeight) { - // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards - // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive - // as iterating over ~100 CBlockIndex* entries anyway. - int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128)); - vToFetch.resize(nToFetch); - pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch); - vToFetch[nToFetch - 1] = pindexWalk; - for (unsigned int i = nToFetch - 1; i > 0; i--) { - vToFetch[i - 1] = vToFetch[i]->pprev; - } - - // Iterate over those blocks in vToFetch (in forward direction), adding the ones that - // are not yet downloaded and not in flight to vBlocks. In the mean time, update - // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's - // already part of our chain (and therefore don't need it even if pruned). - BOOST_FOREACH(CBlockIndex* pindex, vToFetch) { - if (!pindex->IsValid(BLOCK_VALID_TREE)) { - // We consider the chain that this peer is on invalid. - return; - } - if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) { - // We wouldn't download this block or its descendants from this peer. - return; - } - if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) { - if (pindex->nChainTx) - state->pindexLastCommonBlock = pindex; - } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { - // The block is not already downloaded, and not yet in flight. - if (pindex->nHeight > nWindowEnd) { - // We reached the end of the window. - if (vBlocks.size() == 0 && waitingfor != nodeid) { - // We aren't able to fetch anything, but we would be if the download window was one larger. - nodeStaller = waitingfor; - } - return; - } - vBlocks.push_back(pindex); - if (vBlocks.size() == count) { - return; - } - } else if (waitingfor == -1) { - // This is the first already-in-flight block. 
- waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first; - } - } - } -} - } // anon namespace -bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { - LOCK(cs_main); - CNodeState *state = State(nodeid); - if (state == NULL) - return false; - stats.nMisbehavior = state->nMisbehavior; - stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; - stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1; - BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) { - if (queue.pindex) - stats.vHeightInFlight.push_back(queue.pindex->nHeight); - } - return true; -} - -void RegisterNodeSignals(CNodeSignals& nodeSignals) -{ - nodeSignals.ProcessMessages.connect(&ProcessMessages); - nodeSignals.SendMessages.connect(&SendMessages); - nodeSignals.InitializeNode.connect(&InitializeNode); - nodeSignals.FinalizeNode.connect(&FinalizeNode); -} - -void UnregisterNodeSignals(CNodeSignals& nodeSignals) -{ - nodeSignals.ProcessMessages.disconnect(&ProcessMessages); - nodeSignals.SendMessages.disconnect(&SendMessages); - nodeSignals.InitializeNode.disconnect(&InitializeNode); - nodeSignals.FinalizeNode.disconnect(&FinalizeNode); -} - CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) { // Find the first block the caller has in the main chain @@ -691,111 +176,15 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc CCoinsViewCache *pcoinsTip = NULL; CBlockTreeDB *pblocktree = NULL; -////////////////////////////////////////////////////////////////////////////// -// -// mapOrphanTransactions -// - -bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - uint256 hash = tx.GetHash(); - if (mapOrphanTransactions.count(hash)) - return false; - - // Ignore big transactions, to avoid a - // send-big-orphans memory exhaustion attack. If a peer has a legitimate - // large transaction with a missing parent then we assume - // it will rebroadcast it later, after the parent transaction(s) - // have been mined or received. 
- // 100 orphans, each of which is at most 99,999 bytes big is - // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case): - unsigned int sz = GetTransactionWeight(tx); - if (sz >= MAX_STANDARD_TX_WEIGHT) - { - LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString()); - return false; - } - - auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME}); - assert(ret.second); - BOOST_FOREACH(const CTxIn& txin, tx.vin) { - mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); - } - - LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(), - mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); - return true; -} - -int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash); - if (it == mapOrphanTransactions.end()) - return 0; - BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin) - { - auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout); - if (itPrev == mapOrphanTransactionsByPrev.end()) - continue; - itPrev->second.erase(it); - if (itPrev->second.empty()) - mapOrphanTransactionsByPrev.erase(itPrev); - } - mapOrphanTransactions.erase(it); - return 1; -} - -void EraseOrphansFor(NodeId peer) -{ - int nErased = 0; - map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin(); - while (iter != mapOrphanTransactions.end()) - { - map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid - if (maybeErase->second.fromPeer == peer) - { - nErased += EraseOrphanTx(maybeErase->second.tx.GetHash()); - } - } - if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer); -} - +enum FlushStateMode { + FLUSH_STATE_NONE, + FLUSH_STATE_IF_NEEDED, + FLUSH_STATE_PERIODIC, + FLUSH_STATE_ALWAYS +}; -unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - unsigned int nEvicted = 0; - static int64_t nNextSweep; - int64_t nNow = GetTime(); - if (nNextSweep <= nNow) { - // Sweep out expired orphan pool entries: - int nErased = 0; - int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; - map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin(); - while (iter != mapOrphanTransactions.end()) - { - map<uint256, COrphanTx>::iterator maybeErase = iter++; - if (maybeErase->second.nTimeExpire <= nNow) { - nErased += EraseOrphanTx(maybeErase->second.tx.GetHash()); - } else { - nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); - } - } - // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan. 
- nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; - if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased); - } - while (mapOrphanTransactions.size() > nMaxOrphans) - { - // Evict a random orphan: - uint256 randomhash = GetRandHash(); - map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash); - if (it == mapOrphanTransactions.end()) - it = mapOrphanTransactions.begin(); - EraseOrphanTx(it->first); - ++nEvicted; - } - return nEvicted; -} +// See definition for documentation +bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode); bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime) { @@ -1066,7 +455,7 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i -bool CheckTransaction(const CTransaction& tx, CValidationState &state) +bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fCheckDuplicateInputs) { // Basic checks that don't depend on any context if (tx.vin.empty()) @@ -1090,13 +479,14 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state) return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge"); } - // Check for duplicate inputs - set<COutPoint> vInOutPoints; - for (const auto& txin : tx.vin) - { - if (vInOutPoints.count(txin.prevout)) - return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate"); - vInOutPoints.insert(txin.prevout); + // Check for duplicate inputs - note that this check is slow so we skip it in CheckBlock + if (fCheckDuplicateInputs) { + set<COutPoint> vInOutPoints; + for (const auto& txin : tx.vin) + { + if (!vInOutPoints.insert(txin.prevout).second) + return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate"); + } } if (tx.IsCoinBase()) @@ -1135,7 +525,7 @@ std::string FormatStateMessage(const CValidationState &state) } bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree, - bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount& nAbsurdFee, + bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount& nAbsurdFee, std::vector<uint256>& vHashTxnToUncache) { const uint256 hash = tx.GetHash(); @@ -1150,14 +540,6 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C if (tx.IsCoinBase()) return state.DoS(100, false, REJECT_INVALID, "coinbase"); - // Don't relay version 2 transactions until CSV is active, and we can be - // sure that such transactions will be mined (unless we're on - // -testnet/-regtest). 
- const CChainParams& chainparams = Params(); - if (fRequireStandard && tx.nVersion >= 2 && VersionBitsTipState(chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV) != THRESHOLD_ACTIVE) { - return state.DoS(0, false, REJECT_NONSTANDARD, "premature-version2-tx"); - } - // Reject transactions with witness before segregated witness activates (override with -prematurewitness) bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus()); if (!GetBoolArg("-prematurewitness",false) && !tx.wit.IsNull() && !witnessEnabled) { @@ -1308,7 +690,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C } } - CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp); + CTxMemPoolEntry entry(tx, nFees, nAcceptTime, dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp); unsigned int nSize = entry.GetTxSize(); // Check that the transaction doesn't have an excessive number of @@ -1572,29 +954,38 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C return true; } -bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree, - bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee) +bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree, + bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount nAbsurdFee) { std::vector<uint256> vHashTxToUncache; - bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache); + bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache); if (!res) { BOOST_FOREACH(const uint256& hashTx, vHashTxToUncache) pcoinsTip->Uncache(hashTx); } + // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits + CValidationState stateDummy; + FlushStateToDisk(stateDummy, FLUSH_STATE_PERIODIC); return res; } -/** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */ -bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow) +bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree, + bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee) +{ + return AcceptToMemoryPoolWithTime(pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), fOverrideMempoolLimit, nAbsurdFee); +} + +/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */ +bool GetTransaction(const uint256 &hash, CTransactionRef &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow) { CBlockIndex *pindexSlow = NULL; LOCK(cs_main); - std::shared_ptr<const CTransaction> ptx = mempool.get(hash); + CTransactionRef ptx = mempool.get(hash); if (ptx) { - txOut = *ptx; + txOut = ptx; return true; } @@ -1613,7 +1004,7 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::P return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } hashBlock = header.GetHash(); - if (txOut.GetHash() != hash) + if (txOut->GetHash() != hash) return error("%s: txid 
mismatch", __func__); return true; } @@ -1634,8 +1025,8 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::P if (pindexSlow) { CBlock block; if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) { - BOOST_FOREACH(const CTransaction &tx, block.vtx) { - if (tx.GetHash() == hash) { + for (const auto& tx : block.vtx) { + if (tx->GetHash() == hash) { txOut = tx; hashBlock = pindexSlow->GetBlockHash(); return true; @@ -1665,7 +1056,7 @@ bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHea return error("WriteBlockToDisk: OpenBlockFile failed"); // Write index header - unsigned int nSize = fileout.GetSerializeSize(block); + unsigned int nSize = GetSerializeSize(fileout, block); fileout << FLATDATA(messageStart) << nSize; // Write block @@ -1740,13 +1131,14 @@ bool IsInitialBlockDownload() return false; if (fImporting || fReindex) return true; - if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints())) + if (chainActive.Tip() == NULL) + return true; + if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork)) return true; - bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 || - std::max(chainActive.Tip()->GetBlockTime(), pindexBestHeader->GetBlockTime()) < GetTime() - nMaxTipAge); - if (!state) - latchToFalse.store(true, std::memory_order_relaxed); - return state; + if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) + return true; + latchToFalse.store(true, std::memory_order_relaxed); + return false; } bool fLargeWorkForkFound = false; @@ -1774,7 +1166,7 @@ void CheckForkWarningConditions() { AssertLockHeld(cs_main); // Before we get past initial download, we cannot reliably alert about forks - // (we assume we don't get stuck on a fork before the last checkpoint) + // (we assume we don't get stuck on a fork before finishing our initial sync) if (IsInitialBlockDownload()) return; @@ -1844,26 +1236,6 @@ void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) CheckForkWarningConditions(); } -// Requires cs_main. 
-void Misbehaving(NodeId pnode, int howmuch) -{ - if (howmuch == 0) - return; - - CNodeState *state = State(pnode); - if (state == NULL) - return; - - state->nMisbehavior += howmuch; - int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD); - if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) - { - LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior); - state->fShouldBan = true; - } else - LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior); -} - void static InvalidChainFound(CBlockIndex* pindexNew) { if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork) @@ -2054,7 +1426,7 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint return error("%s: OpenUndoFile failed", __func__); // Write index header - unsigned int nSize = fileout.GetSerializeSize(blockundo); + unsigned int nSize = GetSerializeSize(fileout, blockundo); fileout << FLATDATA(messageStart) << nSize; // Write undo data @@ -2174,7 +1546,7 @@ bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockI // undo transactions in reverse order for (int i = block.vtx.size() - 1; i >= 0; i--) { - const CTransaction &tx = block.vtx[i]; + const CTransaction &tx = *(block.vtx[i]); uint256 hash = tx.GetHash(); // Check that all outputs are available and match the outputs in the block itself @@ -2368,8 +1740,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash)); if (fEnforceBIP30) { - BOOST_FOREACH(const CTransaction& tx, block.vtx) { - const CCoins* coins = view.AccessCoins(tx.GetHash()); + for (const auto& tx : block.vtx) { + const CCoins* coins = view.AccessCoins(tx->GetHash()); if (coins && !coins->IsPruned()) return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"), REJECT_INVALID, "bad-txns-BIP30"); @@ -2412,7 +1784,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL); - std::vector<uint256> vOrphanErase; std::vector<int> prevheights; CAmount nFees = 0; int nInputs = 0; @@ -2425,7 +1796,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated for (unsigned int i = 0; i < block.vtx.size(); i++) { - const CTransaction &tx = block.vtx[i]; + const CTransaction &tx = *(block.vtx[i]); nInputs += tx.vin.size(); @@ -2443,17 +1814,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight; } - // Which orphan pool entries must we evict? 
- for (size_t j = 0; j < tx.vin.size(); j++) { - auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout); - if (itByPrev == mapOrphanTransactionsByPrev.end()) continue; - for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { - const CTransaction& orphanTx = (*mi)->second.tx; - const uint256& orphanHash = orphanTx.GetHash(); - vOrphanErase.push_back(orphanHash); - } - } - if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) { return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal"); @@ -2495,10 +1855,10 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001); CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus()); - if (block.vtx[0].GetValueOut() > blockReward) + if (block.vtx[0]->GetValueOut() > blockReward) return state.DoS(100, error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)", - block.vtx[0].GetValueOut(), blockReward), + block.vtx[0]->GetValueOut(), blockReward), REJECT_INVALID, "bad-cb-amount"); if (!control.Wait()) @@ -2541,16 +1901,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin // Watch for changes to the previous coinbase transaction. static uint256 hashPrevBestCoinBase; GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase); - hashPrevBestCoinBase = block.vtx[0].GetHash(); + hashPrevBestCoinBase = block.vtx[0]->GetHash(); - // Erase orphan transactions include or precluded by this block - if (vOrphanErase.size()) { - int nErased = 0; - BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) { - nErased += EraseOrphanTx(orphanHash); - } - LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased); - } int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001); @@ -2558,13 +1910,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin return true; } -enum FlushStateMode { - FLUSH_STATE_NONE, - FLUSH_STATE_IF_NEEDED, - FLUSH_STATE_PERIODIC, - FLUSH_STATE_ALWAYS -}; - /** * Update the on-disk chain state. * The caches and indexes are flushed depending on the mode we're called with @@ -2684,7 +2029,6 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) { chainActive.SetTip(pindexNew); // New best block - nTimeBestReceived = GetTime(); mempool.AddTransactionsUpdated(1); cvBlockChange.notify_all(); @@ -2756,7 +2100,8 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara CCoinsViewCache view(pcoinsTip); if (!DisconnectBlock(block, state, pindexDelete, view)) return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); - assert(view.Flush()); + bool flushed = view.Flush(); + assert(flushed); } LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001); // Write the chain state to disk, if necessary. @@ -2766,12 +2111,12 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara if (!fBare) { // Resurrect mempool transactions from the disconnected block. 
std::vector<uint256> vHashUpdate; - BOOST_FOREACH(const CTransaction &tx, block.vtx) { + for (const auto& it : block.vtx) { + const CTransaction& tx = *it; // ignore validation errors in resurrected transactions - list<CTransaction> removed; CValidationState stateDummy; if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) { - mempool.removeRecursive(tx, removed); + mempool.removeRecursive(tx); } else if (mempool.exists(tx.GetHash())) { vHashUpdate.push_back(tx.GetHash()); } @@ -2788,8 +2133,8 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara UpdateTip(pindexDelete->pprev, chainparams); // Let wallets know transactions went from 1-confirmed to // 0-confirmed or conflicted: - BOOST_FOREACH(const CTransaction &tx, block.vtx) { - GetMainSignals().SyncTransaction(tx, pindexDelete->pprev, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK); + for (const auto& tx : block.vtx) { + GetMainSignals().SyncTransaction(*tx, pindexDelete->pprev, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK); } return true; } @@ -2801,28 +2146,43 @@ static int64_t nTimeChainState = 0; static int64_t nTimePostConnect = 0; /** + * Used to track blocks whose transactions were applied to the UTXO state as a + * part of a single ActivateBestChainStep call. + */ +struct ConnectTrace { + std::vector<std::pair<CBlockIndex*, std::shared_ptr<const CBlock> > > blocksConnected; +}; + +/** * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock * corresponding to pindexNew, to bypass loading it again from disk. + * + * The block is always added to connectTrace (either after loading from disk or by copying + * pblock) - if that is not intended, care must be taken to remove the last entry in + * blocksConnected in case of failure. */ -bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock, std::list<CTransaction> &txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>> &txChanged) +bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace) { assert(pindexNew->pprev == chainActive.Tip()); // Read block from disk. int64_t nTime1 = GetTimeMicros(); - CBlock block; if (!pblock) { - if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus())) + std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>(); + connectTrace.blocksConnected.emplace_back(pindexNew, pblockNew); + if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus())) return AbortNode(state, "Failed to read block"); - pblock = █ + } else { + connectTrace.blocksConnected.emplace_back(pindexNew, pblock); } + const CBlock& blockConnecting = *connectTrace.blocksConnected.back().second; // Apply the block atomically to the chain state. 
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; int64_t nTime3; LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); { CCoinsViewCache view(pcoinsTip); - bool rv = ConnectBlock(*pblock, state, pindexNew, view, chainparams); - GetMainSignals().BlockChecked(*pblock, state); + bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams); + GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { if (state.IsInvalid()) InvalidBlockFound(pindexNew, state); @@ -2830,7 +2190,8 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, } nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001); - assert(view.Flush()); + bool flushed = view.Flush(); + assert(flushed); } int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); @@ -2840,13 +2201,10 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); // Remove conflicting transactions from the mempool.; - mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload()); + mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight, !IsInitialBlockDownload()); // Update chainActive & related variables. UpdateTip(pindexNew, chainparams); - for(unsigned int i=0; i < pblock->vtx.size(); i++) - txChanged.emplace_back(pblock->vtx[i], pindexNew, i); - int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001); LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001); @@ -2927,7 +2285,7 @@ static void PruneBlockIndexCandidates() { * Try to make some progress towards making pindexMostWork the active block. * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork. */ -static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock, bool& fInvalidFound, std::list<CTransaction>& txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>>& txChanged) +static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) { AssertLockHeld(cs_main); const CBlockIndex *pindexOldTip = chainActive.Tip(); @@ -2960,7 +2318,7 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c // Connect new blocks. BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) { - if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL, txConflicted, txChanged)) { + if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace)) { if (state.IsInvalid()) { // The block violates a consensus rule. 
if (!state.CorruptionPossible()) @@ -2968,6 +2326,8 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c state = CValidationState(); fInvalidFound = true; fContinue = false; + // If we didn't actually connect the block, don't notify listeners about it + connectTrace.blocksConnected.pop_back(); break; } else { // A system error occurred (disk space, database error, ...). @@ -3006,9 +2366,8 @@ static void NotifyHeaderTip() { CBlockIndex* pindexHeader = NULL; { LOCK(cs_main); - if (!setBlockIndexCandidates.empty()) { - pindexHeader = *setBlockIndexCandidates.rbegin(); - } + pindexHeader = pindexBestHeader; + if (pindexHeader != pindexHeaderOld) { fNotify = true; fInitialBlockDownload = IsInitialBlockDownload(); @@ -3026,20 +2385,16 @@ static void NotifyHeaderTip() { * or an activated best chain. pblock is either NULL or a pointer to a block * that is already loaded (to avoid loading it again from disk). */ -bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) { +bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { CBlockIndex *pindexMostWork = NULL; CBlockIndex *pindexNewTip = NULL; - std::vector<std::tuple<CTransaction,CBlockIndex*,int>> txChanged; - if (pblock) - txChanged.reserve(pblock->vtx.size()); do { - txChanged.clear(); boost::this_thread::interruption_point(); if (ShutdownRequested()) break; const CBlockIndex *pindexFork; - std::list<CTransaction> txConflicted; + ConnectTrace connectTrace; bool fInitialDownload; { LOCK(cs_main); @@ -3053,7 +2408,8 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, return true; bool fInvalidFound = false; - if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL, fInvalidFound, txConflicted, txChanged)) + std::shared_ptr<const CBlock> nullBlockPtr; + if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) return false; if (fInvalidFound) { @@ -3070,13 +2426,12 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, // throw all transactions though the signal-interface // while _not_ holding the cs_main lock - BOOST_FOREACH(const CTransaction &tx, txConflicted) - { - GetMainSignals().SyncTransaction(tx, pindexNewTip, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK); + for (const auto& pair : connectTrace.blocksConnected) { + assert(pair.second); + const CBlock& block = *(pair.second); + for (unsigned int i = 0; i < block.vtx.size(); i++) + GetMainSignals().SyncTransaction(*block.vtx[i], pair.first, i); } - // ... and about transactions that got confirmed: - for(unsigned int i = 0; i < txChanged.size(); i++) - GetMainSignals().SyncTransaction(std::get<0>(txChanged[i]), std::get<1>(txChanged[i]), std::get<2>(txChanged[i])); // Notify external listeners about the new tip. 
GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); @@ -3162,6 +2517,7 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C InvalidChainFound(pindex); mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev); return true; } @@ -3415,22 +2771,22 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed"); // First transaction must be coinbase, the rest must not be - if (block.vtx.empty() || !block.vtx[0].IsCoinBase()) + if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase"); for (unsigned int i = 1; i < block.vtx.size(); i++) - if (block.vtx[i].IsCoinBase()) + if (block.vtx[i]->IsCoinBase()) return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase"); // Check transactions for (const auto& tx : block.vtx) - if (!CheckTransaction(tx, state)) + if (!CheckTransaction(*tx, state, false)) return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(), - strprintf("Transaction check failed (tx hash %s) %s", tx.GetHash().ToString(), state.GetDebugMessage())); + strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage())); unsigned int nSigOps = 0; for (const auto& tx : block.vtx) { - nSigOps += GetLegacySigOpCount(tx); + nSigOps += GetLegacySigOpCount(*tx); } if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST) return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount"); @@ -3466,8 +2822,8 @@ bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& pa static int GetWitnessCommitmentIndex(const CBlock& block) { int commitpos = -1; - for (size_t o = 0; o < block.vtx[0].vout.size(); o++) { - if (block.vtx[0].vout[o].scriptPubKey.size() >= 38 && block.vtx[0].vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0].vout[o].scriptPubKey[1] == 0x24 && block.vtx[0].vout[o].scriptPubKey[2] == 0xaa && block.vtx[0].vout[o].scriptPubKey[3] == 0x21 && block.vtx[0].vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0].vout[o].scriptPubKey[5] == 0xed) { + for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) { + if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) { commitpos = o; } } @@ -3478,10 +2834,12 @@ void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPr { int commitpos = GetWitnessCommitmentIndex(block); static const std::vector<unsigned char> nonce(32, 0x00); - if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0].wit.IsEmpty()) { - block.vtx[0].wit.vtxinwit.resize(1); - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.resize(1); - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0] = nonce; + if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0]->wit.IsEmpty()) { + CMutableTransaction tx(*block.vtx[0]); + tx.wit.vtxinwit.resize(1); + tx.wit.vtxinwit[0].scriptWitness.stack.resize(1); + 
tx.wit.vtxinwit[0].scriptWitness.stack[0] = nonce; + block.vtx[0] = MakeTransactionRef(std::move(tx)); } } @@ -3489,15 +2847,8 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc { std::vector<unsigned char> commitment; int commitpos = GetWitnessCommitmentIndex(block); - bool fHaveWitness = false; - for (size_t t = 1; t < block.vtx.size(); t++) { - if (!block.vtx[t].wit.IsNull()) { - fHaveWitness = true; - break; - } - } std::vector<unsigned char> ret(32, 0x00); - if (fHaveWitness && IsWitnessEnabled(pindexPrev, consensusParams)) { + if (consensusParams.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) { if (commitpos == -1) { uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL); CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin()); @@ -3512,8 +2863,9 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc out.scriptPubKey[5] = 0xed; memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32); commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end()); - const_cast<std::vector<CTxOut>*>(&block.vtx[0].vout)->push_back(out); - block.vtx[0].UpdateHash(); + CMutableTransaction tx(*block.vtx[0]); + tx.vout.push_back(out); + block.vtx[0] = MakeTransactionRef(std::move(tx)); } } UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams); @@ -3562,7 +2914,7 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co // Check that all transactions are finalized for (const auto& tx : block.vtx) { - if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) { + if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) { return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction"); } } @@ -3571,8 +2923,8 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co if (nHeight >= consensusParams.BIP34Height) { CScript expect = CScript() << nHeight; - if (block.vtx[0].vin[0].scriptSig.size() < expect.size() || - !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) { + if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() || + !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase"); } } @@ -3594,11 +2946,11 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co // The malleation check is ignored; as the transaction tree itself // already does not permit it, it is impossible to trigger in the // witness tree. 
- if (block.vtx[0].wit.vtxinwit.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) { + if (block.vtx[0]->wit.vtxinwit.size() != 1 || block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) { return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__)); } - CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin()); - if (memcmp(hashWitness.begin(), &block.vtx[0].vout[commitpos].scriptPubKey[6], 32)) { + CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin()); + if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) { return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__)); } fHaveWitness = true; @@ -3608,7 +2960,7 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam if (!fHaveWitness) { for (size_t i = 0; i < block.vtx.size(); i++) { - if (!block.vtx[i].wit.IsNull()) { + if (!block.vtx[i]->wit.IsNull()) { return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__)); } } @@ -3627,7 +2979,7 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co return true; } -static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL) +static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex) { AssertLockHeld(cs_main); // Check for duplicate @@ -3671,6 +3023,23 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state if (ppindex) *ppindex = pindex; + CheckBlockIndex(chainparams.GetConsensus()); + + return true; +} + +// Exposed wrapper for AcceptBlockHeader +bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex) +{ + { + LOCK(cs_main); + for (const CBlockHeader& header : headers) { + if (!AcceptBlockHeader(header, state, chainparams, ppindex)) { + return false; + } + } + } + NotifyHeaderTip(); return true; } @@ -3698,6 +3067,11 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha // not process unrequested blocks. bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP)); + // TODO: Decouple this function from the block download logic by removing fRequested + // This requires some new chain datastructure to efficiently look up if a + // block is in a chain leading to a candidate for best tip, despite not + // being such a candidate itself. + // TODO: deal better with return value and error conditions for duplicate // and unrequested blocks. 
if (fAlreadyHave) return true; @@ -3742,28 +3116,26 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha return true; } -bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp) +bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock) { { LOCK(cs_main); - bool fRequested = MarkBlockAsReceived(pblock->GetHash()); - fRequested |= fForceProcessing; // Store to disk CBlockIndex *pindex = NULL; - bool fNewBlock = false; - bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp, &fNewBlock); - if (pindex && pfrom) { - mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId(); - if (fNewBlock) pfrom->nLastBlockTime = GetTime(); - } + if (fNewBlock) *fNewBlock = false; + CValidationState state; + bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, NULL, fNewBlock); CheckBlockIndex(chainparams.GetConsensus()); - if (!ret) + if (!ret) { + GetMainSignals().BlockChecked(*pblock, state); return error("%s: AcceptBlock FAILED", __func__); + } } NotifyHeaderTip(); + CValidationState state; // Only used to report errors, not invalidity - ignore it if (!ActivateBestChain(state, chainparams, pblock)) return error("%s: ActivateBestChain failed", __func__); @@ -3968,9 +3340,8 @@ CBlockIndex * InsertBlockIndex(uint256 hash) return pindexNew; } -bool static LoadBlockIndexDB() +bool static LoadBlockIndexDB(const CChainParams& chainparams) { - const CChainParams& chainparams = Params(); if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex)) return false; @@ -4265,6 +3636,9 @@ bool RewindBlockIndex(const CChainParams& params) return true; } +// May NOT be used after any connections are up as much +// of the peer-processing logic assumes a consistent +// block index state void UnloadBlockIndex() { LOCK(cs_main); @@ -4273,20 +3647,12 @@ void UnloadBlockIndex() pindexBestInvalid = NULL; pindexBestHeader = NULL; mempool.clear(); - mapOrphanTransactions.clear(); - mapOrphanTransactionsByPrev.clear(); - nSyncStarted = 0; mapBlocksUnlinked.clear(); vinfoBlockFile.clear(); nLastBlockFile = 0; nBlockSequenceId = 1; - mapBlockSource.clear(); - mapBlocksInFlight.clear(); - nPreferredDownload = 0; setDirtyBlockIndex.clear(); setDirtyFileInfo.clear(); - mapNodeState.clear(); - recentRejects.reset(NULL); versionbitscache.Clear(); for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) { warningcache[b].clear(); @@ -4299,10 +3665,10 @@ void UnloadBlockIndex() fHavePruned = false; } -bool LoadBlockIndex() +bool LoadBlockIndex(const CChainParams& chainparams) { // Load block index from databases - if (!fReindex && !LoadBlockIndexDB()) + if (!fReindex && !LoadBlockIndexDB(chainparams)) return false; return true; } @@ -4311,9 +3677,6 @@ bool InitBlockIndex(const CChainParams& chainparams) { LOCK(cs_main); - // Initialize global variables that cannot be constructed at startup. 
- recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); - // Check whether we're already initialized if (chainActive.Genesis() != NULL) return true; @@ -4689,2239 +4052,132 @@ std::string GetWarnings(const std::string& strFor) assert(!"GetWarnings(): invalid parameter"); return "error"; } + std::string CBlockFileInfo::ToString() const { + return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast)); + } - - - - - - - -////////////////////////////////////////////////////////////////////////////// -// -// blockchain -> download logic notification -// - -void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { - const int nNewHeight = pindexNew->nHeight; - connman->SetBestHeight(nNewHeight); - - if (!fInitialDownload) { - // Find the hashes of all blocks that weren't previously in the best chain. - std::vector<uint256> vHashes; - const CBlockIndex *pindexToAnnounce = pindexNew; - while (pindexToAnnounce != pindexFork) { - vHashes.push_back(pindexToAnnounce->GetBlockHash()); - pindexToAnnounce = pindexToAnnounce->pprev; - if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { - // Limit announcements in case of a huge reorganization. - // Rely on the peer's synchronization mechanism in that case. - break; - } - } - // Relay inventory, but don't relay old inventory during initial block download. - connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) { - if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) { - BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) { - pnode->PushBlockHash(hash); - } - } - }); - } -} - -void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) { - LOCK(cs_main); - - const uint256 hash(block.GetHash()); - std::map<uint256, NodeId>::iterator it = mapBlockSource.find(hash); - - int nDoS = 0; - if (state.IsInvalid(nDoS)) { - if (it != mapBlockSource.end() && State(it->second)) { - assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes - CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash}; - State(it->second)->rejects.push_back(reject); - if (nDoS > 0) - Misbehaving(it->second, nDoS); - } - } - if (it != mapBlockSource.end()) - mapBlockSource.erase(it); -} - -////////////////////////////////////////////////////////////////////////////// -// -// Messages -// - - -bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - switch (inv.type) - { - case MSG_TX: - case MSG_WITNESS_TX: - { - assert(recentRejects); - if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip) - { - // If the chain tip has changed previously rejected transactions - // might be now valid, e.g. due to a nLockTime'd tx becoming valid, - // or a double-spend. Reset the rejects filter and give those - // txs a second chance. 
- hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash(); - recentRejects->reset(); - } - - // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude - // requesting or processing some txs which have already been included in a block - return recentRejects->contains(inv.hash) || - mempool.exists(inv.hash) || - mapOrphanTransactions.count(inv.hash) || - pcoinsTip->HaveCoinsInCache(inv.hash); - } - case MSG_BLOCK: - case MSG_WITNESS_BLOCK: - return mapBlockIndex.count(inv.hash); - } - // Don't know what it is, just say we already got one - return true; -} - -static void RelayTransaction(const CTransaction& tx, CConnman& connman) -{ - CInv inv(MSG_TX, tx.GetHash()); - connman.ForEachNode([&inv](CNode* pnode) - { - pnode->PushInventory(inv); - }); -} - -static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman) +ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos) { - int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s) - - // Relay to a limited number of other nodes - // Use deterministic randomness to send to the same nodes for 24 hours - // at a time so the addrKnowns of the chosen nodes prevent repeats - uint64_t hashAddr = addr.GetHash(); - std::multimap<uint64_t, CNode*> mapMix; - const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60)); - FastRandomContext insecure_rand; - - auto sortfunc = [&mapMix, &hasher](CNode* pnode) { - if (pnode->nVersion >= CADDR_TIME_VERSION) { - uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize(); - mapMix.emplace(hashKey, pnode); - } - }; - - auto pushfunc = [&addr, &mapMix, &nRelayNodes, &insecure_rand] { - for (auto mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi) - mi->second->PushAddress(addr, insecure_rand); - }; - - connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); + LOCK(cs_main); + return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache); } -void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman) +int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos) { - std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin(); - unsigned int nMaxSendBufferSize = connman.GetSendBufferSize(); - - vector<CInv> vNotFound; - LOCK(cs_main); - - while (it != pfrom->vRecvGetData.end()) { - // Don't bother if send buffer is too full to respond anyway - if (pfrom->nSendSize >= nMaxSendBufferSize) - break; - - const CInv &inv = *it; - { - boost::this_thread::interruption_point(); - it++; - - if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) - { - bool send = false; - BlockMap::iterator mi = mapBlockIndex.find(inv.hash); - if (mi != mapBlockIndex.end()) - { - if (chainActive.Contains(mi->second)) { - send = true; - } else { - static const int nOneMonth = 30 * 24 * 60 * 60; - // To prevent fingerprinting attacks, only send blocks outside of the active - // chain if they are valid, and no more than a month older (both in time, and in - // best equivalent proof of work) than the best header chain we know about. 
- send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) && - (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) && - (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth); - if (!send) { - LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId()); - } - } - } - // disconnect node in case we have reached the outbound limit for serving historical blocks - // never disconnect whitelisted nodes - static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical - if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted) - { - LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId()); - - //disconnect node - pfrom->fDisconnect = true; - send = false; - } - // Pruned nodes may have deleted the block, so check whether - // it's available before trying to send. - if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) - { - // Send block from disk - CBlock block; - if (!ReadBlockFromDisk(block, (*mi).second, consensusParams)) - assert(!"cannot load block from disk"); - if (inv.type == MSG_BLOCK) - pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block); - else if (inv.type == MSG_WITNESS_BLOCK) - pfrom->PushMessage(NetMsgType::BLOCK, block); - else if (inv.type == MSG_FILTERED_BLOCK) - { - bool sendMerkleBlock = false; - CMerkleBlock merkleBlock; - { - LOCK(pfrom->cs_filter); - if (pfrom->pfilter) { - sendMerkleBlock = true; - merkleBlock = CMerkleBlock(block, *pfrom->pfilter); - } - } - if (sendMerkleBlock) { - pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock); - // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see - // This avoids hurting performance by pointlessly requiring a round-trip - // Note that there is currently no way for a node to request any single transactions we didn't send here - - // they must either disconnect and retry or request the full block. - // Thus, the protocol spec specified allows for us to provide duplicate txn here, - // however we MUST always provide at least what the remote peer needs - typedef std::pair<unsigned int, uint256> PairType; - BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn) - pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]); - } - // else - // no response - } - else if (inv.type == MSG_CMPCT_BLOCK) - { - // If a peer is asking for old blocks, we're almost guaranteed - // they wont have a useful mempool to match against a compact block, - // and we don't feel like constructing the object for them, so - // instead we respond with the full, non-compact block. - bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness; - if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) { - CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness); - pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock); - } else - pfrom->PushMessageWithFlag(fPeerWantsWitness ? 
0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block); - } - - // Trigger the peer node to send a getblocks request for the next batch of inventory - if (inv.hash == pfrom->hashContinue) - { - // Bypass PushInventory, this must send even if redundant, - // and we want it right after the last block so they don't - // wait for other stuff first. - vector<CInv> vInv; - vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash())); - pfrom->PushMessage(NetMsgType::INV, vInv); - pfrom->hashContinue.SetNull(); - } - } - } - else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX) - { - // Send stream from relay memory - bool push = false; - auto mi = mapRelay.find(inv.hash); - if (mi != mapRelay.end()) { - pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second); - push = true; - } else if (pfrom->timeLastMempoolReq) { - auto txinfo = mempool.info(inv.hash); - // To protect privacy, do not answer getdata using the mempool when - // that TX couldn't have been INVed in reply to a MEMPOOL request. - if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) { - pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx); - push = true; - } - } - if (!push) { - vNotFound.push_back(inv); - } - } - - // Track requests for our stuff. - GetMainSignals().Inventory(inv.hash); - - if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) - break; - } - } - - pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it); - - if (!vNotFound.empty()) { - // Let the peer know that we didn't find what it asked for, so it doesn't - // have to wait around forever. Currently only SPV clients actually care - // about this message: it's needed when they are recursively walking the - // dependencies of relevant unconfirmed transactions. SPV clients want to - // do that because they want to know about (and store and rebroadcast and - // risk analyze) the dependencies of transactions relevant to them, without - // having to download the entire memory pool. 
- pfrom->PushMessage(NetMsgType::NOTFOUND, vNotFound); - } + return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache); } -uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) { - uint32_t nFetchFlags = 0; - if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) { - nFetchFlags |= MSG_WITNESS_FLAG; - } - return nFetchFlags; -} +static const uint64_t MEMPOOL_DUMP_VERSION = 1; -bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman) +bool LoadMempool(void) { - unsigned int nMaxSendBufferSize = connman.GetSendBufferSize(); - - LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id); - if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0) - { - LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n"); - return true; - } - - - if (!(pfrom->GetLocalServices() & NODE_BLOOM) && - (strCommand == NetMsgType::FILTERLOAD || - strCommand == NetMsgType::FILTERADD || - strCommand == NetMsgType::FILTERCLEAR)) - { - if (pfrom->nVersion >= NO_BLOOM_VERSION) { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 100); - return false; - } else { - pfrom->fDisconnect = true; - return false; - } - } - - - if (strCommand == NetMsgType::VERSION) - { - // Feeler connections exist only to verify if address is online. - if (pfrom->fFeeler) { - assert(pfrom->fInbound == false); - pfrom->fDisconnect = true; - } - - // Each connection can only send one version message - if (pfrom->nVersion != 0) - { - pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message")); - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 1); - return false; - } - - int64_t nTime; - CAddress addrMe; - CAddress addrFrom; - uint64_t nNonce = 1; - uint64_t nServiceInt; - vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe; - pfrom->nServices = ServiceFlags(nServiceInt); - if (!pfrom->fInbound) - { - connman.SetServices(pfrom->addr, pfrom->nServices); - } - if (pfrom->nServicesExpected & ~pfrom->nServices) - { - LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected); - pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD, - strprintf("Expected to offer services %08x", pfrom->nServicesExpected)); - pfrom->fDisconnect = true; - return false; - } - - if (pfrom->nVersion < MIN_PEER_PROTO_VERSION) - { - // disconnect from peers older than this proto version - LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion); - pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE, - strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)); - pfrom->fDisconnect = true; - return false; - } - - if (pfrom->nVersion == 10300) - pfrom->nVersion = 300; - if (!vRecv.empty()) - vRecv >> addrFrom >> nNonce; - if (!vRecv.empty()) { - vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH); - pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer); - } - if (!vRecv.empty()) { - vRecv >> pfrom->nStartingHeight; - } - { - LOCK(pfrom->cs_filter); - if (!vRecv.empty()) - vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message - else - pfrom->fRelayTxes = true; - } - - // Disconnect if we connected to ourself - if (pfrom->fInbound && 
!connman.CheckIncomingNonce(nNonce)) - { - LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString()); - pfrom->fDisconnect = true; - return true; - } - - pfrom->addrLocal = addrMe; - if (pfrom->fInbound && addrMe.IsRoutable()) - { - SeenLocal(addrMe); - } - - // Be shy and don't send version until we hear - if (pfrom->fInbound) - pfrom->PushVersion(); - - pfrom->fClient = !(pfrom->nServices & NODE_NETWORK); - - if((pfrom->nServices & NODE_WITNESS)) - { - LOCK(cs_main); - State(pfrom->GetId())->fHaveWitness = true; - } - - // Potentially mark this peer as a preferred download peer. - { - LOCK(cs_main); - UpdatePreferredDownload(pfrom, State(pfrom->GetId())); - } - - // Change version - pfrom->PushMessage(NetMsgType::VERACK); - pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION)); - - if (!pfrom->fInbound) - { - // Advertise our address - if (fListen && !IsInitialBlockDownload()) - { - CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices()); - FastRandomContext insecure_rand; - if (addr.IsRoutable()) - { - LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString()); - pfrom->PushAddress(addr, insecure_rand); - } else if (IsPeerAddrLocalGood(pfrom)) { - addr.SetIP(pfrom->addrLocal); - LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString()); - pfrom->PushAddress(addr, insecure_rand); - } - } - - // Get recent addresses - if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000) - { - pfrom->PushMessage(NetMsgType::GETADDR); - pfrom->fGetAddr = true; - } - connman.MarkAddressGood(pfrom->addr); - } - - pfrom->fSuccessfullyConnected = true; - - string remoteAddr; - if (fLogIPs) - remoteAddr = ", peeraddr=" + pfrom->addr.ToString(); - - LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n", - pfrom->cleanSubVer, pfrom->nVersion, - pfrom->nStartingHeight, addrMe.ToString(), pfrom->id, - remoteAddr); - - int64_t nTimeOffset = nTime - GetTime(); - pfrom->nTimeOffset = nTimeOffset; - AddTimeData(pfrom->addr, nTimeOffset); - } - - - else if (pfrom->nVersion == 0) - { - // Must have a version message before anything else - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 1); + int64_t nExpiryTimeout = GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; + FILE* filestr = fopen((GetDataDir() / "mempool.dat").string().c_str(), "r"); + CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); + if (file.IsNull()) { + LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n"); return false; } + int64_t count = 0; + int64_t skipped = 0; + int64_t failed = 0; + int64_t nNow = GetTime(); - else if (strCommand == NetMsgType::VERACK) - { - pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION)); - - // Mark this node as currently connected, so we update its timestamp later. - if (pfrom->fNetworkNode) { - LOCK(cs_main); - State(pfrom->GetId())->fCurrentlyConnected = true; - } - - if (pfrom->nVersion >= SENDHEADERS_VERSION) { - // Tell our peer we prefer to receive headers rather than inv's - // We send this to non-NODE NETWORK peers as well, because even - // non-NODE NETWORK peers can announce blocks (such as pruning - // nodes) - pfrom->PushMessage(NetMsgType::SENDHEADERS); - } - if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) { - // Tell our peer we are willing to provide version 1 or 2 cmpctblocks - // However, we do not request new block announcements using - // cmpctblock messages. 
- // We send this to non-NODE NETWORK peers as well, because - // they may wish to request compact blocks from us - bool fAnnounceUsingCMPCTBLOCK = false; - uint64_t nCMPCTBLOCKVersion = 2; - if (pfrom->GetLocalServices() & NODE_WITNESS) - pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion); - nCMPCTBLOCKVersion = 1; - pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion); - } - } - - - else if (strCommand == NetMsgType::ADDR) - { - vector<CAddress> vAddr; - vRecv >> vAddr; - - // Don't want addr from older versions unless seeding - if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000) - return true; - if (vAddr.size() > 1000) - { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("message addr size() = %u", vAddr.size()); - } - - // Store the new addresses - vector<CAddress> vAddrOk; - int64_t nNow = GetAdjustedTime(); - int64_t nSince = nNow - 10 * 60; - BOOST_FOREACH(CAddress& addr, vAddr) - { - boost::this_thread::interruption_point(); - - if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) - continue; - - if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) - addr.nTime = nNow - 5 * 24 * 60 * 60; - pfrom->AddAddressKnown(addr); - bool fReachable = IsReachable(addr); - if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) - { - // Relay to a limited number of other nodes - RelayAddress(addr, fReachable, connman); - } - // Do not store addresses outside our network - if (fReachable) - vAddrOk.push_back(addr); - } - connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60); - if (vAddr.size() < 1000) - pfrom->fGetAddr = false; - if (pfrom->fOneShot) - pfrom->fDisconnect = true; - } - - else if (strCommand == NetMsgType::SENDHEADERS) - { - LOCK(cs_main); - State(pfrom->GetId())->fPreferHeaders = true; - } - - else if (strCommand == NetMsgType::SENDCMPCT) - { - bool fAnnounceUsingCMPCTBLOCK = false; - uint64_t nCMPCTBLOCKVersion = 0; - vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion; - if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) { - LOCK(cs_main); - // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness) - if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) { - State(pfrom->GetId())->fProvidesHeaderAndIDs = true; - State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2; - } - if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces - State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK; - if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) { - if (pfrom->GetLocalServices() & NODE_WITNESS) - State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2); - else - State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1); - } - } - } - - - else if (strCommand == NetMsgType::INV) - { - vector<CInv> vInv; - vRecv >> vInv; - if (vInv.size() > MAX_INV_SZ) - { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("message inv size() = %u", vInv.size()); - } - - bool fBlocksOnly = !fRelayTxes; - - // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true - if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) - fBlocksOnly = false; - - LOCK(cs_main); - - uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), 
chainparams.GetConsensus()); - - std::vector<CInv> vToFetch; - - for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) - { - CInv &inv = vInv[nInv]; - - boost::this_thread::interruption_point(); - - bool fAlreadyHave = AlreadyHave(inv); - LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id); - - if (inv.type == MSG_TX) { - inv.type |= nFetchFlags; - } - - if (inv.type == MSG_BLOCK) { - UpdateBlockAvailability(pfrom->GetId(), inv.hash); - if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { - // First request the headers preceding the announced block. In the normal fully-synced - // case where a new block is announced that succeeds the current tip (no reorganization), - // there are no such headers. - // Secondly, and only when we are close to being synced, we request the announced block directly, - // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the - // time the block arrives, the header chain leading up to it is already validated. Not - // doing this will result in the received block being rejected as an orphan in case it is - // not a direct successor. - pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash); - CNodeState *nodestate = State(pfrom->GetId()); - if (CanDirectFetch(chainparams.GetConsensus()) && - nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER && - (!IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) { - inv.type |= nFetchFlags; - if (nodestate->fSupportsDesiredCmpctVersion) - vToFetch.push_back(CInv(MSG_CMPCT_BLOCK, inv.hash)); - else - vToFetch.push_back(inv); - // Mark block as in flight already, even though the actual "getdata" message only goes out - // later (within the same cs_main lock, though). 
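The inv handling above boils down to a small decision: always bridge the header chain first with getheaders, and only direct-fetch the announced block when the node is close to synced, the peer has request budget left, and witness requirements are met, preferring a compact block when the negotiated version allows it. A condensed, standalone sketch of that decision (the enum and struct names are illustrative stand-ins, not from this diff):

```cpp
enum class FetchKind { HeadersOnly, FullBlock, CompactBlock };

struct PeerView {
    int  blocks_in_flight;          // blocks already requested from this peer
    bool have_witness;              // peer signalled NODE_WITNESS
    bool supports_desired_cmpct;    // negotiated compact-block version matches ours
};

// Headers are always requested; additionally direct-fetch the announced block
// only when we are close to being synced and the peer still has budget.
FetchKind DecideBlockFetch(const PeerView& peer, bool can_direct_fetch,
                           bool witness_enabled, int max_in_flight_per_peer)
{
    if (!can_direct_fetch) return FetchKind::HeadersOnly;
    if (peer.blocks_in_flight >= max_in_flight_per_peer) return FetchKind::HeadersOnly;
    if (witness_enabled && !peer.have_witness) return FetchKind::HeadersOnly;
    return peer.supports_desired_cmpct ? FetchKind::CompactBlock : FetchKind::FullBlock;
}
```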
- MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus()); - } - LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id); - } - } - else - { - pfrom->AddInventoryKnown(inv); - if (fBlocksOnly) - LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id); - else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) - pfrom->AskFor(inv); - } - - // Track requests for our stuff - GetMainSignals().Inventory(inv.hash); - - if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) { - Misbehaving(pfrom->GetId(), 50); - return error("send buffer size() = %u", pfrom->nSendSize); - } - } - - if (!vToFetch.empty()) - pfrom->PushMessage(NetMsgType::GETDATA, vToFetch); - } - - - else if (strCommand == NetMsgType::GETDATA) - { - vector<CInv> vInv; - vRecv >> vInv; - if (vInv.size() > MAX_INV_SZ) - { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("message getdata size() = %u", vInv.size()); - } - - if (fDebug || (vInv.size() != 1)) - LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id); - - if ((fDebug && vInv.size() > 0) || (vInv.size() == 1)) - LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id); - - pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end()); - ProcessGetData(pfrom, chainparams.GetConsensus(), connman); - } - - - else if (strCommand == NetMsgType::GETBLOCKS) - { - CBlockLocator locator; - uint256 hashStop; - vRecv >> locator >> hashStop; - - LOCK(cs_main); - - // Find the last block the caller has in the main chain - CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator); - - // Send the rest of the chain - if (pindex) - pindex = chainActive.Next(pindex); - int nLimit = 500; - LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id); - for (; pindex; pindex = chainActive.Next(pindex)) - { - if (pindex->GetBlockHash() == hashStop) - { - LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); - break; - } - // If pruning, don't inv blocks unless we have on disk and are likely to still have - // for some reasonable time window (1 hour) that block relay might require. - const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing; - if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave)) - { - LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); - break; - } - pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); - if (--nLimit <= 0) - { - // When this block is requested, we'll send an inv that'll - // trigger the peer to getblocks the next batch of inventory. 
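The pruning guard in the getblocks hunk above keeps a one-hour margin below the tip. A worked example of that arithmetic, assuming mainnet-style values (MIN_BLOCKS_TO_KEEP = 288 and a 600-second target spacing are assumptions, not shown in this diff):

```cpp
#include <cstdio>

int main()
{
    const int MIN_BLOCKS_TO_KEEP = 288;  // blocks a pruned node keeps on disk (assumed)
    const int nPowTargetSpacing  = 600;  // seconds per block (assumed)
    // Keep a one-hour safety window for blocks a peer might still request
    // while relay is in progress.
    const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / nPowTargetSpacing;
    std::printf("a pruned node only invs blocks within %d of its tip\n",
                nPrunedBlocksLikelyToHave);  // 288 - 6 = 282
    return 0;
}
```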
- LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); - pfrom->hashContinue = pindex->GetBlockHash(); - break; - } - } - } - - - else if (strCommand == NetMsgType::GETBLOCKTXN) - { - BlockTransactionsRequest req; - vRecv >> req; - - BlockMap::iterator it = mapBlockIndex.find(req.blockhash); - if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) { - LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id); - return true; - } - - if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) { - LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH); - return true; - } - - CBlock block; - assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus())); - - BlockTransactions resp(req); - for (size_t i = 0; i < req.indexes.size(); i++) { - if (req.indexes[i] >= block.vtx.size()) { - Misbehaving(pfrom->GetId(), 100); - LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id); - return true; - } - resp.txn[i] = block.vtx[req.indexes[i]]; - } - pfrom->PushMessageWithFlag(State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp); - } - - - else if (strCommand == NetMsgType::GETHEADERS) - { - CBlockLocator locator; - uint256 hashStop; - vRecv >> locator >> hashStop; - - LOCK(cs_main); - if (IsInitialBlockDownload() && !pfrom->fWhitelisted) { - LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id); - return true; - } - - CNodeState *nodestate = State(pfrom->GetId()); - CBlockIndex* pindex = NULL; - if (locator.IsNull()) - { - // If locator is null, return the hashStop block - BlockMap::iterator mi = mapBlockIndex.find(hashStop); - if (mi == mapBlockIndex.end()) - return true; - pindex = (*mi).second; - } - else - { - // Find the last block the caller has in the main chain - pindex = FindForkInGlobalIndex(chainActive, locator); - if (pindex) - pindex = chainActive.Next(pindex); - } - - // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end - vector<CBlock> vHeaders; - int nLimit = MAX_HEADERS_RESULTS; - LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id); - for (; pindex; pindex = chainActive.Next(pindex)) - { - vHeaders.push_back(pindex->GetBlockHeader()); - if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) - break; - } - // pindex can be NULL either if we sent chainActive.Tip() OR - // if our peer has chainActive.Tip() (and thus we are sending an empty - // headers message). In both cases it's safe to update - // pindexBestHeaderSent to be our tip. - nodestate->pindexBestHeaderSent = pindex ? 
pindex : chainActive.Tip(); - pfrom->PushMessage(NetMsgType::HEADERS, vHeaders); - } - - - else if (strCommand == NetMsgType::TX) - { - // Stop processing the transaction early if - // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off - if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))) - { - LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id); - return true; - } - - deque<COutPoint> vWorkQueue; - vector<uint256> vEraseQueue; - CTransaction tx; - vRecv >> tx; - - CInv inv(MSG_TX, tx.GetHash()); - pfrom->AddInventoryKnown(inv); - - LOCK(cs_main); - - bool fMissingInputs = false; - CValidationState state; - - pfrom->setAskFor.erase(inv.hash); - mapAlreadyAskedFor.erase(inv.hash); - - if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) { - mempool.check(pcoinsTip); - RelayTransaction(tx, connman); - for (unsigned int i = 0; i < tx.vout.size(); i++) { - vWorkQueue.emplace_back(inv.hash, i); - } - - pfrom->nLastTXTime = GetTime(); - - LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n", - pfrom->id, - tx.GetHash().ToString(), - mempool.size(), mempool.DynamicMemoryUsage() / 1000); - - // Recursively process any orphan transactions that depended on this one - set<NodeId> setMisbehaving; - while (!vWorkQueue.empty()) { - auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front()); - vWorkQueue.pop_front(); - if (itByPrev == mapOrphanTransactionsByPrev.end()) - continue; - for (auto mi = itByPrev->second.begin(); - mi != itByPrev->second.end(); - ++mi) - { - const CTransaction& orphanTx = (*mi)->second.tx; - const uint256& orphanHash = orphanTx.GetHash(); - NodeId fromPeer = (*mi)->second.fromPeer; - bool fMissingInputs2 = false; - // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan - // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get - // anyone relaying LegitTxX banned) - CValidationState stateDummy; - - - if (setMisbehaving.count(fromPeer)) - continue; - if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) { - LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString()); - RelayTransaction(orphanTx, connman); - for (unsigned int i = 0; i < orphanTx.vout.size(); i++) { - vWorkQueue.emplace_back(orphanHash, i); - } - vEraseQueue.push_back(orphanHash); - } - else if (!fMissingInputs2) - { - int nDos = 0; - if (stateDummy.IsInvalid(nDos) && nDos > 0) - { - // Punish peer that gave us an invalid orphan tx - Misbehaving(fromPeer, nDos); - setMisbehaving.insert(fromPeer); - LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString()); - } - // Has inputs but not accepted to mempool - // Probably non-standard or insufficient fee/priority - LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString()); - vEraseQueue.push_back(orphanHash); - if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) { - // Do not use rejection cache for witness transactions or - // witness-stripped transactions, as they can have been malleated. - // See https://github.com/bitcoin/bitcoin/issues/8279 for details. 
- assert(recentRejects); - recentRejects->insert(orphanHash); - } - } - mempool.check(pcoinsTip); - } - } - - BOOST_FOREACH(uint256 hash, vEraseQueue) - EraseOrphanTx(hash); - } - else if (fMissingInputs) - { - bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected - BOOST_FOREACH(const CTxIn& txin, tx.vin) { - if (recentRejects->contains(txin.prevout.hash)) { - fRejectedParents = true; - break; - } - } - if (!fRejectedParents) { - BOOST_FOREACH(const CTxIn& txin, tx.vin) { - CInv _inv(MSG_TX, txin.prevout.hash); - pfrom->AddInventoryKnown(_inv); - if (!AlreadyHave(_inv)) pfrom->AskFor(_inv); - } - AddOrphanTx(tx, pfrom->GetId()); - - // DoS prevention: do not allow mapOrphanTransactions to grow unbounded - unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); - unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); - if (nEvicted > 0) - LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted); - } else { - LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); - } - } else { - if (tx.wit.IsNull() && !state.CorruptionPossible()) { - // Do not use rejection cache for witness transactions or - // witness-stripped transactions, as they can have been malleated. - // See https://github.com/bitcoin/bitcoin/issues/8279 for details. - assert(recentRejects); - recentRejects->insert(tx.GetHash()); - } - - if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { - // Always relay transactions received from whitelisted peers, even - // if they were already in the mempool or rejected from it due - // to policy, allowing the node to function as a gateway for - // nodes hidden behind it. - // - // Never relay transactions that we would assign a non-zero DoS - // score for, as we expect peers to do the same with us in that - // case. 
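The orphan-resolution loop above relies on two indexes: orphans keyed by txid, plus a reverse index from each spent outpoint to the orphans that spend it, capped by random eviction (-maxorphantx). A minimal sketch of such a pool with simplified stand-in types (the class and member names are illustrative, not the ones used by this code):

```cpp
#include <cstdlib>
#include <iterator>
#include <map>
#include <set>
#include <string>
#include <tuple>
#include <vector>

struct Outpoint {
    std::string txid;
    unsigned int n;
    bool operator<(const Outpoint& o) const { return std::tie(txid, n) < std::tie(o.txid, o.n); }
};

struct OrphanTx {
    std::string txid;
    std::vector<Outpoint> vin;   // outputs this transaction spends
    int from_peer;               // remembered for later DoS scoring
};

class OrphanPool {
    std::map<std::string, OrphanTx> by_txid;
    std::map<Outpoint, std::set<std::string>> by_prev;  // prevout -> orphan txids
public:
    void Add(const OrphanTx& tx) {
        if (!by_txid.emplace(tx.txid, tx).second) return;
        for (const auto& in : tx.vin) by_prev[in].insert(tx.txid);
    }
    void Erase(const std::string& txid) {
        auto it = by_txid.find(txid);
        if (it == by_txid.end()) return;
        for (const auto& in : it->second.vin) {
            auto p = by_prev.find(in);
            if (p != by_prev.end()) { p->second.erase(txid); if (p->second.empty()) by_prev.erase(p); }
        }
        by_txid.erase(it);
    }
    // Orphans that spend one of the first `nout` outputs of a newly accepted tx.
    std::set<std::string> DependentsOf(const std::string& txid, unsigned int nout) const {
        std::set<std::string> out;
        for (unsigned int i = 0; i < nout; ++i) {
            auto it = by_prev.find(Outpoint{txid, i});
            if (it != by_prev.end()) out.insert(it->second.begin(), it->second.end());
        }
        return out;
    }
    // DoS limit: evict random entries until at most nMax remain.
    unsigned int LimitSize(std::size_t nMax) {
        unsigned int evicted = 0;
        while (by_txid.size() > nMax) {
            auto it = by_txid.begin();
            std::advance(it, std::rand() % by_txid.size());
            Erase(it->first);
            ++evicted;
        }
        return evicted;
    }
};
```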
- int nDoS = 0; - if (!state.IsInvalid(nDoS) || nDoS == 0) { - LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id); - RelayTransaction(tx, connman); - } else { - LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state)); - } - } - } - int nDoS = 0; - if (state.IsInvalid(nDoS)) - { - LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(), - pfrom->id, - FormatStateMessage(state)); - if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P - pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(), - state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash); - if (nDoS > 0) { - Misbehaving(pfrom->GetId(), nDoS); - } - } - FlushStateToDisk(state, FLUSH_STATE_PERIODIC); - } - - - else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing - { - CBlockHeaderAndShortTxIDs cmpctblock; - vRecv >> cmpctblock; - - LOCK(cs_main); - - if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) { - // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers - if (!IsInitialBlockDownload()) - pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()); - return true; - } - - CBlockIndex *pindex = NULL; - CValidationState state; - if (!AcceptBlockHeader(cmpctblock.header, state, chainparams, &pindex)) { - int nDoS; - if (state.IsInvalid(nDoS)) { - if (nDoS > 0) - Misbehaving(pfrom->GetId(), nDoS); - LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id); - return true; - } - } - - // If AcceptBlockHeader returned true, it set pindex - assert(pindex); - UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash()); - - std::map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); - bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); - - if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here - return true; - - if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better - pindex->nTx != 0) { // We had this block at some point, but pruned it - if (fAlreadyInFlight) { - // We requested this block for some reason, but our mempool will probably be useless - // so we just grab the block via normal getdata - std::vector<CInv> vInv(1); - vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); - pfrom->PushMessage(NetMsgType::GETDATA, vInv); - } - return true; - } - - // If we're not close to tip yet, give up and let parallel block fetch work its magic - if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus())) - return true; - - CNodeState *nodestate = State(pfrom->GetId()); - - if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) { - // Don't bother trying to process compact blocks from v1 peers - // after segwit activates. - return true; - } - - // We want to be a bit conservative just to be extra careful about DoS - // possibilities in compact block processing... 
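The cmpctblock branch that follows only attempts mempool reconstruction under narrow conditions; otherwise it degrades to a plain getdata or treats the announcement like a headers message. A simplified decision sketch of that flow (the enum, struct, and field names are illustrative only):

```cpp
enum class CmpctAction { Ignore, GetDataFullBlock, TreatAsHeaders, ReconstructFromMempool };

struct CmpctContext {
    bool already_have_data;     // pindex->nStatus & BLOCK_HAVE_DATA
    bool better_chain_known;    // no more work than our tip, or we pruned this block
    bool already_in_flight;     // we previously requested this block
    bool can_direct_fetch;      // our tip is recent enough to be "caught up"
    int  announced_height;
    int  tip_height;
};

CmpctAction HandleCompactAnnouncement(const CmpctContext& c)
{
    if (c.already_have_data) return CmpctAction::Ignore;
    // If we know a better chain (or pruned this block), our mempool is useless
    // for reconstruction; fetch normally only if we had asked for it anyway.
    if (c.better_chain_known)
        return c.already_in_flight ? CmpctAction::GetDataFullBlock : CmpctAction::Ignore;
    if (!c.already_in_flight && !c.can_direct_fetch) return CmpctAction::Ignore;
    // Be conservative about DoS: only reconstruct when very close to the tip.
    if (c.announced_height <= c.tip_height + 2) return CmpctAction::ReconstructFromMempool;
    return c.already_in_flight ? CmpctAction::GetDataFullBlock : CmpctAction::TreatAsHeaders;
}
```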
- if (pindex->nHeight <= chainActive.Height() + 2) { - if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || - (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) { - list<QueuedBlock>::iterator *queuedBlockIt = NULL; - if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) { - if (!(*queuedBlockIt)->partialBlock) - (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool)); - else { - // The block was already in flight using compact blocks from the same peer - LogPrint("net", "Peer sent us compact block we were already syncing!\n"); - return true; - } - } - - PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; - ReadStatus status = partialBlock.InitData(cmpctblock); - if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist - Misbehaving(pfrom->GetId(), 100); - LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id); - return true; - } else if (status == READ_STATUS_FAILED) { - // Duplicate txindexes, the block is now in-flight, so just request it - std::vector<CInv> vInv(1); - vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); - pfrom->PushMessage(NetMsgType::GETDATA, vInv); - return true; - } - - if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) { - // We seem to be rather well-synced, so it appears pfrom was the first to provide us - // with this block! Let's get them to announce using compact blocks in the future. - MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman); - } - - BlockTransactionsRequest req; - for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { - if (!partialBlock.IsTxAvailable(i)) - req.indexes.push_back(i); - } - if (req.indexes.empty()) { - // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions) - BlockTransactions txn; - txn.blockhash = cmpctblock.header.GetHash(); - CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION); - blockTxnMsg << txn; - return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman); - } else { - req.blockhash = pindex->GetBlockHash(); - pfrom->PushMessage(NetMsgType::GETBLOCKTXN, req); - } - } - } else { - if (fAlreadyInFlight) { - // We requested this block, but its far into the future, so our - // mempool will probably be useless - request the block normally - std::vector<CInv> vInv(1); - vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); - pfrom->PushMessage(NetMsgType::GETDATA, vInv); - return true; - } else { - // If this was an announce-cmpctblock, we want the same treatment as a header message - // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions) - std::vector<CBlock> headers; - headers.push_back(cmpctblock.header); - CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION); - vHeadersMsg << headers; - return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman); - } - } - - CheckBlockIndex(chainparams.GetConsensus()); - } - - else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing - { - BlockTransactions resp; - vRecv >> resp; - - LOCK(cs_main); - - 
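After PartiallyDownloadedBlock::InitData in the hunk above, the node collects the indexes it could not fill from the mempool into a getblocktxn request; an empty result means the block reconstructs with no extra round trip. A small sketch of that step, modelling availability as a plain vector<bool> rather than the real class:

```cpp
#include <cstdint>
#include <vector>

// Indexes that still need to be fetched with getblocktxn.
std::vector<uint16_t> MissingTxIndexes(const std::vector<bool>& tx_available)
{
    std::vector<uint16_t> req;
    for (std::size_t i = 0; i < tx_available.size(); ++i)
        if (!tx_available[i]) req.push_back(static_cast<uint16_t>(i));
    return req;   // empty => reconstruct the block immediately
}
```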
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash); - if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock || - it->second.first != pfrom->GetId()) { - LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id); - return true; - } - - PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock; - CBlock block; - ReadStatus status = partialBlock.FillBlock(block, resp.txn); - if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist - Misbehaving(pfrom->GetId(), 100); - LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id); - return true; - } else if (status == READ_STATUS_FAILED) { - // Might have collided, fall back to getdata now :( - std::vector<CInv> invs; - invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash)); - pfrom->PushMessage(NetMsgType::GETDATA, invs); - } else { - CValidationState state; - ProcessNewBlock(state, chainparams, pfrom, &block, false, NULL); - int nDoS; - if (state.IsInvalid(nDoS)) { - assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes - pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(), - state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash()); - if (nDoS > 0) { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), nDoS); - } - } - } - } - - - else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing - { - std::vector<CBlockHeader> headers; - - // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. - unsigned int nCount = ReadCompactSize(vRecv); - if (nCount > MAX_HEADERS_RESULTS) { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("headers message size = %u", nCount); - } - headers.resize(nCount); - for (unsigned int n = 0; n < nCount; n++) { - vRecv >> headers[n]; - ReadCompactSize(vRecv); // ignore tx count; assume it is 0. - } - - { - LOCK(cs_main); - - if (nCount == 0) { - // Nothing interesting. Stop asking this peers for more headers. - return true; + try { + uint64_t version; + file >> version; + if (version != MEMPOOL_DUMP_VERSION) { + return false; } - - CNodeState *nodestate = State(pfrom->GetId()); - - // If this looks like it could be a block announcement (nCount < - // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that - // don't connect: - // - Send a getheaders message in response to try to connect the chain. - // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that - // don't connect before giving DoS points - // - Once a headers message is received that is valid and does connect, - // nUnconnectingHeaders gets reset back to 0. 
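The unconnecting-headers rule spelled out in the comment above amounts to a per-peer strike counter: each non-connecting announcement triggers a bridging getheaders, misbehaviour points are only applied every MAX_UNCONNECTING_HEADERS occurrences, and a connecting headers message clears the counter. A sketch of that bookkeeping (the struct is a stand-in for CNodeState; the value 10 is assumed from the constant used elsewhere in this file):

```cpp
struct HeaderSyncState {
    int unconnecting_headers = 0;
};

// Returns true if the peer should be given misbehaviour points this time.
bool OnUnconnectingHeaders(HeaderSyncState& s)
{
    const int MAX_UNCONNECTING_HEADERS = 10;   // assumed upstream value
    ++s.unconnecting_headers;                  // a bridging getheaders is also sent
    return s.unconnecting_headers % MAX_UNCONNECTING_HEADERS == 0;
}

void OnConnectingHeaders(HeaderSyncState& s)
{
    s.unconnecting_headers = 0;                // valid, connecting headers clear the strikes
}
```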
- if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) { - nodestate->nUnconnectingHeaders++; - pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()); - LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", - headers[0].GetHash().ToString(), - headers[0].hashPrevBlock.ToString(), - pindexBestHeader->nHeight, - pfrom->id, nodestate->nUnconnectingHeaders); - // Set hashLastUnknownBlock for this peer, so that if we - // eventually get the headers - even from a different peer - - // we can use this peer to download. - UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash()); - - if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { - Misbehaving(pfrom->GetId(), 20); + uint64_t num; + file >> num; + double prioritydummy = 0; + while (num--) { + int64_t nTime; + int64_t nFeeDelta; + CTransaction tx(deserialize, file); + file >> nTime; + file >> nFeeDelta; + + CAmount amountdelta = nFeeDelta; + if (amountdelta) { + mempool.PrioritiseTransaction(tx.GetHash(), tx.GetHash().ToString(), prioritydummy, amountdelta); } - return true; - } - - CBlockIndex *pindexLast = NULL; - BOOST_FOREACH(const CBlockHeader& header, headers) { CValidationState state; - if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) { - Misbehaving(pfrom->GetId(), 20); - return error("non-continuous headers sequence"); - } - if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) { - int nDoS; - if (state.IsInvalid(nDoS)) { - if (nDoS > 0) - Misbehaving(pfrom->GetId(), nDoS); - return error("invalid header received"); - } - } - } - - if (nodestate->nUnconnectingHeaders > 0) { - LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders); - } - nodestate->nUnconnectingHeaders = 0; - - assert(pindexLast); - UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); - - if (nCount == MAX_HEADERS_RESULTS) { - // Headers message had its maximum size; the peer may have more headers. - // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue - // from there instead. - LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight); - pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()); - } - - bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); - // If this set of headers is valid and ends in a block with at least as - // much work as our tip, download as much as possible. - if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) { - vector<CBlockIndex *> vToFetch; - CBlockIndex *pindexWalk = pindexLast; - // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. - while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && - !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) && - (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) { - // We don't have this block, and it's not yet in flight. 
- vToFetch.push_back(pindexWalk); - } - pindexWalk = pindexWalk->pprev; - } - // If pindexWalk still isn't on our main chain, we're looking at a - // very large reorg at a time we think we're close to caught up to - // the main chain -- this shouldn't really happen. Bail out on the - // direct fetch and rely on parallel download instead. - if (!chainActive.Contains(pindexWalk)) { - LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n", - pindexLast->GetBlockHash().ToString(), - pindexLast->nHeight); - } else { - vector<CInv> vGetData; - // Download as much as possible, from earliest to latest. - BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) { - if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - // Can't download any more from this peer - break; - } - uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()); - vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex); - LogPrint("net", "Requesting block %s from peer=%d\n", - pindex->GetBlockHash().ToString(), pfrom->id); - } - if (vGetData.size() > 1) { - LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n", - pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); - } - if (vGetData.size() > 0) { - if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) { - // We seem to be rather well-synced, so it appears pfrom was the first to provide us - // with this block! Let's get them to announce using compact blocks in the future. - MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman); - // In any case, we want to download using a compact block, not a regular one - vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); - } - pfrom->PushMessage(NetMsgType::GETDATA, vGetData); - } - } - } - - CheckBlockIndex(chainparams.GetConsensus()); - } - - NotifyHeaderTip(); - } - - else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing - { - CBlock block; - vRecv >> block; - - LogPrint("net", "received block %s peer=%d\n", block.GetHash().ToString(), pfrom->id); - - CValidationState state; - // Process all blocks from whitelisted peers, even if not requested, - // unless we're still syncing with the network. - // Such an unrequested block may still be processed, subject to the - // conditions in AcceptBlock(). - bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload(); - ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL); - int nDoS; - if (state.IsInvalid(nDoS)) { - assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes - pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(), - state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash()); - if (nDoS > 0) { + if (nTime + nExpiryTimeout > nNow) { LOCK(cs_main); - Misbehaving(pfrom->GetId(), nDoS); - } - } - - } - - - else if (strCommand == NetMsgType::GETADDR) - { - // This asymmetric behavior for inbound and outbound connections was introduced - // to prevent a fingerprinting attack: an attacker can send specific fake addresses - // to users' AddrMan and later request them by sending getaddr messages. 
- // Making nodes which are behind NAT and can only make outgoing connections ignore - // the getaddr message mitigates the attack. - if (!pfrom->fInbound) { - LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id); - return true; - } - - // Only send one GetAddr response per connection to reduce resource waste - // and discourage addr stamping of INV announcements. - if (pfrom->fSentAddr) { - LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id); - return true; - } - pfrom->fSentAddr = true; - - pfrom->vAddrToSend.clear(); - vector<CAddress> vAddr = connman.GetAddresses(); - FastRandomContext insecure_rand; - BOOST_FOREACH(const CAddress &addr, vAddr) - pfrom->PushAddress(addr, insecure_rand); - } - - - else if (strCommand == NetMsgType::MEMPOOL) - { - if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted) - { - LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId()); - pfrom->fDisconnect = true; - return true; - } - - if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted) - { - LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId()); - pfrom->fDisconnect = true; - return true; - } - - LOCK(pfrom->cs_inventory); - pfrom->fSendMempool = true; - } - - - else if (strCommand == NetMsgType::PING) - { - if (pfrom->nVersion > BIP0031_VERSION) - { - uint64_t nonce = 0; - vRecv >> nonce; - // Echo the message back with the nonce. This allows for two useful features: - // - // 1) A remote node can quickly check if the connection is operational - // 2) Remote nodes can measure the latency of the network thread. If this node - // is overloaded it won't respond to pings quickly and the remote node can - // avoid sending us more work, like chain download requests. - // - // The nonce stops the remote getting confused between different pings: without - // it, if the remote node sends a ping once per second and this node takes 5 - // seconds to respond to each, the 5th ping the remote sends would appear to - // return very quickly. 
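The ping/pong pair above is what gives each peer its latency estimate: the nonce and send time are recorded on ping, and a matching pong yields one round-trip sample plus a running minimum. A standalone sketch with field names that mirror, but are not, the CNode members:

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

struct PingState {
    uint64_t nonce_sent = 0;                                   // 0 = no ping outstanding
    int64_t  usec_start = 0;
    int64_t  usec_last  = 0;                                   // last measured round trip
    int64_t  usec_min   = std::numeric_limits<int64_t>::max(); // best observed round trip
};

void OnPingSent(PingState& p, uint64_t nonce, int64_t now_usec)
{
    p.nonce_sent = nonce;
    p.usec_start = now_usec;
}

// Returns true if the pong matched the outstanding ping.
bool OnPongReceived(PingState& p, uint64_t nonce, int64_t now_usec)
{
    if (p.nonce_sent == 0 || nonce != p.nonce_sent) return false;  // unsolicited or overlapping
    const int64_t rtt = now_usec - p.usec_start;
    if (rtt > 0) {
        p.usec_last = rtt;
        p.usec_min  = std::min(p.usec_min, rtt);
    }
    p.nonce_sent = 0;                                              // no longer outstanding
    return true;
}
```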
- pfrom->PushMessage(NetMsgType::PONG, nonce); - } - } - - - else if (strCommand == NetMsgType::PONG) - { - int64_t pingUsecEnd = nTimeReceived; - uint64_t nonce = 0; - size_t nAvail = vRecv.in_avail(); - bool bPingFinished = false; - std::string sProblem; - - if (nAvail >= sizeof(nonce)) { - vRecv >> nonce; - - // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) - if (pfrom->nPingNonceSent != 0) { - if (nonce == pfrom->nPingNonceSent) { - // Matching pong received, this ping is no longer outstanding - bPingFinished = true; - int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart; - if (pingUsecTime > 0) { - // Successful ping time measurement, replace previous - pfrom->nPingUsecTime = pingUsecTime; - pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime); - } else { - // This should never happen - sProblem = "Timing mishap"; - } + AcceptToMemoryPoolWithTime(mempool, state, tx, true, NULL, nTime); + if (state.IsValid()) { + ++count; } else { - // Nonce mismatches are normal when pings are overlapping - sProblem = "Nonce mismatch"; - if (nonce == 0) { - // This is most likely a bug in another implementation somewhere; cancel this ping - bPingFinished = true; - sProblem = "Nonce zero"; - } + ++failed; } } else { - sProblem = "Unsolicited pong without ping"; - } - } else { - // This is most likely a bug in another implementation somewhere; cancel this ping - bPingFinished = true; - sProblem = "Short payload"; - } - - if (!(sProblem.empty())) { - LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n", - pfrom->id, - sProblem, - pfrom->nPingNonceSent, - nonce, - nAvail); - } - if (bPingFinished) { - pfrom->nPingNonceSent = 0; - } - } - - - else if (strCommand == NetMsgType::FILTERLOAD) - { - CBloomFilter filter; - vRecv >> filter; - - if (!filter.IsWithinSizeConstraints()) - { - // There is no excuse for sending a too-large filter - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 100); - } - else - { - LOCK(pfrom->cs_filter); - delete pfrom->pfilter; - pfrom->pfilter = new CBloomFilter(filter); - pfrom->pfilter->UpdateEmptyFull(); - pfrom->fRelayTxes = true; - } - } - - - else if (strCommand == NetMsgType::FILTERADD) - { - vector<unsigned char> vData; - vRecv >> vData; - - // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object, - // and thus, the maximum size any matched object can have) in a filteradd message - bool bad = false; - if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { - bad = true; - } else { - LOCK(pfrom->cs_filter); - if (pfrom->pfilter) { - pfrom->pfilter->insert(vData); - } else { - bad = true; - } - } - if (bad) { - LOCK(cs_main); - Misbehaving(pfrom->GetId(), 100); - } - } - - - else if (strCommand == NetMsgType::FILTERCLEAR) - { - LOCK(pfrom->cs_filter); - delete pfrom->pfilter; - pfrom->pfilter = new CBloomFilter(); - pfrom->fRelayTxes = true; - } - - - else if (strCommand == NetMsgType::REJECT) - { - if (fDebug) { - try { - string strMsg; unsigned char ccode; string strReason; - vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH); - - ostringstream ss; - ss << strMsg << " code " << itostr(ccode) << ": " << strReason; - - if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) - { - uint256 hash; - vRecv >> hash; - ss << ": hash " << hash.ToString(); - } - LogPrint("net", "Reject %s\n", SanitizeString(ss.str())); - } catch (const std::ios_base::failure&) { - // Avoid feedback 
loops by preventing reject messages from triggering a new reject message. - LogPrint("net", "Unparseable reject message received\n"); + ++skipped; } } - } + std::map<uint256, CAmount> mapDeltas; + file >> mapDeltas; - else if (strCommand == NetMsgType::FEEFILTER) { - CAmount newFeeFilter = 0; - vRecv >> newFeeFilter; - if (MoneyRange(newFeeFilter)) { - { - LOCK(pfrom->cs_feeFilter); - pfrom->minFeeFilter = newFeeFilter; - } - LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id); + for (const auto& i : mapDeltas) { + mempool.PrioritiseTransaction(i.first, i.first.ToString(), prioritydummy, i.second); } + } catch (const std::exception& e) { + LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what()); + return false; } - else if (strCommand == NetMsgType::NOTFOUND) { - // We do not care about the NOTFOUND message, but logging an Unknown Command - // message would be undesirable as we transmit it ourselves. - } - - else { - // Ignore unknown commands for extensibility - LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id); - } - - - + LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped); return true; } -// requires LOCK(cs_vRecvMsg) -bool ProcessMessages(CNode* pfrom, CConnman& connman) +void DumpMempool(void) { - const CChainParams& chainparams = Params(); - unsigned int nMaxSendBufferSize = connman.GetSendBufferSize(); - //if (fDebug) - // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size()); - - // - // Message format - // (4) message start - // (12) command - // (4) size - // (4) checksum - // (x) data - // - bool fOk = true; - - if (!pfrom->vRecvGetData.empty()) - ProcessGetData(pfrom, chainparams.GetConsensus(), connman); - - // this maintains the order of responses - if (!pfrom->vRecvGetData.empty()) return fOk; - - std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin(); - while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) { - // Don't bother if send buffer is too full to respond anyway - if (pfrom->nSendSize >= nMaxSendBufferSize) - break; - - // get next message - CNetMessage& msg = *it; - - //if (fDebug) - // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__, - // msg.hdr.nMessageSize, msg.vRecv.size(), - // msg.complete() ? 
"Y" : "N"); - - // end, if an incomplete message is found - if (!msg.complete()) - break; - - // at this point, any failure means we can delete the current message - it++; + int64_t start = GetTimeMicros(); - // Scan for message start - if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { - LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id); - fOk = false; - break; - } - - // Read header - CMessageHeader& hdr = msg.hdr; - if (!hdr.IsValid(chainparams.MessageStart())) - { - LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id); - continue; - } - string strCommand = hdr.GetCommand(); - - // Message size - unsigned int nMessageSize = hdr.nMessageSize; - - // Checksum - CDataStream& vRecv = msg.vRecv; - uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize); - if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) - { - LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__, - SanitizeString(strCommand), nMessageSize, - HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE), - HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE)); - continue; - } + std::map<uint256, CAmount> mapDeltas; + std::vector<TxMempoolInfo> vinfo; - // Process message - bool fRet = false; - try - { - fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman); - boost::this_thread::interruption_point(); - } - catch (const std::ios_base::failure& e) - { - pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message")); - if (strstr(e.what(), "end of data")) - { - // Allow exceptions from under-length message on vRecv - LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); - } - else if (strstr(e.what(), "size too large")) - { - // Allow exceptions from over-long size - LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); - } - else if (strstr(e.what(), "non-canonical ReadCompactSize()")) - { - // Allow exceptions from non-canonical encoding - LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); - } - else - { - PrintExceptionContinue(&e, "ProcessMessages()"); - } - } - catch (const boost::thread_interrupted&) { - throw; - } - catch (const std::exception& e) { - PrintExceptionContinue(&e, "ProcessMessages()"); - } catch (...) { - PrintExceptionContinue(NULL, "ProcessMessages()"); - } - - if (!fRet) - LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id); - - break; - } - - // In case the connection got shut down, its receive buffer was wiped - if (!pfrom->fDisconnect) - pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it); - - return fOk; -} - -class CompareInvMempoolOrder -{ - CTxMemPool *mp; -public: - CompareInvMempoolOrder(CTxMemPool *_mempool) { - mp = _mempool; - } - - bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b) - { - /* As std::make_heap produces a max-heap, we want the entries with the - * fewest ancestors/highest fee to sort later. 
*/ - return mp->CompareDepthAndScore(*b, *a); - } -}; - -bool SendMessages(CNode* pto, CConnman& connman) -{ - const Consensus::Params& consensusParams = Params().GetConsensus(); - { - // Don't send anything until we get its version message - if (pto->nVersion == 0) - return true; - - // - // Message: ping - // - bool pingSend = false; - if (pto->fPingQueued) { - // RPC ping request by user - pingSend = true; - } - if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) { - // Ping automatically sent as a latency probe & keepalive. - pingSend = true; - } - if (pingSend && !pto->fDisconnect) { - uint64_t nonce = 0; - while (nonce == 0) { - GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); - } - pto->fPingQueued = false; - pto->nPingUsecStart = GetTimeMicros(); - if (pto->nVersion > BIP0031_VERSION) { - pto->nPingNonceSent = nonce; - pto->PushMessage(NetMsgType::PING, nonce); - } else { - // Peer is too old to support ping command with nonce, pong will never arrive. - pto->nPingNonceSent = 0; - pto->PushMessage(NetMsgType::PING); - } - } - - TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState() - if (!lockMain) - return true; - - // Address refresh broadcast - int64_t nNow = GetTimeMicros(); - if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) { - AdvertiseLocal(pto); - pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); - } - - // - // Message: addr - // - if (pto->nNextAddrSend < nNow) { - pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL); - vector<CAddress> vAddr; - vAddr.reserve(pto->vAddrToSend.size()); - BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend) - { - if (!pto->addrKnown.contains(addr.GetKey())) - { - pto->addrKnown.insert(addr.GetKey()); - vAddr.push_back(addr); - // receiver rejects addr messages larger than 1000 - if (vAddr.size() >= 1000) - { - pto->PushMessage(NetMsgType::ADDR, vAddr); - vAddr.clear(); - } - } - } - pto->vAddrToSend.clear(); - if (!vAddr.empty()) - pto->PushMessage(NetMsgType::ADDR, vAddr); - // we only send the big addr message once - if (pto->vAddrToSend.capacity() > 40) - pto->vAddrToSend.shrink_to_fit(); - } - - CNodeState &state = *State(pto->GetId()); - if (state.fShouldBan) { - if (pto->fWhitelisted) - LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString()); - else { - pto->fDisconnect = true; - if (pto->addr.IsLocal()) - LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString()); - else - { - connman.Ban(pto->addr, BanReasonNodeMisbehaving); - } - } - state.fShouldBan = false; - } - - BOOST_FOREACH(const CBlockReject& reject, state.rejects) - pto->PushMessage(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock); - state.rejects.clear(); - - // Start block sync - if (pindexBestHeader == NULL) - pindexBestHeader = chainActive.Tip(); - bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. - if (!state.fSyncStarted && !pto->fClient && !pto->fDisconnect && !fImporting && !fReindex) { - // Only actively request headers from a single peer, unless we're close to today. 
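The addr and local-address broadcasts scheduled earlier in this hunk draw their next send time from an exponential distribution (PoissonNextSend), so timing alone does not reveal where a relay originated. A simplified stand-in for that scheduler, not its exact implementation:

```cpp
#include <cstdint>
#include <random>

int64_t NextSendMicros(int64_t now_usec, int average_interval_seconds)
{
    static std::mt19937_64 rng{std::random_device{}()};
    std::exponential_distribution<double> d(1.0 / average_interval_seconds);
    return now_usec + static_cast<int64_t>(d(rng) * 1000000.0);
}

// Usage pattern mirroring the sender loop: only emit once the deadline has
// passed, then immediately schedule the next one.
// if (now > next_addr_send) { next_addr_send = NextSendMicros(now, 30); /* flush queued addrs */ }
```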
- if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { - state.fSyncStarted = true; - nSyncStarted++; - const CBlockIndex *pindexStart = pindexBestHeader; - /* If possible, start at the block preceding the currently - best known header. This ensures that we always get a - non-empty list of headers back as long as the peer - is up-to-date. With a non-empty response, we can initialise - the peer's known best block. This wouldn't be possible - if we requested starting at pindexBestHeader and - got back an empty response. */ - if (pindexStart->pprev) - pindexStart = pindexStart->pprev; - LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight); - pto->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()); - } + LOCK(mempool.cs); + for (const auto &i : mempool.mapDeltas) { + mapDeltas[i.first] = i.second.first; } + vinfo = mempool.infoAll(); + } - // Resend wallet transactions that haven't gotten in a block yet - // Except during reindex, importing and IBD, when old wallet - // transactions become unconfirmed and spams other nodes. - if (!fReindex && !fImporting && !IsInitialBlockDownload()) - { - GetMainSignals().Broadcast(nTimeBestReceived, &connman); - } + int64_t mid = GetTimeMicros(); - // - // Try sending block announcements via headers - // - { - // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our - // list of block hashes we're relaying, and our peer wants - // headers announcements, then find the first header - // not yet known to our peer but would connect, and send. - // If no header would connect, or if we have too many - // blocks, or if the peer doesn't want headers, just - // add all to the inv queue. - LOCK(pto->cs_inventory); - vector<CBlock> vHeaders; - bool fRevertToInv = ((!state.fPreferHeaders && - (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) || - pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE); - CBlockIndex *pBestIndex = NULL; // last header queued for delivery - ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date - - if (!fRevertToInv) { - bool fFoundStartingHeader = false; - // Try to find first header that our peer doesn't have, and - // then send all headers past that one. If we come across any - // headers that aren't on chainActive, give up. - BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) { - BlockMap::iterator mi = mapBlockIndex.find(hash); - assert(mi != mapBlockIndex.end()); - CBlockIndex *pindex = mi->second; - if (chainActive[pindex->nHeight] != pindex) { - // Bail out if we reorged away from this block - fRevertToInv = true; - break; - } - if (pBestIndex != NULL && pindex->pprev != pBestIndex) { - // This means that the list of blocks to announce don't - // connect to each other. - // This shouldn't really be possible to hit during - // regular operation (because reorgs should take us to - // a chain that has some block not on the prior chain, - // which should be caught by the prior check), but one - // way this could happen is by using invalidateblock / - // reconsiderblock repeatedly on the tip, causing it to - // be added multiple times to vBlockHashesToAnnounce. - // Robustly deal with this rare situation by reverting - // to an inv. 
- fRevertToInv = true; - break; - } - pBestIndex = pindex; - if (fFoundStartingHeader) { - // add this to the headers message - vHeaders.push_back(pindex->GetBlockHeader()); - } else if (PeerHasHeader(&state, pindex)) { - continue; // keep looking for the first new block - } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) { - // Peer doesn't have this header but they do have the prior one. - // Start sending headers. - fFoundStartingHeader = true; - vHeaders.push_back(pindex->GetBlockHeader()); - } else { - // Peer doesn't have this header or the prior one -- nothing will - // connect, so bail out. - fRevertToInv = true; - break; - } - } - } - if (!fRevertToInv && !vHeaders.empty()) { - if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) { - // We only send up to 1 block as header-and-ids, as otherwise - // probably means we're doing an initial-ish-sync or they're slow - LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__, - vHeaders.front().GetHash().ToString(), pto->id); - //TODO: Shouldn't need to reload block from disk, but requires refactor - CBlock block; - assert(ReadBlockFromDisk(block, pBestIndex, consensusParams)); - CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness); - pto->PushMessageWithFlag(state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock); - state.pindexBestHeaderSent = pBestIndex; - } else if (state.fPreferHeaders) { - if (vHeaders.size() > 1) { - LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, - vHeaders.size(), - vHeaders.front().GetHash().ToString(), - vHeaders.back().GetHash().ToString(), pto->id); - } else { - LogPrint("net", "%s: sending header %s to peer=%d\n", __func__, - vHeaders.front().GetHash().ToString(), pto->id); - } - pto->PushMessage(NetMsgType::HEADERS, vHeaders); - state.pindexBestHeaderSent = pBestIndex; - } else - fRevertToInv = true; - } - if (fRevertToInv) { - // If falling back to using an inv, just try to inv the tip. - // The last entry in vBlockHashesToAnnounce was our tip at some point - // in the past. - if (!pto->vBlockHashesToAnnounce.empty()) { - const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); - BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce); - assert(mi != mapBlockIndex.end()); - CBlockIndex *pindex = mi->second; - - // Warn if we're announcing a block that is not on the main chain. - // This should be very rare and could be optimized out. - // Just log for now. - if (chainActive[pindex->nHeight] != pindex) { - LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n", - hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString()); - } - - // If the peer's chain has this block, don't inv it back. 
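The block-announcement logic above chooses between three shapes: a single compact block when the peer prefers header-and-ids and there is exactly one new block, a headers message when the blocks to announce form a connected run the peer can attach, and otherwise a plain inv of the tip. A condensed sketch of that choice (names are illustrative, and the real code interleaves the connectivity check with building the headers list):

```cpp
#include <cstddef>

enum class AnnounceMode { CompactBlock, Headers, InvTipOnly };

AnnounceMode ChooseAnnounceMode(bool peer_prefers_headers, bool peer_prefers_cmpct,
                                std::size_t num_to_announce, std::size_t max_blocks_to_announce,
                                bool run_connects_to_peer)
{
    const bool too_many = num_to_announce > max_blocks_to_announce;
    if (!too_many && run_connects_to_peer) {
        if (peer_prefers_cmpct && num_to_announce == 1) return AnnounceMode::CompactBlock;
        if (peer_prefers_headers) return AnnounceMode::Headers;
    }
    return AnnounceMode::InvTipOnly;   // fall back to announcing only the tip
}
```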
- if (!PeerHasHeader(&state, pindex)) { - pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce)); - LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__, - pto->id, hashToAnnounce.ToString()); - } - } - } - pto->vBlockHashesToAnnounce.clear(); + try { + FILE* filestr = fopen((GetDataDir() / "mempool.dat.new").string().c_str(), "w"); + if (!filestr) { + return; } - // - // Message: inventory - // - vector<CInv> vInv; - { - LOCK(pto->cs_inventory); - vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX)); - - // Add blocks - BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) { - vInv.push_back(CInv(MSG_BLOCK, hash)); - if (vInv.size() == MAX_INV_SZ) { - pto->PushMessage(NetMsgType::INV, vInv); - vInv.clear(); - } - } - pto->vInventoryBlockToSend.clear(); - - // Check whether periodic sends should happen - bool fSendTrickle = pto->fWhitelisted; - if (pto->nNextInvSend < nNow) { - fSendTrickle = true; - // Use half the delay for outbound peers, as there is less privacy concern for them. - pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound); - } - - // Time to send but the peer has requested we not relay transactions. - if (fSendTrickle) { - LOCK(pto->cs_filter); - if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear(); - } - - // Respond to BIP35 mempool requests - if (fSendTrickle && pto->fSendMempool) { - auto vtxinfo = mempool.infoAll(); - pto->fSendMempool = false; - CAmount filterrate = 0; - { - LOCK(pto->cs_feeFilter); - filterrate = pto->minFeeFilter; - } - - LOCK(pto->cs_filter); - - for (const auto& txinfo : vtxinfo) { - const uint256& hash = txinfo.tx->GetHash(); - CInv inv(MSG_TX, hash); - pto->setInventoryTxToSend.erase(hash); - if (filterrate) { - if (txinfo.feeRate.GetFeePerK() < filterrate) - continue; - } - if (pto->pfilter) { - if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; - } - pto->filterInventoryKnown.insert(hash); - vInv.push_back(inv); - if (vInv.size() == MAX_INV_SZ) { - pto->PushMessage(NetMsgType::INV, vInv); - vInv.clear(); - } - } - pto->timeLastMempoolReq = GetTime(); - } - - // Determine transactions to relay - if (fSendTrickle) { - // Produce a vector with all candidates for sending - vector<std::set<uint256>::iterator> vInvTx; - vInvTx.reserve(pto->setInventoryTxToSend.size()); - for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) { - vInvTx.push_back(it); - } - CAmount filterrate = 0; - { - LOCK(pto->cs_feeFilter); - filterrate = pto->minFeeFilter; - } - // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. - // A heap is used so that not all items need sorting if only a few are being sent. - CompareInvMempoolOrder compareInvMempoolOrder(&mempool); - std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); - // No reason to drain out at many times the network's capacity, - // especially since we have many peers and some will draw much shorter delays. 
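The transaction trickle above keeps the candidates in a heap ordered by the mempool's own comparison and pops at most a fixed budget per interval, so only the items actually sent need to be sorted. A sketch of that selection with a placeholder score standing in for CTxMemPool::CompareDepthAndScore:

```cpp
#include <algorithm>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

struct Candidate {
    std::string txid;
    double score;   // stand-in for the ancestor-count / fee-rate ordering
};

std::vector<std::string> SelectTrickle(std::vector<Candidate> candidates, std::size_t budget)
{
    auto cmp = [](const Candidate& a, const Candidate& b) { return a.score < b.score; };
    std::make_heap(candidates.begin(), candidates.end(), cmp);  // best candidate at the front
    std::vector<std::string> to_send;
    while (!candidates.empty() && to_send.size() < budget) {
        std::pop_heap(candidates.begin(), candidates.end(), cmp);
        to_send.push_back(std::move(candidates.back().txid));
        candidates.pop_back();
    }
    return to_send;
}
```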
- unsigned int nRelayedTransactions = 0; - LOCK(pto->cs_filter); - while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) { - // Fetch the top element from the heap - std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); - std::set<uint256>::iterator it = vInvTx.back(); - vInvTx.pop_back(); - uint256 hash = *it; - // Remove it from the to-be-sent set - pto->setInventoryTxToSend.erase(it); - // Check if not in the filter already - if (pto->filterInventoryKnown.contains(hash)) { - continue; - } - // Not in the mempool anymore? don't bother sending it. - auto txinfo = mempool.info(hash); - if (!txinfo.tx) { - continue; - } - if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) { - continue; - } - if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; - // Send - vInv.push_back(CInv(MSG_TX, hash)); - nRelayedTransactions++; - { - // Expire old relay messages - while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow) - { - mapRelay.erase(vRelayExpiration.front().second); - vRelayExpiration.pop_front(); - } + CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); - auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx))); - if (ret.second) { - vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first)); - } - } - if (vInv.size() == MAX_INV_SZ) { - pto->PushMessage(NetMsgType::INV, vInv); - vInv.clear(); - } - pto->filterInventoryKnown.insert(hash); - } - } - } - if (!vInv.empty()) - pto->PushMessage(NetMsgType::INV, vInv); - - // Detect whether we're stalling - nNow = GetTimeMicros(); - if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { - // Stalling only triggers when the block download window cannot move. During normal steady state, - // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection - // should only happen during initial block download. - LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id); - pto->fDisconnect = true; - } - // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval - // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout. - // We compensate for other peers to prevent killing off peers due to our own downstream link - // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes - // to unreasonably increase our timeout. 
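The in-flight timeout checked below scales with the block interval and with how many other peers are delivering validated blocks, using the BLOCK_DOWNLOAD_TIMEOUT_BASE and BLOCK_DOWNLOAD_TIMEOUT_PER_PEER constants visible in the validation.h portion of this diff. A worked example of the resulting timeouts, assuming a 600-second target spacing:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE     = 1000000;  // 1.0x the block interval
    const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER =  500000;  // +0.5x per other downloading peer
    const int64_t nPowTargetSpacing = 600;                    // seconds (assumed mainnet-style)

    for (int other_peers = 0; other_peers <= 2; ++other_peers) {
        const int64_t timeout_usec = nPowTargetSpacing *
            (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * other_peers);
        std::printf("%d other validated downloads -> disconnect after %lld minutes\n",
                    other_peers, static_cast<long long>(timeout_usec / 60000000));
    }
    return 0;   // prints 10, 15 and 20 minutes
}
```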
- if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) { - QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); - int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); - if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { - LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id); - pto->fDisconnect = true; - } - } + uint64_t version = MEMPOOL_DUMP_VERSION; + file << version; - // - // Message: getdata (blocks) - // - vector<CInv> vGetData; - if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - vector<CBlockIndex*> vToDownload; - NodeId staller = -1; - FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); - BOOST_FOREACH(CBlockIndex *pindex, vToDownload) { - uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams); - vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex); - LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), - pindex->nHeight, pto->id); - } - if (state.nBlocksInFlight == 0 && staller != -1) { - if (State(staller)->nStallingSince == 0) { - State(staller)->nStallingSince = nNow; - LogPrint("net", "Stall started peer=%d\n", staller); - } - } + file << (uint64_t)vinfo.size(); + for (const auto& i : vinfo) { + file << *(i.tx); + file << (int64_t)i.nTime; + file << (int64_t)i.nFeeDelta; + mapDeltas.erase(i.tx->GetHash()); } - // - // Message: getdata (non-blocks) - // - while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow) - { - const CInv& inv = (*pto->mapAskFor.begin()).second; - if (!AlreadyHave(inv)) - { - if (fDebug) - LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id); - vGetData.push_back(inv); - if (vGetData.size() >= 1000) - { - pto->PushMessage(NetMsgType::GETDATA, vGetData); - vGetData.clear(); - } - } else { - //If we're not going to ask, don't expect a response. - pto->setAskFor.erase(inv.hash); - } - pto->mapAskFor.erase(pto->mapAskFor.begin()); - } - if (!vGetData.empty()) - pto->PushMessage(NetMsgType::GETDATA, vGetData); - - // - // Message: feefilter - // - // We don't want white listed peers to filter txs to us if we have -whitelistforcerelay - if (pto->nVersion >= FEEFILTER_VERSION && GetBoolArg("-feefilter", DEFAULT_FEEFILTER) && - !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) { - CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK(); - int64_t timeNow = GetTimeMicros(); - if (timeNow > pto->nextSendTimeFeeFilter) { - CAmount filterToSend = filterRounder.round(currentFilter); - if (filterToSend != pto->lastSentFeeFilter) { - pto->PushMessage(NetMsgType::FEEFILTER, filterToSend); - pto->lastSentFeeFilter = filterToSend; - } - pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL); - } - // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY - // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. 
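The DumpMempool/LoadMempool additions interleaved in the hunks above persist the mempool as a versioned record stream: the dump version, the entry count, then one (tx, nTime, nFeeDelta) record per transaction, followed by the remaining fee-delta map, written to mempool.dat.new and atomically renamed over mempool.dat. The sketch below only illustrates that field ordering; the real code uses CAutoFile and Bitcoin's serialization rather than this ad-hoc binary layout, and the version value 1 is an assumption:

```cpp
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <map>
#include <string>
#include <vector>

struct MempoolEntryRecord {
    std::vector<unsigned char> tx_bytes;  // serialized transaction
    int64_t time;                         // entry time, checked against expiry on load
    int64_t fee_delta;                    // re-applied via PrioritiseTransaction on load
};

void WriteMempoolDump(std::ostream& out, const std::vector<MempoolEntryRecord>& entries,
                      const std::map<std::string, int64_t>& leftover_fee_deltas)
{
    auto put = [&out](const void* p, std::size_t n) { out.write(static_cast<const char*>(p), n); };
    const uint64_t version = 1;                      // dump version, verified on load
    put(&version, sizeof(version));
    const uint64_t count = entries.size();
    put(&count, sizeof(count));
    for (const auto& e : entries) {                  // tx, nTime, nFeeDelta -- in that order
        const uint64_t len = e.tx_bytes.size();
        put(&len, sizeof(len));
        put(e.tx_bytes.data(), e.tx_bytes.size());
        put(&e.time, sizeof(e.time));
        put(&e.fee_delta, sizeof(e.fee_delta));
    }
    const uint64_t ndeltas = leftover_fee_deltas.size();   // deltas for txs not in the pool
    put(&ndeltas, sizeof(ndeltas));
    for (const auto& d : leftover_fee_deltas) {
        const uint64_t keylen = d.first.size();
        put(&keylen, sizeof(keylen));
        put(d.first.data(), d.first.size());
        put(&d.second, sizeof(d.second));
    }
}
```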
- else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter && - (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) { - pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000; - } - } + file << mapDeltas; + FileCommit(file.Get()); + file.fclose(); + RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat"); + int64_t last = GetTimeMicros(); + LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*0.000001, (last-mid)*0.000001); + } catch (const std::exception& e) { + LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what()); } - return true; -} - - std::string CBlockFileInfo::ToString() const { - return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast)); - } - -ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos) -{ - LOCK(cs_main); - return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache); -} - -int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos) -{ - LOCK(cs_main); - return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache); } class CMainCleanup @@ -6934,9 +4190,5 @@ public: for (; it1 != mapBlockIndex.end(); it1++) delete (*it1).second; mapBlockIndex.clear(); - - // orphan transactions - mapOrphanTransactions.clear(); - mapOrphanTransactionsByPrev.clear(); } } instance_of_cmaincleanup; diff --git a/src/main.h b/src/validation.h index 3eab9b89da..e50a82b9dd 100644 --- a/src/main.h +++ b/src/validation.h @@ -3,8 +3,8 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#ifndef BITCOIN_MAIN_H -#define BITCOIN_MAIN_H +#ifndef BITCOIN_VALIDATION_H +#define BITCOIN_VALIDATION_H #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" @@ -13,10 +13,9 @@ #include "amount.h" #include "chain.h" #include "coins.h" -#include "net.h" +#include "protocol.h" // For CMessageHeader::MessageStartChars #include "script/script_error.h" #include "sync.h" -#include "validationinterface.h" #include "versionbits.h" #include <algorithm> @@ -28,7 +27,10 @@ #include <utility> #include <vector> +#include <atomic> + #include <boost/unordered_map.hpp> +#include <boost/filesystem/path.hpp> class CBlockIndex; class CBlockTreeDB; @@ -125,7 +127,7 @@ static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000; /** Additional block download timeout per parallel downloading peer (i.e. 5 min) */ static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000; -static const unsigned int DEFAULT_LIMITFREERELAY = 15; +static const unsigned int DEFAULT_LIMITFREERELAY = 0; static const bool DEFAULT_RELAYPRIORITY = true; static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60; @@ -165,7 +167,7 @@ extern uint64_t nLastBlockWeight; extern const std::string strMessageMagic; extern CWaitableCriticalSection csBestBlock; extern CConditionVariable cvBlockChange; -extern bool fImporting; +extern std::atomic_bool fImporting; extern bool fReindex; extern int nScriptCheckThreads; extern bool fTxIndex; @@ -215,15 +217,35 @@ static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024; * Process an incoming block. This only returns after the best known valid * block is made active. 
Note that it does not, however, guarantee that the
 * specific block passed to it has been checked for validity!
+ *
+ * If you want to *possibly* get feedback on whether pblock is valid, you must
+ * install a CValidationInterface (see validationinterface.h) - this will have
+ * its BlockChecked method called whenever *any* block completes validation.
+ *
+ * Note that we guarantee that either the proof-of-work is valid on pblock, or
+ * (and possibly also) BlockChecked will have been called.
 *
- * @param[out] state This may be set to an Error state if any error occurred processing it, including during validation/connection/etc of otherwise unrelated blocks during reorganization; or it may be set to an Invalid state if pblock is itself invalid (but this is not guaranteed even when the block is checked). If you want to *possibly* get feedback on whether pblock is valid, you must also install a CValidationInterface (see validationinterface.h) - this will have its BlockChecked method called whenever *any* block completes validation.
- * @param[in] pfrom The node which we are receiving the block from; it is added to mapBlockSource and may be penalised if the block is invalid.
+ * Call without cs_main held.
+ *
 * @param[in] pblock The block we want to process.
 * @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
- * @param[out] dbp The already known disk position of pblock, or NULL if not yet stored.
+ * @param[out] fNewBlock A boolean which is set to indicate if the block was first received via this call
 * @return True if state.IsValid()
 */
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp);
+bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock);
+
+/**
+ * Process incoming block headers.
+ *
+ * Call without cs_main held.
+ *
+ * @param[in] block The block headers themselves
+ * @param[out] state This may be set to an Error state if any error occurred processing them
+ * @param[in] chainparams The params for the chain we want to connect to
+ * @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers
+ */
+bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL);
+
 /** Check whether enough disk space is available for an incoming block */
 bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
 /** Open a block file (blk?????.dat) */
@@ -237,7 +259,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
 /** Initialize a new block tree database + block data on disk */
 bool InitBlockIndex(const CChainParams& chainparams);
 /** Load the block tree and coins database from disk */
-bool LoadBlockIndex();
+bool LoadBlockIndex(const CChainParams& chainparams);
 /** Unload database information */
 void UnloadBlockIndex();
 /** Run an instance of the script checking thread */
@@ -253,9 +275,9 @@ bool IsInitialBlockDownload();
 */
 std::string GetWarnings(const std::string& strFor);
 /** Retrieve a transaction (from memory pool, or from disk, if possible) */
-bool GetTransaction(const uint256 &hash, CTransaction &tx, const Consensus::Params& params, uint256 &hashBlock, bool fAllowSlow = false);
+bool GetTransaction(const uint256 &hash, CTransactionRef &tx, const Consensus::Params& params, uint256 &hashBlock, bool fAllowSlow = false);
 /** Find the best known block, and make it the tip of the block chain */
-bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, const CBlock* pblock = NULL);
+bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>());
 CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
 /**
@@ -291,6 +313,10 @@ void PruneAndFlush();
 bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree, bool* pfMissingInputs, bool fOverrideMempoolLimit=false, const CAmount nAbsurdFee=0);
+/** (try to) add transaction to memory pool with a specified acceptance time **/
+bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
+ bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit=false, const CAmount nAbsurdFee=0);
+
 /** Convert CValidationState to a human-readable message for logging */
 std::string FormatStateMessage(const CValidationState &state);
@@ -339,7 +365,7 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight);
 /** Transaction validation functions */
 /** Context-independent validity checks */
-bool CheckTransaction(const CTransaction& tx, CValidationState& state);
+bool CheckTransaction(const CTransaction& tx, CValidationState& state, bool fCheckDuplicateInputs=true);
 namespace Consensus {
@@ -529,45 +555,10 @@ static const unsigned int REJECT_ALREADY_KNOWN = 0x101;
 /** Transaction conflicts with a transaction already known */
 static const unsigned int REJECT_CONFLICT = 0x102;
-// The following things handle network-processing logic
-// (and should be moved to a separate file)
-
-/** Register with a network node to receive its signals */
-void RegisterNodeSignals(CNodeSignals& nodeSignals);
-/** Unregister a network node */
-void
UnregisterNodeSignals(CNodeSignals& nodeSignals); +/** Dump the mempool to disk. */ +void DumpMempool(); -class PeerLogicValidation : public CValidationInterface { -private: - CConnman* connman; - -public: - PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {} - - virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload); - virtual void BlockChecked(const CBlock& block, const CValidationState& state); -}; - -struct CNodeStateStats { - int nMisbehavior; - int nSyncHeight; - int nCommonHeight; - std::vector<int> vHeightInFlight; -}; - -/** Get statistics from node state */ -bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats); -/** Increase a node's misbehavior score. */ -void Misbehaving(NodeId nodeid, int howmuch); - -/** Process protocol messages received from a given node */ -bool ProcessMessages(CNode* pfrom, CConnman& connman); -/** - * Send queued protocol messages to be sent to a give node. - * - * @param[in] pto The node which we are sending messages to. - * @param[in] connman The connection manager for that node. - */ -bool SendMessages(CNode* pto, CConnman& connman); +/** Load the mempool from disk. */ +bool LoadMempool(); -#endif // BITCOIN_MAIN_H +#endif // BITCOIN_VALIDATION_H diff --git a/src/version.h b/src/version.h index 87bd655066..87fb1a3a75 100644 --- a/src/version.h +++ b/src/version.h @@ -9,7 +9,7 @@ * network protocol versioning */ -static const int PROTOCOL_VERSION = 70014; +static const int PROTOCOL_VERSION = 70015; //! initial proto version, to be increased after version/verack negotiation static const int INIT_PROTO_VERSION = 209; @@ -42,4 +42,7 @@ static const int FEEFILTER_VERSION = 70013; //! short-id-based block download starts with this version static const int SHORT_IDS_BLOCKS_VERSION = 70014; +//! not banning for invalid compact blocks starts with this version +static const int INVALID_CB_NO_BAN_VERSION = 70015; + #endif // BITCOIN_VERSION_H diff --git a/src/coincontrol.h b/src/wallet/coincontrol.h index e33adc4d2b..08d23688ff 100644 --- a/src/coincontrol.h +++ b/src/wallet/coincontrol.h @@ -2,8 +2,8 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#ifndef BITCOIN_COINCONTROL_H -#define BITCOIN_COINCONTROL_H +#ifndef BITCOIN_WALLET_COINCONTROL_H +#define BITCOIN_WALLET_COINCONTROL_H #include "primitives/transaction.h" @@ -22,6 +22,8 @@ public: bool fOverrideFeeRate; //! Feerate to use if overrideFeeRate is true CFeeRate nFeeRate; + //! 
Override the default confirmation target, 0 = use default + int nConfirmTarget; CCoinControl() { @@ -37,6 +39,7 @@ public: nMinimumTotalFee = 0; nFeeRate = CFeeRate(0); fOverrideFeeRate = false; + nConfirmTarget = 0; } bool HasSelected() const @@ -73,4 +76,4 @@ private: std::set<COutPoint> setSelected; }; -#endif // BITCOIN_COINCONTROL_H +#endif // BITCOIN_WALLET_COINCONTROL_H diff --git a/src/wallet/crypter.cpp b/src/wallet/crypter.cpp index 190f8ecf2a..31ee060677 100644 --- a/src/wallet/crypter.cpp +++ b/src/wallet/crypter.cpp @@ -48,12 +48,12 @@ bool CCrypter::SetKeyFromPassphrase(const SecureString& strKeyData, const std::v int i = 0; if (nDerivationMethod == 0) - i = BytesToKeySHA512AES(chSalt, strKeyData, nRounds, chKey, chIV); + i = BytesToKeySHA512AES(chSalt, strKeyData, nRounds, vchKey.data(), vchIV.data()); if (i != (int)WALLET_CRYPTO_KEY_SIZE) { - memory_cleanse(chKey, sizeof(chKey)); - memory_cleanse(chIV, sizeof(chIV)); + memory_cleanse(vchKey.data(), vchKey.size()); + memory_cleanse(vchIV.data(), vchIV.size()); return false; } @@ -66,8 +66,8 @@ bool CCrypter::SetKey(const CKeyingMaterial& chNewKey, const std::vector<unsigne if (chNewKey.size() != WALLET_CRYPTO_KEY_SIZE || chNewIV.size() != WALLET_CRYPTO_IV_SIZE) return false; - memcpy(&chKey[0], &chNewKey[0], sizeof chKey); - memcpy(&chIV[0], &chNewIV[0], sizeof chIV); + memcpy(vchKey.data(), chNewKey.data(), chNewKey.size()); + memcpy(vchIV.data(), chNewIV.data(), chNewIV.size()); fKeySet = true; return true; @@ -82,7 +82,7 @@ bool CCrypter::Encrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned // n + AES_BLOCKSIZE bytes vchCiphertext.resize(vchPlaintext.size() + AES_BLOCKSIZE); - AES256CBCEncrypt enc(chKey, chIV, true); + AES256CBCEncrypt enc(vchKey.data(), vchIV.data(), true); size_t nLen = enc.Encrypt(&vchPlaintext[0], vchPlaintext.size(), &vchCiphertext[0]); if(nLen < vchPlaintext.size()) return false; @@ -101,7 +101,7 @@ bool CCrypter::Decrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingM vchPlaintext.resize(nLen); - AES256CBCDecrypt dec(chKey, chIV, true); + AES256CBCDecrypt dec(vchKey.data(), vchIV.data(), true); nLen = dec.Decrypt(&vchCiphertext[0], vchCiphertext.size(), &vchPlaintext[0]); if(nLen == 0) return false; diff --git a/src/wallet/crypter.h b/src/wallet/crypter.h index 5d0a4a3305..e89c15b5d4 100644 --- a/src/wallet/crypter.h +++ b/src/wallet/crypter.h @@ -47,7 +47,7 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(vchCryptedKey); READWRITE(vchSalt); READWRITE(nDerivationMethod); @@ -77,8 +77,8 @@ class CCrypter { friend class wallet_crypto::TestCrypter; // for test access to chKey/chIV private: - unsigned char chKey[WALLET_CRYPTO_KEY_SIZE]; - unsigned char chIV[WALLET_CRYPTO_IV_SIZE]; + std::vector<unsigned char, secure_allocator<unsigned char>> vchKey; + std::vector<unsigned char, secure_allocator<unsigned char>> vchIV; bool fKeySet; int BytesToKeySHA512AES(const std::vector<unsigned char>& chSalt, const SecureString& strKeyData, int count, unsigned char *key,unsigned char *iv) const; @@ -91,28 +91,21 @@ public: void CleanKey() { - memory_cleanse(chKey, sizeof(chKey)); - memory_cleanse(chIV, sizeof(chIV)); + memory_cleanse(vchKey.data(), vchKey.size()); + memory_cleanse(vchIV.data(), vchIV.size()); fKeySet = false; } CCrypter() { fKeySet = false; - - // Try to keep the key data out of swap 
(and be a bit over-careful to keep the IV that we don't even use out of swap) - // Note that this does nothing about suspend-to-disk (which will put all our key data on disk) - // Note as well that at no point in this program is any attempt made to prevent stealing of keys by reading the memory of the running process. - LockedPageManager::Instance().LockRange(&chKey[0], sizeof chKey); - LockedPageManager::Instance().LockRange(&chIV[0], sizeof chIV); + vchKey.resize(WALLET_CRYPTO_KEY_SIZE); + vchIV.resize(WALLET_CRYPTO_IV_SIZE); } ~CCrypter() { CleanKey(); - - LockedPageManager::Instance().UnlockRange(&chKey[0], sizeof chKey); - LockedPageManager::Instance().UnlockRange(&chIV[0], sizeof chIV); } }; diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index b638810e9d..008a4ece19 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -6,7 +6,7 @@ #include "chain.h" #include "rpc/server.h" #include "init.h" -#include "main.h" +#include "validation.h" #include "script/script.h" #include "script/standard.h" #include "sync.h" @@ -267,11 +267,11 @@ UniValue importprunedfunds(const JSONRPCRequest& request) "2. \"txoutproof\" (string, required) The hex output from gettxoutproof that contains the transaction\n" ); - CTransaction tx; + CMutableTransaction tx; if (!DecodeHexTx(tx, request.params[0].get_str())) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); uint256 hashTx = tx.GetHash(); - CWalletTx wtx(pwalletMain,tx); + CWalletTx wtx(pwalletMain, MakeTransactionRef(std::move(tx))); CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION); CMerkleBlock merkleBlock; @@ -304,7 +304,7 @@ UniValue importprunedfunds(const JSONRPCRequest& request) LOCK2(cs_main, pwalletMain->cs_wallet); - if (pwalletMain->IsMine(tx)) { + if (pwalletMain->IsMine(wtx)) { pwalletMain->AddToWallet(wtx, false); return NullUniValue; } @@ -798,8 +798,8 @@ UniValue processImport(const UniValue& data) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey must be a hex string"); } - std::vector<unsigned char> data(ParseHex(strPubKey)); - CPubKey pubKey(data.begin(), data.end()); + std::vector<unsigned char> vData(ParseHex(strPubKey)); + CPubKey pubKey(vData.begin(), vData.end()); if (!pubKey.IsFullyValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey is not a valid public key"); @@ -1017,7 +1017,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest) bool fRunScan = false; const int64_t minimumTimestamp = 1; - int64_t nLowestTimestamp; + int64_t nLowestTimestamp = 0; if (fRescan && chainActive.Tip()) { nLowestTimestamp = chainActive.Tip()->GetBlockTime(); diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index ab7e74988a..5a4fcc743c 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -6,9 +6,10 @@ #include "amount.h" #include "base58.h" #include "chain.h" +#include "consensus/validation.h" #include "core_io.h" #include "init.h" -#include "main.h" +#include "validation.h" #include "net.h" #include "policy/rbf.h" #include "rpc/server.h" @@ -361,12 +362,15 @@ static void SendMoney(const CTxDestination &address, CAmount nValue, bool fSubtr CRecipient recipient = {scriptPubKey, nValue, fSubtractFeeFromAmount}; vecSend.push_back(recipient); if (!pwalletMain->CreateTransaction(vecSend, wtxNew, reservekey, nFeeRequired, nChangePosRet, strError)) { - if (!fSubtractFeeFromAmount && nValue + nFeeRequired > pwalletMain->GetBalance()) - strError = strprintf("Error: This transaction requires a transaction fee of at least 
%s because of its amount, complexity, or use of recently received funds!", FormatMoney(nFeeRequired)); + if (!fSubtractFeeFromAmount && nValue + nFeeRequired > curBalance) + strError = strprintf("Error: This transaction requires a transaction fee of at least %s", FormatMoney(nFeeRequired)); + throw JSONRPCError(RPC_WALLET_ERROR, strError); + } + CValidationState state; + if (!pwalletMain->CommitTransaction(wtxNew, reservekey, g_connman.get(), state)) { + strError = strprintf("Error: The transaction was rejected! Reason given: %s", state.GetRejectReason()); throw JSONRPCError(RPC_WALLET_ERROR, strError); } - if (!pwalletMain->CommitTransaction(wtxNew, reservekey, g_connman.get())) - throw JSONRPCError(RPC_WALLET_ERROR, "Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of the wallet and coins were spent in the copy but not marked as spent here."); } UniValue sendtoaddress(const JSONRPCRequest& request) @@ -579,10 +583,10 @@ UniValue getreceivedbyaddress(const JSONRPCRequest& request) for (map<uint256, CWalletTx>::iterator it = pwalletMain->mapWallet.begin(); it != pwalletMain->mapWallet.end(); ++it) { const CWalletTx& wtx = (*it).second; - if (wtx.IsCoinBase() || !CheckFinalTx(wtx)) + if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx)) continue; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) if (txout.scriptPubKey == scriptPubKey) if (wtx.GetDepthInMainChain() >= nMinDepth) nAmount += txout.nValue; @@ -633,10 +637,10 @@ UniValue getreceivedbyaccount(const JSONRPCRequest& request) for (map<uint256, CWalletTx>::iterator it = pwalletMain->mapWallet.begin(); it != pwalletMain->mapWallet.end(); ++it) { const CWalletTx& wtx = (*it).second; - if (wtx.IsCoinBase() || !CheckFinalTx(wtx)) + if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx)) continue; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) { CTxDestination address; if (ExtractDestination(txout.scriptPubKey, address) && IsMine(*pwalletMain, address) && setAddress.count(address)) @@ -959,8 +963,11 @@ UniValue sendmany(const JSONRPCRequest& request) bool fCreated = pwalletMain->CreateTransaction(vecSend, wtx, keyChange, nFeeRequired, nChangePosRet, strFailReason); if (!fCreated) throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, strFailReason); - if (!pwalletMain->CommitTransaction(wtx, keyChange, g_connman.get())) - throw JSONRPCError(RPC_WALLET_ERROR, "Transaction commit failed"); + CValidationState state; + if (!pwalletMain->CommitTransaction(wtx, keyChange, g_connman.get(), state)) { + strFailReason = strprintf("Transaction commit failed:: %s", state.GetRejectReason()); + throw JSONRPCError(RPC_WALLET_ERROR, strFailReason); + } return wtx.GetHash().GetHex(); } @@ -1142,14 +1149,14 @@ UniValue ListReceived(const UniValue& params, bool fByAccounts) { const CWalletTx& wtx = (*it).second; - if (wtx.IsCoinBase() || !CheckFinalTx(wtx)) + if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx)) continue; int nDepth = wtx.GetDepthInMainChain(); if (nDepth < nMinDepth) continue; - BOOST_FOREACH(const CTxOut& txout, wtx.vout) + BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) { CTxDestination address; if (!ExtractDestination(txout.scriptPubKey, address)) @@ -1773,7 +1780,7 @@ UniValue gettransaction(const JSONRPCRequest& request) CAmount nCredit = wtx.GetCredit(filter); CAmount nDebit = wtx.GetDebit(filter); CAmount nNet = nCredit - nDebit; - CAmount nFee = (wtx.IsFromMe(filter) ? 
wtx.GetValueOut() - nDebit : 0); + CAmount nFee = (wtx.IsFromMe(filter) ? wtx.tx->GetValueOut() - nDebit : 0); entry.push_back(Pair("amount", ValueFromAmount(nNet - nFee))); if (wtx.IsFromMe(filter)) @@ -1785,7 +1792,7 @@ UniValue gettransaction(const JSONRPCRequest& request) ListTransactions(wtx, "*", 0, false, details, filter); entry.push_back(Pair("details", details)); - string strHex = EncodeHexTx(static_cast<CTransaction>(wtx)); + string strHex = EncodeHexTx(static_cast<CTransaction>(wtx), RPCSerializationFlags()); entry.push_back(Pair("hex", strHex)); return entry; @@ -2279,7 +2286,7 @@ UniValue getwalletinfo(const JSONRPCRequest& request) " \"unconfirmed_balance\": xxx, (numeric) the total unconfirmed balance of the wallet in " + CURRENCY_UNIT + "\n" " \"immature_balance\": xxxxxx, (numeric) the total immature balance of the wallet in " + CURRENCY_UNIT + "\n" " \"txcount\": xxxxxxx, (numeric) the total number of transactions in the wallet\n" - " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds since GMT epoch) of the oldest pre-generated key in the key pool\n" + " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds since Unix epoch) of the oldest pre-generated key in the key pool\n" " \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated\n" " \"unlocked_until\": ttt, (numeric) the timestamp in seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is unlocked for transfers, or 0 if the wallet is locked\n" " \"paytxfee\": x.xxxx, (numeric) the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB\n" @@ -2413,7 +2420,7 @@ UniValue listunspent(const JSONRPCRequest& request) continue; CTxDestination address; - const CScript& scriptPubKey = out.tx->vout[out.i].scriptPubKey; + const CScript& scriptPubKey = out.tx->tx->vout[out.i].scriptPubKey; bool fValidAddress = ExtractDestination(scriptPubKey, address); if (setAddress.size() && (!fValidAddress || !setAddress.count(address))) @@ -2438,7 +2445,7 @@ UniValue listunspent(const JSONRPCRequest& request) } entry.push_back(Pair("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end()))); - entry.push_back(Pair("amount", ValueFromAmount(out.tx->vout[out.i].nValue))); + entry.push_back(Pair("amount", ValueFromAmount(out.tx->tx->vout[out.i].nValue))); entry.push_back(Pair("confirmations", out.nDepth)); entry.push_back(Pair("spendable", out.fSpendable)); entry.push_back(Pair("solvable", out.fSolvable)); @@ -2550,17 +2557,16 @@ UniValue fundrawtransaction(const JSONRPCRequest& request) } // parse hex string from parameter - CTransaction origTx; - if (!DecodeHexTx(origTx, request.params[0].get_str(), true)) + CMutableTransaction tx; + if (!DecodeHexTx(tx, request.params[0].get_str(), true)) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); - if (origTx.vout.size() == 0) + if (tx.vout.size() == 0) throw JSONRPCError(RPC_INVALID_PARAMETER, "TX must have at least one output"); - if (changePosition != -1 && (changePosition < 0 || (unsigned int)changePosition > origTx.vout.size())) + if (changePosition != -1 && (changePosition < 0 || (unsigned int)changePosition > tx.vout.size())) throw JSONRPCError(RPC_INVALID_PARAMETER, "changePosition out of bounds"); - CMutableTransaction tx(origTx); CAmount nFeeOut; string strFailReason; diff --git a/src/wallet/test/accounting_tests.cpp b/src/wallet/test/accounting_tests.cpp index a833be13d0..eb14d176bd 100644 --- a/src/wallet/test/accounting_tests.cpp +++ b/src/wallet/test/accounting_tests.cpp @@ -86,7 +86,7 @@ 
BOOST_AUTO_TEST_CASE(acc_orderupgrade) { CMutableTransaction tx(wtx); --tx.nLockTime; // Just to change the hash :) - *static_cast<CTransaction*>(&wtx) = CTransaction(tx); + wtx.SetTx(MakeTransactionRef(std::move(tx))); } pwalletMain->AddToWallet(wtx); vpwtx.push_back(&pwalletMain->mapWallet[wtx.GetHash()]); @@ -96,7 +96,7 @@ BOOST_AUTO_TEST_CASE(acc_orderupgrade) { CMutableTransaction tx(wtx); --tx.nLockTime; // Just to change the hash :) - *static_cast<CTransaction*>(&wtx) = CTransaction(tx); + wtx.SetTx(MakeTransactionRef(std::move(tx))); } pwalletMain->AddToWallet(wtx); vpwtx.push_back(&pwalletMain->mapWallet[wtx.GetHash()]); diff --git a/src/wallet/test/crypto_tests.cpp b/src/wallet/test/crypto_tests.cpp index c5f55ef5f0..c64c76244e 100644 --- a/src/wallet/test/crypto_tests.cpp +++ b/src/wallet/test/crypto_tests.cpp @@ -42,15 +42,19 @@ bool OldEncrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned char> int nCLen = nLen + AES_BLOCK_SIZE, nFLen = 0; vchCiphertext = std::vector<unsigned char> (nCLen); - EVP_CIPHER_CTX ctx; + EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new(); + + if (!ctx) return false; bool fOk = true; - EVP_CIPHER_CTX_init(&ctx); - if (fOk) fOk = EVP_EncryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0; - if (fOk) fOk = EVP_EncryptUpdate(&ctx, &vchCiphertext[0], &nCLen, &vchPlaintext[0], nLen) != 0; - if (fOk) fOk = EVP_EncryptFinal_ex(&ctx, (&vchCiphertext[0]) + nCLen, &nFLen) != 0; - EVP_CIPHER_CTX_cleanup(&ctx); + EVP_CIPHER_CTX_init(ctx); + if (fOk) fOk = EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0; + if (fOk) fOk = EVP_EncryptUpdate(ctx, &vchCiphertext[0], &nCLen, &vchPlaintext[0], nLen) != 0; + if (fOk) fOk = EVP_EncryptFinal_ex(ctx, (&vchCiphertext[0]) + nCLen, &nFLen) != 0; + EVP_CIPHER_CTX_cleanup(ctx); + + EVP_CIPHER_CTX_free(ctx); if (!fOk) return false; @@ -66,15 +70,19 @@ bool OldDecrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingMaterial vchPlaintext = CKeyingMaterial(nPLen); - EVP_CIPHER_CTX ctx; + EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new(); + + if (!ctx) return false; bool fOk = true; - EVP_CIPHER_CTX_init(&ctx); - if (fOk) fOk = EVP_DecryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0; - if (fOk) fOk = EVP_DecryptUpdate(&ctx, &vchPlaintext[0], &nPLen, &vchCiphertext[0], nLen) != 0; - if (fOk) fOk = EVP_DecryptFinal_ex(&ctx, (&vchPlaintext[0]) + nPLen, &nFLen) != 0; - EVP_CIPHER_CTX_cleanup(&ctx); + EVP_CIPHER_CTX_init(ctx); + if (fOk) fOk = EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0; + if (fOk) fOk = EVP_DecryptUpdate(ctx, &vchPlaintext[0], &nPLen, &vchCiphertext[0], nLen) != 0; + if (fOk) fOk = EVP_DecryptFinal_ex(ctx, (&vchPlaintext[0]) + nPLen, &nFLen) != 0; + EVP_CIPHER_CTX_cleanup(ctx); + + EVP_CIPHER_CTX_free(ctx); if (!fOk) return false; @@ -97,10 +105,10 @@ static void TestPassphraseSingle(const std::vector<unsigned char>& vchSalt, cons OldSetKeyFromPassphrase(passphrase, vchSalt, rounds, 0, chKey, chIV); - BOOST_CHECK_MESSAGE(memcmp(chKey, crypt.chKey, sizeof(chKey)) == 0, \ - HexStr(chKey, chKey+sizeof(chKey)) + std::string(" != ") + HexStr(crypt.chKey, crypt.chKey + (sizeof crypt.chKey))); - BOOST_CHECK_MESSAGE(memcmp(chIV, crypt.chIV, sizeof(chIV)) == 0, \ - HexStr(chIV, chIV+sizeof(chIV)) + std::string(" != ") + HexStr(crypt.chIV, crypt.chIV + (sizeof crypt.chIV))); + BOOST_CHECK_MESSAGE(memcmp(chKey, crypt.vchKey.data(), crypt.vchKey.size()) == 0, \ + HexStr(chKey, chKey+sizeof(chKey)) + std::string(" != ") + HexStr(crypt.vchKey)); + 
BOOST_CHECK_MESSAGE(memcmp(chIV, crypt.vchIV.data(), crypt.vchIV.size()) == 0, \ + HexStr(chIV, chIV+sizeof(chIV)) + std::string(" != ") + HexStr(crypt.vchIV)); if(!correctKey.empty()) BOOST_CHECK_MESSAGE(memcmp(chKey, &correctKey[0], sizeof(chKey)) == 0, \ @@ -127,7 +135,7 @@ static void TestDecrypt(const CCrypter& crypt, const std::vector<unsigned char>& CKeyingMaterial vchDecrypted2; int result1, result2; result1 = crypt.Decrypt(vchCiphertext, vchDecrypted1); - result2 = OldDecrypt(vchCiphertext, vchDecrypted2, crypt.chKey, crypt.chIV); + result2 = OldDecrypt(vchCiphertext, vchDecrypted2, crypt.vchKey.data(), crypt.vchIV.data()); BOOST_CHECK(result1 == result2); // These two should be equal. However, OpenSSL 1.0.1j introduced a change @@ -152,7 +160,7 @@ static void TestEncryptSingle(const CCrypter& crypt, const CKeyingMaterial& vchP std::vector<unsigned char> vchCiphertext2; int result1 = crypt.Encrypt(vchPlaintext, vchCiphertext1); - int result2 = OldEncrypt(vchPlaintext, vchCiphertext2, crypt.chKey, crypt.chIV); + int result2 = OldEncrypt(vchPlaintext, vchCiphertext2, crypt.vchKey.data(), crypt.vchIV.data()); BOOST_CHECK(result1 == result2); BOOST_CHECK(vchCiphertext1 == vchCiphertext2); diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index acf980c784..8085706a99 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -42,7 +42,7 @@ static void add_coin(const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = fa // so stop vin being empty, and cache a non-zero Debit to fake out IsFromMe() tx.vin.resize(1); } - CWalletTx* wtx = new CWalletTx(&wallet, tx); + CWalletTx* wtx = new CWalletTx(&wallet, MakeTransactionRef(std::move(tx))); if (fIsFromMe) { wtx->fDebitCached = true; @@ -266,7 +266,7 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests) // test with many inputs for (CAmount amt=1500; amt < COIN; amt*=10) { empty_wallet(); - // Create 676 inputs (= MAX_STANDARD_TX_SIZE / 148 bytes per input) + // Create 676 inputs (= (old MAX_STANDARD_TX_SIZE == 100000) / 148 bytes per input) for (uint16_t j = 0; j < 676; j++) add_coin(amt); BOOST_CHECK(wallet.SelectCoinsMinConf(2000, 1, 1, vCoins, setCoinsRet, nValueRet)); diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index c7f98b238e..4c6916c04a 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -8,12 +8,12 @@ #include "base58.h" #include "checkpoints.h" #include "chain.h" -#include "coincontrol.h" +#include "wallet/coincontrol.h" #include "consensus/consensus.h" #include "consensus/validation.h" #include "key.h" #include "keystore.h" -#include "main.h" +#include "validation.h" #include "net.h" #include "policy/policy.h" #include "primitives/block.h" @@ -75,7 +75,7 @@ struct CompareValueOnly std::string COutput::ToString() const { - return strprintf("COutput(%s, %d, %d) [%s]", tx->GetHash().ToString(), i, nDepth, FormatMoney(tx->vout[i].nValue)); + return strprintf("COutput(%s, %d, %d) [%s]", tx->GetHash().ToString(), i, nDepth, FormatMoney(tx->tx->vout[i].nValue)); } const CWalletTx* CWallet::GetWalletTx(const uint256& hash) const @@ -400,7 +400,7 @@ set<uint256> CWallet::GetConflicts(const uint256& txid) const std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range; - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { if (mapTxSpends.count(txin.prevout) <= 1) continue; // No conflict if zero or one spends @@ -552,7 +552,7 @@ void CWallet::AddToSpends(const uint256& wtxid) if (thisTx.IsCoinBase()) // 
Coinbases don't spend anything! return; - BOOST_FOREACH(const CTxIn& txin, thisTx.vin) + BOOST_FOREACH(const CTxIn& txin, thisTx.tx->vin) AddToSpends(txin.prevout, wtxid); } @@ -658,8 +658,79 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase) DBErrors CWallet::ReorderTransactions() { + LOCK(cs_wallet); CWalletDB walletdb(strWalletFile); - return walletdb.ReorderTransactions(this); + + // Old wallets didn't have any defined order for transactions + // Probably a bad idea to change the output of this + + // First: get all CWalletTx and CAccountingEntry into a sorted-by-time multimap. + typedef pair<CWalletTx*, CAccountingEntry*> TxPair; + typedef multimap<int64_t, TxPair > TxItems; + TxItems txByTime; + + for (map<uint256, CWalletTx>::iterator it = mapWallet.begin(); it != mapWallet.end(); ++it) + { + CWalletTx* wtx = &((*it).second); + txByTime.insert(make_pair(wtx->nTimeReceived, TxPair(wtx, (CAccountingEntry*)0))); + } + list<CAccountingEntry> acentries; + walletdb.ListAccountCreditDebit("", acentries); + BOOST_FOREACH(CAccountingEntry& entry, acentries) + { + txByTime.insert(make_pair(entry.nTime, TxPair((CWalletTx*)0, &entry))); + } + + nOrderPosNext = 0; + std::vector<int64_t> nOrderPosOffsets; + for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it) + { + CWalletTx *const pwtx = (*it).second.first; + CAccountingEntry *const pacentry = (*it).second.second; + int64_t& nOrderPos = (pwtx != 0) ? pwtx->nOrderPos : pacentry->nOrderPos; + + if (nOrderPos == -1) + { + nOrderPos = nOrderPosNext++; + nOrderPosOffsets.push_back(nOrderPos); + + if (pwtx) + { + if (!walletdb.WriteTx(*pwtx)) + return DB_LOAD_FAIL; + } + else + if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) + return DB_LOAD_FAIL; + } + else + { + int64_t nOrderPosOff = 0; + BOOST_FOREACH(const int64_t& nOffsetStart, nOrderPosOffsets) + { + if (nOrderPos >= nOffsetStart) + ++nOrderPosOff; + } + nOrderPos += nOrderPosOff; + nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1); + + if (!nOrderPosOff) + continue; + + // Since we're changing the order, write it back + if (pwtx) + { + if (!walletdb.WriteTx(*pwtx)) + return DB_LOAD_FAIL; + } + else + if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) + return DB_LOAD_FAIL; + } + } + walletdb.WriteOrderPosNext(nOrderPosNext); + + return DB_LOAD_OK; } int64_t CWallet::IncOrderPosNext(CWalletDB *pwalletdb) @@ -724,7 +795,7 @@ bool CWallet::GetAccountPubkey(CPubKey &pubKey, std::string strAccount, bool bFo for (map<uint256, CWalletTx>::iterator it = mapWallet.begin(); it != mapWallet.end() && account.vchPubKey.IsValid(); ++it) - BOOST_FOREACH(const CTxOut& txout, (*it).second.vout) + BOOST_FOREACH(const CTxOut& txout, (*it).second.tx->vout) if (txout.scriptPubKey == scriptPubKey) { bForceNew = true; break; @@ -883,7 +954,7 @@ bool CWallet::LoadToWallet(const CWalletTx& wtxIn) wtx.BindWallet(this); wtxOrdered.insert(make_pair(wtx.nOrderPos, TxPair(&wtx, (CAccountingEntry*)0))); AddToSpends(hash); - BOOST_FOREACH(const CTxIn& txin, wtx.vin) { + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { if (mapWallet.count(txin.prevout.hash)) { CWalletTx& prevtx = mapWallet[txin.prevout.hash]; if (prevtx.nIndex == -1 && !prevtx.hashUnset()) { @@ -922,7 +993,7 @@ bool CWallet::AddToWalletIfInvolvingMe(const CTransaction& tx, const CBlockIndex if (fExisted && !fUpdate) return false; if (fExisted || IsMine(tx) || IsFromMe(tx)) { - CWalletTx wtx(this,tx); + CWalletTx wtx(this, MakeTransactionRef(tx)); // Get merkle branch if 
transaction was found in a block if (posInBlock != -1) @@ -981,7 +1052,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) } // If a transaction changes 'conflicted' state, that changes the balance // available of the outputs it spends. So force those to be recomputed - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { if (mapWallet.count(txin.prevout.hash)) mapWallet[txin.prevout.hash].MarkDirty(); @@ -1042,7 +1113,7 @@ void CWallet::MarkConflicted(const uint256& hashBlock, const uint256& hashTx) } // If a transaction changes 'conflicted' state, that changes the balance // available of the outputs it spends. So force those to be recomputed - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { if (mapWallet.count(txin.prevout.hash)) mapWallet[txin.prevout.hash].MarkDirty(); @@ -1077,8 +1148,8 @@ isminetype CWallet::IsMine(const CTxIn &txin) const if (mi != mapWallet.end()) { const CWalletTx& prev = (*mi).second; - if (txin.prevout.n < prev.vout.size()) - return IsMine(prev.vout[txin.prevout.n]); + if (txin.prevout.n < prev.tx->vout.size()) + return IsMine(prev.tx->vout[txin.prevout.n]); } } return ISMINE_NO; @@ -1092,9 +1163,9 @@ CAmount CWallet::GetDebit(const CTxIn &txin, const isminefilter& filter) const if (mi != mapWallet.end()) { const CWalletTx& prev = (*mi).second; - if (txin.prevout.n < prev.vout.size()) - if (IsMine(prev.vout[txin.prevout.n]) & filter) - return prev.vout[txin.prevout.n].nValue; + if (txin.prevout.n < prev.tx->vout.size()) + if (IsMine(prev.tx->vout[txin.prevout.n]) & filter) + return prev.tx->vout[txin.prevout.n].nValue; } } return 0; @@ -1309,14 +1380,14 @@ void CWalletTx::GetAmounts(list<COutputEntry>& listReceived, CAmount nDebit = GetDebit(filter); if (nDebit > 0) // debit>0 means we signed/sent this transaction { - CAmount nValueOut = GetValueOut(); + CAmount nValueOut = tx->GetValueOut(); nFee = nDebit - nValueOut; } // Sent/received. - for (unsigned int i = 0; i < vout.size(); ++i) + for (unsigned int i = 0; i < tx->vout.size(); ++i) { - const CTxOut& txout = vout[i]; + const CTxOut& txout = tx->vout[i]; isminetype fIsMine = pwallet->IsMine(txout); // Only need to handle txouts if AT LEAST one of these is true: // 1) they debit from us (sent) @@ -1421,7 +1492,7 @@ int CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate) int posInBlock; for (posInBlock = 0; posInBlock < (int)block.vtx.size(); posInBlock++) { - if (AddToWalletIfInvolvingMe(block.vtx[posInBlock], pindex, posInBlock, fUpdate)) + if (AddToWalletIfInvolvingMe(*block.vtx[posInBlock], pindex, posInBlock, fUpdate)) ret++; } pindex = chainActive.Next(pindex); @@ -1463,16 +1534,19 @@ void CWallet::ReacceptWalletTransactions() CWalletTx& wtx = *(item.second); LOCK(mempool.cs); - wtx.AcceptToMemoryPool(maxTxFee); + CValidationState state; + wtx.AcceptToMemoryPool(maxTxFee, state); } } bool CWalletTx::RelayWalletTransaction(CConnman* connman) { assert(pwallet->GetBroadcastTransactions()); - if (!IsCoinBase()) + if (!IsCoinBase() && !isAbandoned() && GetDepthInMainChain() == 0) { - if (GetDepthInMainChain() == 0 && !isAbandoned() && InMempool()) { + CValidationState state; + /* GetDepthInMainChain already catches known conflicts. 
*/ + if (InMempool() || AcceptToMemoryPool(maxTxFee, state)) { LogPrintf("Relaying wtx %s\n", GetHash().ToString()); if (connman) { CInv inv(MSG_TX, GetHash()); @@ -1501,7 +1575,7 @@ set<uint256> CWalletTx::GetConflicts() const CAmount CWalletTx::GetDebit(const isminefilter& filter) const { - if (vin.empty()) + if (tx->vin.empty()) return 0; CAmount debit = 0; @@ -1536,7 +1610,7 @@ CAmount CWalletTx::GetCredit(const isminefilter& filter) const if (IsCoinBase() && GetBlocksToMaturity() > 0) return 0; - int64_t credit = 0; + CAmount credit = 0; if (filter & ISMINE_SPENDABLE) { // GetBalance can assume transactions in mapWallet won't change @@ -1591,11 +1665,11 @@ CAmount CWalletTx::GetAvailableCredit(bool fUseCache) const CAmount nCredit = 0; uint256 hashTx = GetHash(); - for (unsigned int i = 0; i < vout.size(); i++) + for (unsigned int i = 0; i < tx->vout.size(); i++) { if (!pwallet->IsSpent(hashTx, i)) { - const CTxOut &txout = vout[i]; + const CTxOut &txout = tx->vout[i]; nCredit += pwallet->GetCredit(txout, ISMINE_SPENDABLE); if (!MoneyRange(nCredit)) throw std::runtime_error("CWalletTx::GetAvailableCredit() : value out of range"); @@ -1634,11 +1708,11 @@ CAmount CWalletTx::GetAvailableWatchOnlyCredit(const bool& fUseCache) const return nAvailableWatchCreditCached; CAmount nCredit = 0; - for (unsigned int i = 0; i < vout.size(); i++) + for (unsigned int i = 0; i < tx->vout.size(); i++) { if (!pwallet->IsSpent(GetHash(), i)) { - const CTxOut &txout = vout[i]; + const CTxOut &txout = tx->vout[i]; nCredit += pwallet->GetCredit(txout, ISMINE_WATCH_ONLY); if (!MoneyRange(nCredit)) throw std::runtime_error("CWalletTx::GetAvailableCredit() : value out of range"); @@ -1686,23 +1760,23 @@ bool CWalletTx::IsTrusted() const return false; // Trusted if all inputs are from us and are in the mempool: - BOOST_FOREACH(const CTxIn& txin, vin) + BOOST_FOREACH(const CTxIn& txin, tx->vin) { // Transactions not sent by us: not trusted const CWalletTx* parent = pwallet->GetWalletTx(txin.prevout.hash); if (parent == NULL) return false; - const CTxOut& parentOut = parent->vout[txin.prevout.n]; + const CTxOut& parentOut = parent->tx->vout[txin.prevout.n]; if (pwallet->IsMine(parentOut) != ISMINE_SPENDABLE) return false; } return true; } -bool CWalletTx::IsEquivalentTo(const CWalletTx& tx) const +bool CWalletTx::IsEquivalentTo(const CWalletTx& _tx) const { - CMutableTransaction tx1 = *this; - CMutableTransaction tx2 = tx; + CMutableTransaction tx1 = *this->tx; + CMutableTransaction tx2 = *_tx.tx; for (unsigned int i = 0; i < tx1.vin.size(); i++) tx1.vin[i].scriptSig = CScript(); for (unsigned int i = 0; i < tx2.vin.size(); i++) tx2.vin[i].scriptSig = CScript(); return CTransaction(tx1) == CTransaction(tx2); @@ -1885,10 +1959,10 @@ void CWallet::AvailableCoins(vector<COutput>& vCoins, bool fOnlyConfirmed, const if (nDepth == 0 && !pcoin->InMempool()) continue; - for (unsigned int i = 0; i < pcoin->vout.size(); i++) { - isminetype mine = IsMine(pcoin->vout[i]); + for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) { + isminetype mine = IsMine(pcoin->tx->vout[i]); if (!(IsSpent(wtxid, i)) && mine != ISMINE_NO && - !IsLockedCoin((*it).first, i) && (pcoin->vout[i].nValue > 0 || fIncludeZeroValue) && + !IsLockedCoin((*it).first, i) && (pcoin->tx->vout[i].nValue > 0 || fIncludeZeroValue) && (!coinControl || !coinControl->HasSelected() || coinControl->fAllowOtherInputs || coinControl->IsSelected(COutPoint((*it).first, i)))) vCoins.push_back(COutput(pcoin, i, nDepth, ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || @@ 
-1971,7 +2045,7 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, int nConfMine, int continue; int i = output.i; - CAmount n = pcoin->vout[i].nValue; + CAmount n = pcoin->tx->vout[i].nValue; pair<CAmount,pair<const CWalletTx*,unsigned int> > coin = make_pair(n,make_pair(pcoin, i)); @@ -2058,7 +2132,7 @@ bool CWallet::SelectCoins(const vector<COutput>& vAvailableCoins, const CAmount& { if (!out.fSpendable) continue; - nValueRet += out.tx->vout[out.i].nValue; + nValueRet += out.tx->tx->vout[out.i].nValue; setCoinsRet.insert(make_pair(out.tx, out.i)); } return (nValueRet >= nTargetValue); @@ -2078,9 +2152,9 @@ bool CWallet::SelectCoins(const vector<COutput>& vAvailableCoins, const CAmount& { const CWalletTx* pcoin = &it->second; // Clearly invalid input, fail - if (pcoin->vout.size() <= outpoint.n) + if (pcoin->tx->vout.size() <= outpoint.n) return false; - nValueFromPresetInputs += pcoin->vout[outpoint.n].nValue; + nValueFromPresetInputs += pcoin->tx->vout[outpoint.n].nValue; setPresetCoins.insert(make_pair(pcoin, outpoint.n)); } else return false; // TODO: Allow non-wallet inputs @@ -2136,10 +2210,10 @@ bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, bool ov return false; if (nChangePosInOut != -1) - tx.vout.insert(tx.vout.begin() + nChangePosInOut, wtx.vout[nChangePosInOut]); + tx.vout.insert(tx.vout.begin() + nChangePosInOut, wtx.tx->vout[nChangePosInOut]); // Add new txins (keeping original txin scriptSig/order) - BOOST_FOREACH(const CTxIn& txin, wtx.vin) + BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) { if (!coinControl.IsSelected(txin.prevout)) { @@ -2166,7 +2240,7 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt { if (nValue < 0 || recipient.nAmount < 0) { - strFailReason = _("Transaction amounts must be positive"); + strFailReason = _("Transaction amounts must not be negative"); return false; } nValue += recipient.nAmount; @@ -2174,9 +2248,9 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt if (recipient.fSubtractFeeFromAmount) nSubtractFeeFromAmount++; } - if (vecSend.empty() || nValue < 0) + if (vecSend.empty()) { - strFailReason = _("Transaction amounts must be positive"); + strFailReason = _("Transaction must have at least one recipient"); return false; } @@ -2279,7 +2353,7 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt } BOOST_FOREACH(PAIRTYPE(const CWalletTx*, unsigned int) pcoin, setCoins) { - CAmount nCredit = pcoin.first->vout[pcoin.second].nValue; + CAmount nCredit = pcoin.first->tx->vout[pcoin.second].nValue; //The coin age after the next block (depth+1) is used instead of the current, //reflecting an assumption the user would accept a bit more delay for //a chance at a free transaction. 
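All of the wtx.vout -> wtx.tx->vout and pcoin->vout -> pcoin->tx->vout rewrites in these wallet hunks follow from one structural change (visible further down in the wallet.h diff): CMerkleTx no longer inherits from CTransaction but carries the transaction behind a shared CTransactionRef. A minimal before/after sketch of the access pattern; the helper name is invented for illustration only.

// Illustration only; 'wtx' stands for any CWalletTx held in mapWallet.
CAmount ValueOfOutput(const CWalletTx& wtx, unsigned int i)
{
    // Old layout: CWalletTx (via CMerkleTx) *was* a CTransaction:
    //     return wtx.vout[i].nValue;
    // New layout: it wraps a CTransactionRef, i.e. std::shared_ptr<const CTransaction>:
    return wtx.tx->vout[i].nValue;
}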
@@ -2317,7 +2391,11 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt CPubKey vchPubKey; bool ret; ret = reservekey.GetReservedKey(vchPubKey); - assert(ret); // should never fail, as we just unlocked + if (!ret) + { + strFailReason = _("Keypool ran out, please call keypoolrefill first"); + return false; + } scriptChange = GetScriptForDestination(vchPubKey.GetID()); } @@ -2394,10 +2472,10 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt BOOST_FOREACH(const PAIRTYPE(const CWalletTx*,unsigned int)& coin, setCoins) { bool signSuccess; - const CScript& scriptPubKey = coin.first->vout[coin.second].scriptPubKey; + const CScript& scriptPubKey = coin.first->tx->vout[coin.second].scriptPubKey; SignatureData sigdata; if (sign) - signSuccess = ProduceSignature(TransactionSignatureCreator(this, &txNewConst, nIn, coin.first->vout[coin.second].nValue, SIGHASH_ALL), scriptPubKey, sigdata); + signSuccess = ProduceSignature(TransactionSignatureCreator(this, &txNewConst, nIn, coin.first->tx->vout[coin.second].nValue, SIGHASH_ALL), scriptPubKey, sigdata); else signSuccess = ProduceSignature(DummySignatureCreator(this), scriptPubKey, sigdata); @@ -2422,28 +2500,33 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt } // Embed the constructed transaction data in wtxNew. - *static_cast<CTransaction*>(&wtxNew) = CTransaction(txNew); + wtxNew.SetTx(MakeTransactionRef(std::move(txNew))); // Limit size - if (GetTransactionWeight(txNew) >= MAX_STANDARD_TX_WEIGHT) + if (GetTransactionWeight(wtxNew) >= MAX_STANDARD_TX_WEIGHT) { strFailReason = _("Transaction too large"); return false; } - dPriority = wtxNew.ComputePriority(dPriority, nBytes); + dPriority = wtxNew.tx->ComputePriority(dPriority, nBytes); + + // Allow to override the default confirmation target over the CoinControl instance + int currentConfirmationTarget = nTxConfirmTarget; + if (coinControl && coinControl->nConfirmTarget > 0) + currentConfirmationTarget = coinControl->nConfirmTarget; // Can we complete this as a free transaction? if (fSendFreeTransactions && nBytes <= MAX_FREE_TRANSACTION_CREATE_SIZE) { // Not enough fee: enough priority? - double dPriorityNeeded = mempool.estimateSmartPriority(nTxConfirmTarget); + double dPriorityNeeded = mempool.estimateSmartPriority(currentConfirmationTarget); // Require at least hard-coded AllowFree. 
if (dPriority >= dPriorityNeeded && AllowFree(dPriority)) break; } - CAmount nFeeNeeded = GetMinimumFee(nBytes, nTxConfirmTarget, mempool); + CAmount nFeeNeeded = GetMinimumFee(nBytes, currentConfirmationTarget, mempool); if (coinControl && nFeeNeeded > 0 && coinControl->nMinimumTotalFee > nFeeNeeded) { nFeeNeeded = coinControl->nMinimumTotalFee; } @@ -2474,11 +2557,11 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt /** * Call after CreateTransaction unless you want to abort */ -bool CWallet::CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CConnman* connman) +bool CWallet::CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CConnman* connman, CValidationState& state) { { LOCK2(cs_main, cs_wallet); - LogPrintf("CommitTransaction:\n%s", wtxNew.ToString()); + LogPrintf("CommitTransaction:\n%s", wtxNew.tx->ToString()); { // Take key pair from key pool so it won't be used again reservekey.KeepKey(); @@ -2488,7 +2571,7 @@ bool CWallet::CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CCon AddToWallet(wtxNew); // Notify that old coins are spent - BOOST_FOREACH(const CTxIn& txin, wtxNew.vin) + BOOST_FOREACH(const CTxIn& txin, wtxNew.tx->vin) { CWalletTx &coin = mapWallet[txin.prevout.hash]; coin.BindWallet(this); @@ -2502,12 +2585,12 @@ bool CWallet::CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CCon if (fBroadcastTransactions) { // Broadcast - if (!wtxNew.AcceptToMemoryPool(maxTxFee)) { - // This must not fail. The transaction has already been signed and recorded. - LogPrintf("CommitTransaction(): Error: Transaction not valid\n"); - return false; + if (!wtxNew.AcceptToMemoryPool(maxTxFee, state)) { + LogPrintf("CommitTransaction(): Transaction cannot be broadcast immediately, %s\n", state.GetRejectReason()); + // TODO: if we expect the failure to be long term or permanent, instead delete wtx from the wallet and return failure. + } else { + wtxNew.RelayWalletTransaction(connman); } - wtxNew.RelayWalletTransaction(connman); } } return true; @@ -2852,7 +2935,7 @@ std::map<CTxDestination, CAmount> CWallet::GetAddressBalances() { CWalletTx *pcoin = &walletEntry.second; - if (!CheckFinalTx(*pcoin) || !pcoin->IsTrusted()) + if (!pcoin->IsTrusted()) continue; if (pcoin->IsCoinBase() && pcoin->GetBlocksToMaturity() > 0) @@ -2862,15 +2945,15 @@ std::map<CTxDestination, CAmount> CWallet::GetAddressBalances() if (nDepth < (pcoin->IsFromMe(ISMINE_ALL) ? 0 : 1)) continue; - for (unsigned int i = 0; i < pcoin->vout.size(); i++) + for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) { CTxDestination addr; - if (!IsMine(pcoin->vout[i])) + if (!IsMine(pcoin->tx->vout[i])) continue; - if(!ExtractDestination(pcoin->vout[i].scriptPubKey, addr)) + if(!ExtractDestination(pcoin->tx->vout[i].scriptPubKey, addr)) continue; - CAmount n = IsSpent(walletEntry.first, i) ? 0 : pcoin->vout[i].nValue; + CAmount n = IsSpent(walletEntry.first, i) ? 
0 : pcoin->tx->vout[i].nValue; if (!balances.count(addr)) balances[addr] = 0; @@ -2892,16 +2975,16 @@ set< set<CTxDestination> > CWallet::GetAddressGroupings() { CWalletTx *pcoin = &walletEntry.second; - if (pcoin->vin.size() > 0) + if (pcoin->tx->vin.size() > 0) { bool any_mine = false; // group all input addresses with each other - BOOST_FOREACH(CTxIn txin, pcoin->vin) + BOOST_FOREACH(CTxIn txin, pcoin->tx->vin) { CTxDestination address; if(!IsMine(txin)) /* If this input isn't mine, ignore it */ continue; - if(!ExtractDestination(mapWallet[txin.prevout.hash].vout[txin.prevout.n].scriptPubKey, address)) + if(!ExtractDestination(mapWallet[txin.prevout.hash].tx->vout[txin.prevout.n].scriptPubKey, address)) continue; grouping.insert(address); any_mine = true; @@ -2910,7 +2993,7 @@ set< set<CTxDestination> > CWallet::GetAddressGroupings() // group change with input addresses if (any_mine) { - BOOST_FOREACH(CTxOut txout, pcoin->vout) + BOOST_FOREACH(CTxOut txout, pcoin->tx->vout) if (IsChange(txout)) { CTxDestination txoutAddr; @@ -2927,11 +3010,11 @@ set< set<CTxDestination> > CWallet::GetAddressGroupings() } // group lone addrs by themselves - for (unsigned int i = 0; i < pcoin->vout.size(); i++) - if (IsMine(pcoin->vout[i])) + for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) + if (IsMine(pcoin->tx->vout[i])) { CTxDestination address; - if(!ExtractDestination(pcoin->vout[i].scriptPubKey, address)) + if(!ExtractDestination(pcoin->tx->vout[i].scriptPubKey, address)) continue; grouping.insert(address); groupings.insert(grouping); @@ -3198,7 +3281,7 @@ void CWallet::GetKeyBirthTimes(std::map<CKeyID, int64_t> &mapKeyBirth) const { if (blit != mapBlockIndex.end() && chainActive.Contains(blit->second)) { // ... which are already in a block int nHeight = blit->second->nHeight; - BOOST_FOREACH(const CTxOut &txout, wtx.vout) { + BOOST_FOREACH(const CTxOut &txout, wtx.tx->vout) { // iterate over all their outputs CAffectedKeysVisitor(*this, vAffected).Process(txout.scriptPubKey); BOOST_FOREACH(const CKeyID &keyid, vAffected) { @@ -3472,6 +3555,16 @@ bool CWallet::InitLoadWallet() return true; } +void CWallet::postInitProcess(boost::thread_group& threadGroup) +{ + // Add wallet transactions that aren't already in a block to mempool + // Do this here as mempool requires genesis block to be loaded + ReacceptWalletTransactions(); + + // Run a thread to flush wallet periodically + threadGroup.create_thread(boost::bind(&ThreadFlushWalletDB, boost::ref(this->strWalletFile))); +} + bool CWallet::ParameterInteraction() { if (GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) @@ -3481,6 +3574,16 @@ bool CWallet::ParameterInteraction() LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -walletbroadcast=0\n", __func__); } + if (GetBoolArg("-salvagewallet", false) && SoftSetBoolArg("-rescan", true)) { + // Rewrite just private keys: rescan to find transactions + LogPrintf("%s: parameter interaction: -salvagewallet=1 -> setting -rescan=1\n", __func__); + } + + // -zapwallettx implies a rescan + if (GetBoolArg("-zapwallettxes", false) && SoftSetBoolArg("-rescan", true)) { + LogPrintf("%s: parameter interaction: -zapwallettxes=<mode> -> setting -rescan=1\n", __func__); + } + if (GetBoolArg("-sysperms", false)) return InitError("-sysperms is not allowed in combination with enabled wallet functionality"); if (GetArg("-prune", 0) && GetBoolArg("-rescan", false)) @@ -3649,8 +3752,7 @@ int CMerkleTx::GetBlocksToMaturity() const } -bool CMerkleTx::AcceptToMemoryPool(const CAmount& nAbsurdFee) +bool 
CMerkleTx::AcceptToMemoryPool(const CAmount& nAbsurdFee, CValidationState& state) { - CValidationState state; return ::AcceptToMemoryPool(mempool, state, *this, true, NULL, false, nAbsurdFee); } diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 3b37f7cb1f..f7103b6a80 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -27,6 +27,7 @@ #include <vector> #include <boost/shared_ptr.hpp> +#include <boost/thread.hpp> extern CWallet* pwalletMain; @@ -53,7 +54,7 @@ static const bool DEFAULT_SPEND_ZEROCONF_CHANGE = true; //! Default for -sendfreetransactions static const bool DEFAULT_SEND_FREE_TRANSACTIONS = false; //! -txconfirmtarget default -static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 2; +static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 6; //! -walletrbf default static const bool DEFAULT_WALLET_RBF = false; //! Largest (in bytes) free transaction we're willing to create @@ -99,8 +100,9 @@ public: ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { - if (!(nType & SER_GETHASH)) + inline void SerializationOp(Stream& s, Operation ser_action) { + int nVersion = s.GetVersion(); + if (!(s.GetType() & SER_GETHASH)) READWRITE(nVersion); READWRITE(nTime); READWRITE(vchPubKey); @@ -133,7 +135,7 @@ struct CRecipient typedef std::map<std::string, std::string> mapValue_t; -static void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue) +static inline void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue) { if (!mapValue.count("n")) { @@ -144,7 +146,7 @@ static void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue) } -static void WriteOrderPos(const int64_t& nOrderPos, mapValue_t& mapValue) +static inline void WriteOrderPos(const int64_t& nOrderPos, mapValue_t& mapValue) { if (nOrderPos == -1) return; @@ -159,13 +161,14 @@ struct COutputEntry }; /** A transaction with a merkle branch linking it to the block chain. */ -class CMerkleTx : public CTransaction +class CMerkleTx { private: /** Constant used in hashBlock to indicate tx has been abandoned */ static const uint256 ABANDON_HASH; public: + CTransactionRef tx; uint256 hashBlock; /* An nIndex == -1 means that hashBlock (in nonzero) refers to the earliest @@ -177,26 +180,37 @@ public: CMerkleTx() { + SetTx(MakeTransactionRef()); Init(); } - CMerkleTx(const CTransaction& txIn) : CTransaction(txIn) + CMerkleTx(CTransactionRef arg) { + SetTx(std::move(arg)); Init(); } + /** Helper conversion operator to allow passing CMerkleTx where CTransaction is expected. + * TODO: adapt callers and remove this operator. */ + operator const CTransaction&() const { return *tx; } + void Init() { hashBlock = uint256(); nIndex = -1; } + void SetTx(CTransactionRef arg) + { + tx = std::move(arg); + } + ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) { + inline void SerializationOp(Stream& s, Operation ser_action) { std::vector<uint256> vMerkleBranch; // For compatibility with older versions. - READWRITE(*(CTransaction*)this); + READWRITE(tx); READWRITE(hashBlock); READWRITE(vMerkleBranch); READWRITE(nIndex); @@ -215,10 +229,13 @@ public: bool IsInMainChain() const { const CBlockIndex *pindexRet; return GetDepthInMainChain(pindexRet) > 0; } int GetBlocksToMaturity() const; /** Pass this transaction to the mempool. Fails if absolute fee exceeds absurd fee. 
-    bool AcceptToMemoryPool(const CAmount& nAbsurdFee);
+    bool AcceptToMemoryPool(const CAmount& nAbsurdFee, CValidationState& state);
     bool hashUnset() const { return (hashBlock.IsNull() || hashBlock == ABANDON_HASH); }
     bool isAbandoned() const { return (hashBlock == ABANDON_HASH); }
     void setAbandoned() { hashBlock = ABANDON_HASH; }
+
+    const uint256& GetHash() const { return tx->GetHash(); }
+    bool IsCoinBase() const { return tx->IsCoinBase(); }
 };
 
 /**
@@ -265,17 +282,7 @@ public:
         Init(NULL);
     }
 
-    CWalletTx(const CWallet* pwalletIn)
-    {
-        Init(pwalletIn);
-    }
-
-    CWalletTx(const CWallet* pwalletIn, const CMerkleTx& txIn) : CMerkleTx(txIn)
-    {
-        Init(pwalletIn);
-    }
-
-    CWalletTx(const CWallet* pwalletIn, const CTransaction& txIn) : CMerkleTx(txIn)
+    CWalletTx(const CWallet* pwalletIn, CTransactionRef arg) : CMerkleTx(std::move(arg))
     {
         Init(pwalletIn);
     }
@@ -314,7 +321,7 @@ public:
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
-    inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+    inline void SerializationOp(Stream& s, Operation ser_action) {
         if (ser_action.ForRead())
             Init(NULL);
         char fSpent = false;
@@ -447,8 +454,9 @@ public:
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
-    inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
-        if (!(nType & SER_GETHASH))
+    inline void SerializationOp(Stream& s, Operation ser_action) {
+        int nVersion = s.GetVersion();
+        if (!(s.GetType() & SER_GETHASH))
             READWRITE(nVersion);
         READWRITE(vchPrivKey);
         READWRITE(nTimeCreated);
@@ -492,8 +500,9 @@ public:
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
-    inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
-        if (!(nType & SER_GETHASH))
+    inline void SerializationOp(Stream& s, Operation ser_action) {
+        int nVersion = s.GetVersion();
+        if (!(s.GetType() & SER_GETHASH))
             READWRITE(nVersion);
         //! Note: strAccount is serialized as part of the key, not here.
         READWRITE(nCreditDebit);
@@ -506,7 +515,7 @@ public:
 
         if (!(mapValue.empty() && _ssExtra.empty()))
         {
-            CDataStream ss(nType, nVersion);
+            CDataStream ss(s.GetType(), s.GetVersion());
             ss.insert(ss.begin(), '\0');
             ss << mapValue;
             ss.insert(ss.end(), _ssExtra.begin(), _ssExtra.end());
@@ -522,7 +531,7 @@ public:
             mapValue.clear();
             if (std::string::npos != nSepPos)
             {
-                CDataStream ss(std::vector<char>(strComment.begin() + nSepPos + 1, strComment.end()), nType, nVersion);
+                CDataStream ss(std::vector<char>(strComment.begin() + nSepPos + 1, strComment.end()), s.GetType(), s.GetVersion());
                 ss >> mapValue;
                 _ssExtra = std::vector<char>(ss.begin(), ss.end());
             }
@@ -596,7 +605,7 @@ public:
      */
     mutable CCriticalSection cs_wallet;
 
-    std::string strWalletFile;
+    const std::string strWalletFile;
 
     void LoadKeyPool(int nIndex, const CKeyPool &keypool)
     {
@@ -621,11 +630,9 @@ public:
         SetNull();
     }
 
-    CWallet(const std::string& strWalletFileIn)
+    CWallet(const std::string& strWalletFileIn) : strWalletFile(strWalletFileIn)
     {
         SetNull();
-
-        strWalletFile = strWalletFileIn;
         fFileBacked = true;
     }
 
@@ -774,7 +781,7 @@ public:
      */
     bool CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletTx& wtxNew, CReserveKey& reservekey, CAmount& nFeeRet, int& nChangePosInOut,
                            std::string& strFailReason, const CCoinControl *coinControl = NULL, bool sign = true);
-    bool CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CConnman* connman);
+    bool CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CConnman* connman, CValidationState& state);
 
     void ListAccountCreditDebit(const std::string& strAccount, std::list<CAccountingEntry>& entries);
     bool AddAccountingEntry(const CAccountingEntry&);
@@ -912,6 +919,12 @@ public:
     /* Initializes the wallet, returns a new CWallet instance or a null pointer in case of an error */
     static bool InitLoadWallet();
 
+    /**
+     * Wallet post-init setup
+     * Gives the wallet a chance to register repetitive tasks and complete post-init tasks
+     */
+    void postInitProcess(boost::thread_group& threadGroup);
+
     /* Wallets parameter interaction */
     static bool ParameterInteraction();
 
@@ -979,8 +992,9 @@ public:
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
-    inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
-        if (!(nType & SER_GETHASH))
+    inline void SerializationOp(Stream& s, Operation ser_action) {
+        int nVersion = s.GetVersion();
+        if (!(s.GetType() & SER_GETHASH))
            READWRITE(nVersion);
         READWRITE(vchPubKey);
     }
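Aside on the wallet.h hunks above: CMerkleTx (and with it CWalletTx) no longer derives from CTransaction; it now owns a shared, immutable CTransactionRef, and a temporary conversion operator keeps call sites that expect a const CTransaction& compiling until they are adapted. The sketch below only illustrates that wrapper pattern and is not Bitcoin Core code; Tx, TxRef and MerkleTxLike are invented stand-ins for CTransaction, CTransactionRef and CMerkleTx.

    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for CTransaction: an immutable value type.
    struct Tx {
        std::vector<int> vout;   // stand-in for the real vector of outputs
    };

    // Stand-in for CTransactionRef: shared ownership of an immutable transaction.
    using TxRef = std::shared_ptr<const Tx>;

    static TxRef MakeTxRef() { return std::make_shared<Tx>(); }

    // Stand-in for the refactored CMerkleTx: holds a TxRef instead of deriving from Tx.
    class MerkleTxLike {
    public:
        TxRef tx;

        MerkleTxLike() { SetTx(MakeTxRef()); }
        explicit MerkleTxLike(TxRef arg) { SetTx(std::move(arg)); }

        void SetTx(TxRef arg) { tx = std::move(arg); }

        // Temporary bridge so functions taking `const Tx&` still accept the wrapper,
        // mirroring the `operator const CTransaction&()` added in the diff.
        operator const Tx&() const { return *tx; }

        // Small forwarders (like GetHash()/IsCoinBase() in the diff) reach through the ref.
        std::size_t CountOutputs() const { return tx->vout.size(); }
    };

    // An "old-style" caller that still expects a plain transaction reference.
    static std::size_t CountOutputsLegacy(const Tx& t) { return t.vout.size(); }

    int main() {
        MerkleTxLike wtx(MakeTxRef());
        // New-style access goes through the shared ref, as in `pcoin->tx->vout` above.
        std::size_t a = wtx.tx->vout.size();
        // Old-style access still compiles via the conversion operator.
        std::size_t b = CountOutputsLegacy(wtx);
        return static_cast<int>(a + b);
    }

The same reach-through is what the wallet.cpp hunks above perform when they replace pcoin->vout with pcoin->tx->vout.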
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 80bfe8255d..0395e4a072 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -7,7 +7,7 @@
 
 #include "base58.h"
 #include "consensus/validation.h"
-#include "main.h" // For CheckTransaction
+#include "validation.h" // For CheckTransaction
 #include "protocol.h"
 #include "serialize.h"
 #include "sync.h"
@@ -251,82 +251,6 @@ void CWalletDB::ListAccountCreditDebit(const string& strAccount, list<CAccountin
     pcursor->close();
 }
 
-DBErrors CWalletDB::ReorderTransactions(CWallet* pwallet)
-{
-    LOCK(pwallet->cs_wallet);
-    // Old wallets didn't have any defined order for transactions
-    // Probably a bad idea to change the output of this
-
-    // First: get all CWalletTx and CAccountingEntry into a sorted-by-time multimap.
-    typedef pair<CWalletTx*, CAccountingEntry*> TxPair;
-    typedef multimap<int64_t, TxPair > TxItems;
-    TxItems txByTime;
-
-    for (map<uint256, CWalletTx>::iterator it = pwallet->mapWallet.begin(); it != pwallet->mapWallet.end(); ++it)
-    {
-        CWalletTx* wtx = &((*it).second);
-        txByTime.insert(make_pair(wtx->nTimeReceived, TxPair(wtx, (CAccountingEntry*)0)));
-    }
-    list<CAccountingEntry> acentries;
-    ListAccountCreditDebit("", acentries);
-    BOOST_FOREACH(CAccountingEntry& entry, acentries)
-    {
-        txByTime.insert(make_pair(entry.nTime, TxPair((CWalletTx*)0, &entry)));
-    }
-
-    int64_t& nOrderPosNext = pwallet->nOrderPosNext;
-    nOrderPosNext = 0;
-    std::vector<int64_t> nOrderPosOffsets;
-    for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it)
-    {
-        CWalletTx *const pwtx = (*it).second.first;
-        CAccountingEntry *const pacentry = (*it).second.second;
-        int64_t& nOrderPos = (pwtx != 0) ? pwtx->nOrderPos : pacentry->nOrderPos;
-
-        if (nOrderPos == -1)
-        {
-            nOrderPos = nOrderPosNext++;
-            nOrderPosOffsets.push_back(nOrderPos);
-
-            if (pwtx)
-            {
-                if (!WriteTx(*pwtx))
-                    return DB_LOAD_FAIL;
-            }
-            else
-                if (!WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
-                    return DB_LOAD_FAIL;
-        }
-        else
-        {
-            int64_t nOrderPosOff = 0;
-            BOOST_FOREACH(const int64_t& nOffsetStart, nOrderPosOffsets)
-            {
-                if (nOrderPos >= nOffsetStart)
-                    ++nOrderPosOff;
-            }
-            nOrderPos += nOrderPosOff;
-            nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1);
-
-            if (!nOrderPosOff)
-                continue;
-
-            // Since we're changing the order, write it back
-            if (pwtx)
-            {
-                if (!WriteTx(*pwtx))
-                    return DB_LOAD_FAIL;
-            }
-            else
-                if (!WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
-                    return DB_LOAD_FAIL;
-        }
-    }
-    WriteOrderPosNext(nOrderPosNext);
-
-    return DB_LOAD_OK;
-}
-
 class CWalletScanState {
 public:
     unsigned int nKeys;
@@ -711,7 +635,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet)
             WriteVersion(CLIENT_VERSION);
 
         if (wss.fAnyUnordered)
-            result = ReorderTransactions(pwallet);
+            result = pwallet->ReorderTransactions();
 
         pwallet->laccentries.clear();
         ListAccountCreditDebit("*", pwallet->laccentries);
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 9c9d4922a4..eb25ac613d 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -54,7 +54,7 @@ public:
     CHDChain() { SetNull(); }
     ADD_SERIALIZE_METHODS;
     template <typename Stream, typename Operation>
-    inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+    inline void SerializationOp(Stream& s, Operation ser_action)
     {
         READWRITE(this->nVersion);
         READWRITE(nExternalChainCounter);
@@ -93,7 +93,7 @@ public:
 
     ADD_SERIALIZE_METHODS;
     template <typename Stream, typename Operation>
-    inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+    inline void SerializationOp(Stream& s, Operation ser_action) {
         READWRITE(this->nVersion);
         READWRITE(nCreateTime);
         if (this->nVersion >= VERSION_WITH_HDDATA)
@@ -153,6 +153,7 @@ public:
 
     /// This writes directly to the database, and will not update the CWallet's cached accounting entries!
     /// Use wallet.AddAccountingEntry instead, to write *and* update its caches.
+    bool WriteAccountingEntry(const uint64_t nAccEntryNum, const CAccountingEntry& acentry);
     bool WriteAccountingEntry_Backend(const CAccountingEntry& acentry);
     bool ReadAccount(const std::string& strAccount, CAccount& account);
     bool WriteAccount(const std::string& strAccount, const CAccount& account);
@@ -165,7 +166,6 @@ public:
     CAmount GetAccountCreditDebit(const std::string& strAccount);
     void ListAccountCreditDebit(const std::string& strAccount, std::list<CAccountingEntry>& acentries);
 
-    DBErrors ReorderTransactions(CWallet* pwallet);
     DBErrors LoadWallet(CWallet* pwallet);
     DBErrors FindWalletTx(CWallet* pwallet, std::vector<uint256>& vTxHash, std::vector<CWalletTx>& vWtx);
     DBErrors ZapWalletTx(CWallet* pwallet, std::vector<CWalletTx>& vWtx);
@@ -180,7 +180,6 @@ private:
     CWalletDB(const CWalletDB&);
     void operator=(const CWalletDB&);
 
-    bool WriteAccountingEntry(const uint64_t nAccEntryNum, const CAccountingEntry& acentry);
 };
 
 void ThreadFlushWalletDB(const std::string& strFile);
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index a0196fe184..a7e20a8356 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -6,7 +6,7 @@
 #include "zmqpublishnotifier.h"
 
 #include "version.h"
-#include "main.h"
+#include "validation.h"
 #include "streams.h"
 #include "util.h"
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index b6c907980f..a11256dfd5 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -3,9 +3,11 @@
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "chainparams.h"
+#include "streams.h"
 #include "zmqpublishnotifier.h"
-#include "main.h"
+#include "validation.h"
 #include "util.h"
+#include "rpc/server.h"
 
 static std::multimap<std::string, CZMQAbstractPublishNotifier*> mapPublishNotifiers;
@@ -165,7 +167,7 @@ bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
     LogPrint("zmq", "zmq: Publish rawblock %s\n", pindex->GetBlockHash().GetHex());
 
     const Consensus::Params& consensusParams = Params().GetConsensus();
-    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
     {
         LOCK(cs_main);
         CBlock block;
@@ -185,7 +187,7 @@ bool CZMQPublishRawTransactionNotifier::NotifyTransaction(const CTransaction &tr
 {
     uint256 hash = transaction.GetHash();
     LogPrint("zmq", "zmq: Publish rawtx %s\n", hash.GetHex());
-    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
     ss << transaction;
     return SendMessage(MSG_RAWTX, &(*ss.begin()), ss.size());
 }
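Aside on the recurring SerializationOp change in the wallet.h and walletdb.h hunks above: the nType/nVersion parameters are dropped, and implementations now query the stream itself via s.GetType() and s.GetVersion(). The following is a rough, self-contained illustration of that calling convention only; ToyStream, ToyKeyPool and TOY_SER_GETHASH are invented for the example and merely mimic the shape of the real CDataStream/SER_GETHASH API without doing any byte I/O.

    #include <cstdint>
    #include <iostream>

    // Stand-in for the SER_GETHASH serialization-type flag.
    static const int TOY_SER_GETHASH = 1 << 2;

    // Hypothetical stream that carries its own serialization type and version,
    // which is what lets SerializationOp drop its nType/nVersion parameters.
    class ToyStream {
    public:
        ToyStream(int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) {}
        int GetType() const { return nType; }
        int GetVersion() const { return nVersion; }
    private:
        int nType;
        int nVersion;
    };

    struct ToyKeyPool {
        int64_t nTime = 0;

        // Before: SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
        // After (as in the diff): ask the stream for type and version instead.
        template <typename Stream>
        void SerializationOp(Stream& s) const {
            int nVersion = s.GetVersion();
            if (!(s.GetType() & TOY_SER_GETHASH))
                std::cout << "serialize nVersion=" << nVersion << "\n";
            std::cout << "serialize nTime=" << nTime << "\n";
        }
    };

    int main() {
        ToyStream s(/*nType=*/0, /*nVersion=*/70014);
        ToyKeyPool pool;
        pool.SerializationOp(s);   // no separate nType/nVersion arguments any more
        return 0;
    }

The same idea is why the CAccountingEntry hunks above construct their inner CDataStream from s.GetType() and s.GetVersion() rather than from the removed parameters.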