Diffstat (limited to 'src')
75 files changed, 726 insertions, 260 deletions
diff --git a/src/.clang-tidy b/src/.clang-tidy index b2e6914548..3ad463e99b 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -16,4 +16,4 @@ WarningsAsErrors: '*' CheckOptions: - key: performance-move-const-arg.CheckTriviallyCopyableMove value: false -HeaderFilterRegex: './qt' +HeaderFilterRegex: '.' diff --git a/src/Makefile.am b/src/Makefile.am index 35b0ad24c3..5830090ada 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -20,6 +20,7 @@ noinst_LTLIBRARIES = bin_PROGRAMS = noinst_PROGRAMS = +check_PROGRAMS = TESTS = BENCHMARKS = diff --git a/src/Makefile.minisketch.include b/src/Makefile.minisketch.include index b337f48349..1363bec34e 100644 --- a/src/Makefile.minisketch.include +++ b/src/Makefile.minisketch.include @@ -31,7 +31,7 @@ if ENABLE_TESTS if !ENABLE_FUZZ MINISKETCH_TEST = minisketch/test TESTS += $(MINISKETCH_TEST) -noinst_PROGRAMS += $(MINISKETCH_TEST) +check_PROGRAMS += $(MINISKETCH_TEST) minisketch_test_SOURCES = $(MINISKETCH_TEST_SOURCES_INT) minisketch_test_CPPFLAGS = $(AM_CPPFLAGS) $(LIBMINISKETCH_CPPFLAGS) diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 7c954dbf3a..7be13c8f1e 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -190,7 +190,7 @@ std::optional<bilingual_str> LoadAddrman(const NetGroupManager& netgroupman, con const auto path_addr{args.GetDataDirNet() / "peers.dat"}; try { DeserializeFileDB(path_addr, *addrman, CLIENT_VERSION); - LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman->size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start)); + LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman->Size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start)); } catch (const DbNotFoundError&) { // Addrman can be in an inconsistent state after failure, reset it addrman = std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman); diff --git a/src/addrman.cpp b/src/addrman.cpp index f86bdfa3df..a740760faf 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -291,6 +291,7 @@ void AddrManImpl::Unserialize(Stream& s_) mapAddr[info] = n; info.nRandomPos = vRandom.size(); vRandom.push_back(n); + m_network_counts[info.GetNetwork()].n_new++; } nIdCount = nNew; @@ -310,6 +311,7 @@ void AddrManImpl::Unserialize(Stream& s_) mapAddr[info] = nIdCount; vvTried[nKBucket][nKBucketPos] = nIdCount; nIdCount++; + m_network_counts[info.GetNetwork()].n_tried++; } else { nLost++; } @@ -425,6 +427,8 @@ AddrInfo* AddrManImpl::Create(const CAddress& addr, const CNetAddr& addrSource, mapAddr[addr] = nId; mapInfo[nId].nRandomPos = vRandom.size(); vRandom.push_back(nId); + nNew++; + m_network_counts[addr.GetNetwork()].n_new++; if (pnId) *pnId = nId; return &mapInfo[nId]; @@ -464,6 +468,7 @@ void AddrManImpl::Delete(int nId) assert(info.nRefCount == 0); SwapRandom(info.nRandomPos, vRandom.size() - 1); + m_network_counts[info.GetNetwork()].n_new--; vRandom.pop_back(); mapAddr.erase(info); mapInfo.erase(nId); @@ -504,6 +509,7 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId) } } nNew--; + m_network_counts[info.GetNetwork()].n_new--; assert(info.nRefCount == 0); @@ -522,6 +528,7 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId) infoOld.fInTried = false; vvTried[nKBucket][nKBucketPos] = -1; nTried--; + m_network_counts[infoOld.GetNetwork()].n_tried--; // find which new bucket it belongs to int nUBucket = infoOld.GetNewBucket(nKey, m_netgroupman); @@ -533,6 +540,7 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId) infoOld.nRefCount = 1; vvNew[nUBucket][nUBucketPos] = nIdEvict; 
nNew++; + m_network_counts[infoOld.GetNetwork()].n_new++; LogPrint(BCLog::ADDRMAN, "Moved %s from tried[%i][%i] to new[%i][%i] to make space\n", infoOld.ToString(), nKBucket, nKBucketPos, nUBucket, nUBucketPos); } @@ -541,6 +549,7 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId) vvTried[nKBucket][nKBucketPos] = nId; nTried++; info.fInTried = true; + m_network_counts[info.GetNetwork()].n_tried++; } bool AddrManImpl::AddSingle(const CAddress& addr, const CNetAddr& source, std::chrono::seconds time_penalty) @@ -591,7 +600,6 @@ bool AddrManImpl::AddSingle(const CAddress& addr, const CNetAddr& source, std::c } else { pinfo = Create(addr, source, &nId); pinfo->nTime = std::max(NodeSeconds{0s}, pinfo->nTime - time_penalty); - nNew++; } int nUBucket = pinfo->GetNewBucket(nKey, source, m_netgroupman); @@ -962,6 +970,28 @@ std::optional<AddressPosition> AddrManImpl::FindAddressEntry_(const CAddress& ad } } +size_t AddrManImpl::Size_(std::optional<Network> net, std::optional<bool> in_new) const +{ + AssertLockHeld(cs); + + if (!net.has_value()) { + if (in_new.has_value()) { + return *in_new ? nNew : nTried; + } else { + return vRandom.size(); + } + } + if (auto it = m_network_counts.find(*net); it != m_network_counts.end()) { + auto net_count = it->second; + if (in_new.has_value()) { + return *in_new ? net_count.n_new : net_count.n_tried; + } else { + return net_count.n_new + net_count.n_tried; + } + } + return 0; +} + void AddrManImpl::Check() const { AssertLockHeld(cs); @@ -986,6 +1016,7 @@ int AddrManImpl::CheckAddrman() const std::unordered_set<int> setTried; std::unordered_map<int, int> mapNew; + std::unordered_map<Network, NewTriedCount> local_counts; if (vRandom.size() != (size_t)(nTried + nNew)) return -7; @@ -1000,12 +1031,14 @@ int AddrManImpl::CheckAddrman() const if (info.nRefCount) return -2; setTried.insert(n); + local_counts[info.GetNetwork()].n_tried++; } else { if (info.nRefCount < 0 || info.nRefCount > ADDRMAN_NEW_BUCKETS_PER_ADDRESS) return -3; if (!info.nRefCount) return -4; mapNew[n] = info.nRefCount; + local_counts[info.GetNetwork()].n_new++; } const auto it{mapAddr.find(info)}; if (it == mapAddr.end() || it->second != n) { @@ -1065,13 +1098,27 @@ int AddrManImpl::CheckAddrman() const if (nKey.IsNull()) return -16; + // It's possible that m_network_counts may have all-zero entries that local_counts + // doesn't have if addrs from a network were being added and then removed again in the past. 
+ if (m_network_counts.size() < local_counts.size()) { + return -20; + } + for (const auto& [net, count] : m_network_counts) { + if (local_counts[net].n_new != count.n_new || local_counts[net].n_tried != count.n_tried) { + return -21; + } + } + return 0; } -size_t AddrManImpl::size() const +size_t AddrManImpl::Size(std::optional<Network> net, std::optional<bool> in_new) const { - LOCK(cs); // TODO: Cache this in an atomic to avoid this overhead - return vRandom.size(); + LOCK(cs); + Check(); + auto ret = Size_(net, in_new); + Check(); + return ret; } bool AddrManImpl::Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, std::chrono::seconds time_penalty) @@ -1185,9 +1232,9 @@ template void AddrMan::Unserialize(CHashVerifier<CAutoFile>& s); template void AddrMan::Unserialize(CDataStream& s); template void AddrMan::Unserialize(CHashVerifier<CDataStream>& s); -size_t AddrMan::size() const +size_t AddrMan::Size(std::optional<Network> net, std::optional<bool> in_new) const { - return m_impl->size(); + return m_impl->Size(net, in_new); } bool AddrMan::Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, std::chrono::seconds time_penalty) diff --git a/src/addrman.h b/src/addrman.h index 0f1f808fa1..2a074268be 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -99,8 +99,14 @@ public: template <typename Stream> void Unserialize(Stream& s_); - //! Return the number of (unique) addresses in all tables. - size_t size() const; + /** + * Return size information about addrman. + * + * @param[in] net Select addresses only from specified network (nullopt = all) + * @param[in] in_new Select addresses only from one table (true = new, false = tried, nullopt = both) + * @return Number of unique addresses that match specified options. + */ + size_t Size(std::optional<Network> net = {}, std::optional<bool> in_new = {}) const; /** * Attempt to add one or more addresses to addrman's new table. diff --git a/src/addrman_impl.h b/src/addrman_impl.h index 39754b673e..94fe81aca9 100644 --- a/src/addrman_impl.h +++ b/src/addrman_impl.h @@ -112,7 +112,7 @@ public: template <typename Stream> void Unserialize(Stream& s_) EXCLUSIVE_LOCKS_REQUIRED(!cs); - size_t size() const EXCLUSIVE_LOCKS_REQUIRED(!cs); + size_t Size(std::optional<Network> net, std::optional<bool> in_new) const EXCLUSIVE_LOCKS_REQUIRED(!cs); bool Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, std::chrono::seconds time_penalty) EXCLUSIVE_LOCKS_REQUIRED(!cs); @@ -215,6 +215,14 @@ private: /** Reference to the netgroup manager. netgroupman must be constructed before addrman and destructed after. */ const NetGroupManager& m_netgroupman; + struct NewTriedCount { + size_t n_new; + size_t n_tried; + }; + + /** Number of entries in addrman per network and new/tried table. */ + std::unordered_map<Network, NewTriedCount> m_network_counts GUARDED_BY(cs); + //! Find an entry. AddrInfo* Find(const CService& addr, int* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -257,6 +265,8 @@ private: std::optional<AddressPosition> FindAddressEntry_(const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(cs); + size_t Size_(std::optional<Network> net, std::optional<bool> in_new) const EXCLUSIVE_LOCKS_REQUIRED(cs); + //! Consistency check, taking into account m_consistency_check_ratio. //! Will std::abort if an inconsistency is detected. 
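A minimal usage sketch of the Size() overloads documented in the addrman.h hunk above. The AddrMan instance name is assumed for illustration; the enumerators come from the existing Network enum.

    // Illustrative call sites for the new Size() API (addrman is an existing AddrMan):
    size_t total      = addrman.Size();                               // all networks, new + tried
    size_t tried_only = addrman.Size(std::nullopt, /*in_new=*/false); // tried table, all networks
    size_t new_i2p    = addrman.Size(NET_I2P, /*in_new=*/true);       // new table, I2P only
    size_t any_onion  = addrman.Size(NET_ONION);                      // both tables, onion only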
void Check() const EXCLUSIVE_LOCKS_REQUIRED(cs); diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp index bd524e7458..cf8d807d7b 100644 --- a/src/bench/crypto_hash.cpp +++ b/src/bench/crypto_hash.cpp @@ -18,7 +18,7 @@ /* Number of bytes to hash per iteration */ static const uint64_t BUFFER_SIZE = 1000*1000; -static void RIPEMD160(benchmark::Bench& bench) +static void BenchRIPEMD160(benchmark::Bench& bench) { uint8_t hash[CRIPEMD160::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); @@ -150,7 +150,7 @@ static void MuHashPrecompute(benchmark::Bench& bench) }); } -BENCHMARK(RIPEMD160, benchmark::PriorityLevel::HIGH); +BENCHMARK(BenchRIPEMD160, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA1, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA256, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA512, benchmark::PriorityLevel::HIGH); diff --git a/src/coins.cpp b/src/coins.cpp index 976118e23c..e98bf816ab 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -13,7 +13,7 @@ bool CCoinsView::GetCoin(const COutPoint &outpoint, Coin &coin) const { return false; } uint256 CCoinsView::GetBestBlock() const { return uint256(); } std::vector<uint256> CCoinsView::GetHeadBlocks() const { return std::vector<uint256>(); } -bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return false; } +bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) { return false; } std::unique_ptr<CCoinsViewCursor> CCoinsView::Cursor() const { return nullptr; } bool CCoinsView::HaveCoin(const COutPoint &outpoint) const @@ -28,11 +28,11 @@ bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base-> uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); } std::vector<uint256> CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); } void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; } -bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return base->BatchWrite(mapCoins, hashBlock); } +bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) { return base->BatchWrite(mapCoins, hashBlock, erase); } std::unique_ptr<CCoinsViewCursor> CCoinsViewBacked::Cursor() const { return base->Cursor(); } size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); } -CCoinsViewCache::CCoinsViewCache(CCoinsView *baseIn) : CCoinsViewBacked(baseIn), cachedCoinsUsage(0) {} +CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn) : CCoinsViewBacked(baseIn) {} size_t CCoinsViewCache::DynamicMemoryUsage() const { return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage; @@ -176,8 +176,10 @@ void CCoinsViewCache::SetBestBlock(const uint256 &hashBlockIn) { hashBlock = hashBlockIn; } -bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn) { - for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); it = mapCoins.erase(it)) { +bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn, bool erase) { + for (CCoinsMap::iterator it = mapCoins.begin(); + it != mapCoins.end(); + it = erase ? mapCoins.erase(it) : std::next(it)) { // Ignore non-dirty entries (optimization). if (!(it->second.flags & CCoinsCacheEntry::DIRTY)) { continue; @@ -190,7 +192,14 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn // Create the coin in the parent cache, move the data up // and mark it as dirty. 
CCoinsCacheEntry& entry = cacheCoins[it->first]; - entry.coin = std::move(it->second.coin); + if (erase) { + // The `move` call here is purely an optimization; we rely on the + // `mapCoins.erase` call in the `for` expression to actually remove + // the entry from the child map. + entry.coin = std::move(it->second.coin); + } else { + entry.coin = it->second.coin; + } cachedCoinsUsage += entry.coin.DynamicMemoryUsage(); entry.flags = CCoinsCacheEntry::DIRTY; // We can mark it FRESH in the parent if it was FRESH in the child @@ -218,7 +227,14 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn } else { // A normal modification. cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage(); - itUs->second.coin = std::move(it->second.coin); + if (erase) { + // The `move` call here is purely an optimization; we rely on the + // `mapCoins.erase` call in the `for` expression to actually remove + // the entry from the child map. + itUs->second.coin = std::move(it->second.coin); + } else { + itUs->second.coin = it->second.coin; + } cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage(); itUs->second.flags |= CCoinsCacheEntry::DIRTY; // NOTE: It isn't safe to mark the coin as FRESH in the parent @@ -233,12 +249,32 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn } bool CCoinsViewCache::Flush() { - bool fOk = base->BatchWrite(cacheCoins, hashBlock); - cacheCoins.clear(); + bool fOk = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/true); + if (fOk && !cacheCoins.empty()) { + /* BatchWrite must erase all cacheCoins elements when erase=true. */ + throw std::logic_error("Not all cached coins were erased"); + } cachedCoinsUsage = 0; return fOk; } +bool CCoinsViewCache::Sync() +{ + bool fOk = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/false); + // Instead of clearing `cacheCoins` as we would in Flush(), just clear the + // FRESH/DIRTY flags of any coin that isn't spent. + for (auto it = cacheCoins.begin(); it != cacheCoins.end(); ) { + if (it->second.coin.IsSpent()) { + cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); + it = cacheCoins.erase(it); + } else { + it->second.flags = 0; + ++it; + } + } + return fOk; +} + void CCoinsViewCache::Uncache(const COutPoint& hash) { CCoinsMap::iterator it = cacheCoins.find(hash); diff --git a/src/coins.h b/src/coins.h index b0d6bdf333..710b8c7c83 100644 --- a/src/coins.h +++ b/src/coins.h @@ -176,7 +176,7 @@ public: //! Do a bulk modification (multiple Coin changes + BestBlock change). //! The passed mapCoins can be modified. - virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock); + virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true); //! Get a cursor to iterate over the whole state virtual std::unique_ptr<CCoinsViewCursor> Cursor() const; @@ -202,7 +202,7 @@ public: uint256 GetBestBlock() const override; std::vector<uint256> GetHeadBlocks() const override; void SetBackend(CCoinsView &viewIn); - bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; + bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override; std::unique_ptr<CCoinsViewCursor> Cursor() const override; size_t EstimateSize() const override; }; @@ -220,7 +220,7 @@ protected: mutable CCoinsMap cacheCoins; /* Cached dynamic memory usage for the inner Coin objects. 
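A rough sketch of how the two write-back paths differ after this change, assuming a backing view and a cache built on top of it (variable names are illustrative, not from the patch). Flush() keeps the old semantics; the new Sync() leaves unspent coins cached.

    CCoinsView base;               // backing view (a plain CCoinsView, used only for illustration)
    CCoinsViewCache cache{&base};
    // ... AddCoin()/SpendCoin() calls on `cache` ...

    cache.Flush(); // BatchWrite(..., /*erase=*/true): dirty entries are pushed to the base and
                   // cacheCoins is emptied
    cache.Sync();  // BatchWrite(..., /*erase=*/false): dirty entries are copied to the base; spent
                   // coins are erased locally, unspent ones stay cached with DIRTY/FRESH flags cleared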
*/ - mutable size_t cachedCoinsUsage; + mutable size_t cachedCoinsUsage{0}; public: CCoinsViewCache(CCoinsView *baseIn); @@ -235,7 +235,7 @@ public: bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; void SetBestBlock(const uint256 &hashBlock); - bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; + bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override; std::unique_ptr<CCoinsViewCursor> Cursor() const override { throw std::logic_error("CCoinsViewCache cursor iteration not supported."); } @@ -282,13 +282,23 @@ public: bool SpendCoin(const COutPoint &outpoint, Coin* moveto = nullptr); /** - * Push the modifications applied to this cache to its base. - * Failure to call this method before destruction will cause the changes to be forgotten. + * Push the modifications applied to this cache to its base and wipe local state. + * Failure to call this method or Sync() before destruction will cause the changes + * to be forgotten. * If false is returned, the state of this cache (and its backing view) will be undefined. */ bool Flush(); /** + * Push the modifications applied to this cache to its base while retaining + * the contents of this cache (except for spent coins, which we erase). + * Failure to call this method or Flush() before destruction will cause the changes + * to be forgotten. + * If false is returned, the state of this cache (and its backing view) will be undefined. + */ + bool Sync(); + + /** * Removes the UTXO with the given outpoint from the cache, if it is * not modified. */ diff --git a/src/core_write.cpp b/src/core_write.cpp index 91a6eb2864..300bd30e43 100644 --- a/src/core_write.cpp +++ b/src/core_write.cpp @@ -170,6 +170,8 @@ void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex, bool i void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry, bool include_hex, int serialize_flags, const CTxUndo* txundo, TxVerbosity verbosity) { + CHECK_NONFATAL(verbosity >= TxVerbosity::SHOW_DETAILS); + entry.pushKV("txid", tx.GetHash().GetHex()); entry.pushKV("hash", tx.GetWitnessHash().GetHex()); // Transaction version is actually unsigned in consensus checks, just signed in memory, diff --git a/src/crypto/ripemd160.cpp b/src/crypto/ripemd160.cpp index 29a4ad906f..a2f7c6e156 100644 --- a/src/crypto/ripemd160.cpp +++ b/src/crypto/ripemd160.cpp @@ -239,7 +239,7 @@ void Transform(uint32_t* s, const unsigned char* chunk) ////// RIPEMD160 -CRIPEMD160::CRIPEMD160() : bytes(0) +CRIPEMD160::CRIPEMD160() { ripemd160::Initialize(s); } diff --git a/src/crypto/ripemd160.h b/src/crypto/ripemd160.h index ae9c339181..fb631a66d2 100644 --- a/src/crypto/ripemd160.h +++ b/src/crypto/ripemd160.h @@ -14,7 +14,7 @@ class CRIPEMD160 private: uint32_t s[5]; unsigned char buf[64]; - uint64_t bytes; + uint64_t bytes{0}; public: static const size_t OUTPUT_SIZE = 20; diff --git a/src/crypto/sha1.cpp b/src/crypto/sha1.cpp index 1fb9bb2b72..2610108f60 100644 --- a/src/crypto/sha1.cpp +++ b/src/crypto/sha1.cpp @@ -146,7 +146,7 @@ void Transform(uint32_t* s, const unsigned char* chunk) ////// SHA1 -CSHA1::CSHA1() : bytes(0) +CSHA1::CSHA1() { sha1::Initialize(s); } diff --git a/src/crypto/sha1.h b/src/crypto/sha1.h index 4bd6c331a8..741cdaad58 100644 --- a/src/crypto/sha1.h +++ b/src/crypto/sha1.h @@ -14,7 +14,7 @@ class CSHA1 private: uint32_t s[5]; unsigned char buf[64]; - uint64_t bytes; + uint64_t bytes{0}; public: static const size_t OUTPUT_SIZE = 20; diff --git 
a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index 7cd5b3661b..a4eef36480 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -673,7 +673,7 @@ std::string SHA256AutoDetect() ////// SHA-256 -CSHA256::CSHA256() : bytes(0) +CSHA256::CSHA256() { sha256::Initialize(s); } diff --git a/src/crypto/sha256.h b/src/crypto/sha256.h index 9fd73becfd..7625508665 100644 --- a/src/crypto/sha256.h +++ b/src/crypto/sha256.h @@ -15,7 +15,7 @@ class CSHA256 private: uint32_t s[8]; unsigned char buf[64]; - uint64_t bytes; + uint64_t bytes{0}; public: static const size_t OUTPUT_SIZE = 32; diff --git a/src/crypto/sha512.cpp b/src/crypto/sha512.cpp index 8a822e0e7e..2713f06210 100644 --- a/src/crypto/sha512.cpp +++ b/src/crypto/sha512.cpp @@ -151,7 +151,7 @@ void Transform(uint64_t* s, const unsigned char* chunk) ////// SHA-512 -CSHA512::CSHA512() : bytes(0) +CSHA512::CSHA512() { sha512::Initialize(s); } diff --git a/src/crypto/sha512.h b/src/crypto/sha512.h index d8fa8d2e39..d2f7d6a05e 100644 --- a/src/crypto/sha512.h +++ b/src/crypto/sha512.h @@ -14,7 +14,7 @@ class CSHA512 private: uint64_t s[8]; unsigned char buf[128]; - uint64_t bytes; + uint64_t bytes{0}; public: static constexpr size_t OUTPUT_SIZE = 64; diff --git a/src/cuckoocache.h b/src/cuckoocache.h index 6adcc74516..cb0b362143 100644 --- a/src/cuckoocache.h +++ b/src/cuckoocache.h @@ -166,7 +166,7 @@ private: std::vector<Element> table; /** size stores the total available slots in the hash table */ - uint32_t size; + uint32_t size{0}; /** The bit_packed_atomic_flags array is marked mutable because we want * garbage collection to be allowed to occur from const methods */ @@ -183,7 +183,7 @@ private: * decremented on insert and reset to the new number of inserts which would * cause the epoch to reach epoch_size when it reaches zero. */ - uint32_t epoch_heuristic_counter; + uint32_t epoch_heuristic_counter{0}; /** epoch_size is set to be the number of elements supposed to be in a * epoch. When the number of non-erased elements in an epoch @@ -193,12 +193,12 @@ private: * one "dead" which has been erased, one "dying" which has been marked to be * erased next, and one "living" which new inserts add to. */ - uint32_t epoch_size; + uint32_t epoch_size{0}; /** depth_limit determines how many elements insert should try to replace. * Should be set to log2(n). */ - uint8_t depth_limit; + uint8_t depth_limit{0}; /** hash_function is a const instance of the hash function. It cannot be * static or initialized at call time as it may have internal state (such as @@ -322,8 +322,7 @@ public: /** You must always construct a cache with some elements via a subsequent * call to setup or setup_bytes, otherwise operations may segfault. 
*/ - cache() : table(), size(), collection_flags(0), epoch_flags(), - epoch_heuristic_counter(), epoch_size(), depth_limit(0), hash_function() + cache() : table(), collection_flags(0), epoch_flags(), hash_function() { } diff --git a/src/dbwrapper.h b/src/dbwrapper.h index f47bd8188e..b389d039fb 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -71,13 +71,13 @@ private: DataStream ssKey{}; CDataStream ssValue; - size_t size_estimate; + size_t size_estimate{0}; public: /** * @param[in] _parent CDBWrapper that this batch is to be submitted to */ - explicit CDBBatch(const CDBWrapper& _parent) : parent(_parent), ssValue(SER_DISK, CLIENT_VERSION), size_estimate(0){}; + explicit CDBBatch(const CDBWrapper& _parent) : parent(_parent), ssValue(SER_DISK, CLIENT_VERSION){}; void Clear() { diff --git a/src/hash.h b/src/hash.h index 6500f1c709..2e3ed11b43 100644 --- a/src/hash.h +++ b/src/hash.h @@ -12,6 +12,7 @@ #include <crypto/sha256.h> #include <prevector.h> #include <serialize.h> +#include <span.h> #include <uint256.h> #include <version.h> @@ -166,6 +167,39 @@ public: }; /** Reads data from an underlying stream, while hashing the read data. */ +template <typename Source> +class HashVerifier : public HashWriter +{ +private: + Source& m_source; + +public: + explicit HashVerifier(Source& source LIFETIMEBOUND) : m_source{source} {} + + void read(Span<std::byte> dst) + { + m_source.read(dst); + this->write(dst); + } + + void ignore(size_t num_bytes) + { + std::byte data[1024]; + while (num_bytes > 0) { + size_t now = std::min<size_t>(num_bytes, 1024); + read({data, now}); + num_bytes -= now; + } + } + + template <typename T> + HashVerifier<Source>& operator>>(T&& obj) + { + ::Unserialize(*this, obj); + return *this; + } +}; + template<typename Source> class CHashVerifier : public CHashWriter { @@ -248,4 +282,12 @@ void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char he */ HashWriter TaggedHash(const std::string& tag); +/** Compute the 160-bit RIPEMD-160 hash of an array. 
*/ +inline uint160 RIPEMD160(Span<const unsigned char> data) +{ + uint160 result; + CRIPEMD160().Write(data.data(), data.size()).Finalize(result.begin()); + return result; +} + #endif // BITCOIN_HASH_H diff --git a/src/index/base.cpp b/src/index/base.cpp index a8b8cbe8a9..1d5c0dbe24 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -415,8 +415,9 @@ IndexSummary BaseIndex::GetSummary() const return summary; } -void BaseIndex::SetBestBlockIndex(const CBlockIndex* block) { - assert(!node::fPruneMode || AllowPrune()); +void BaseIndex::SetBestBlockIndex(const CBlockIndex* block) +{ + assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune()); if (AllowPrune() && block) { node::PruneLockInfo prune_lock; diff --git a/src/init.cpp b/src/init.cpp index 75eb114163..5b486854e0 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1499,7 +1499,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) options.mempool = Assert(node.mempool.get()); options.reindex = node::fReindex; options.reindex_chainstate = fReindexChainState; - options.prune = node::fPruneMode; + options.prune = chainman.m_blockman.IsPruneMode(); options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS); options.check_level = args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL); options.check_interrupt = ShutdownRequested; @@ -1609,7 +1609,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // if pruning, perform the initial blockstore prune // after any wallet rescanning has taken place. - if (fPruneMode) { + if (chainman.m_blockman.IsPruneMode()) { if (!fReindex) { LOCK(cs_main); for (Chainstate* chainstate : chainman.GetAll()) { @@ -1637,8 +1637,10 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // On first startup, warn on low block storage space if (!fReindex && !fReindexChainState && chain_active_height <= 1) { - uint64_t additional_bytes_needed = fPruneMode ? nPruneTarget - : chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024; + uint64_t additional_bytes_needed{ + chainman.m_blockman.IsPruneMode() ? + chainman.m_blockman.GetPruneTarget() : + chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024}; if (!CheckDiskSpace(args.GetBlocksDirPath(), additional_bytes_needed)) { InitWarning(strprintf(_( @@ -42,10 +42,10 @@ public: private: //! Whether this private key is valid. We check for correctness when modifying the key //! data, so fValid should always correspond to the actual state. - bool fValid; + bool fValid{false}; //! Whether the public key corresponding to this private key is (to be) compressed. - bool fCompressed; + bool fCompressed{false}; //! The actual byte data std::vector<unsigned char, secure_allocator<unsigned char> > keydata; @@ -55,7 +55,7 @@ private: public: //! Construct an invalid private key. 
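The RIPEMD160(Span<const unsigned char>) helper added to hash.h above is what lets the later script/descriptor.cpp and script/sign.cpp hunks in this diff drop the manual two-step pattern; side by side:

    // Before: stream into a preallocated uint160
    uint160 h160;
    CRIPEMD160().Write(data.data(), data.size()).Finalize(h160.begin());

    // After: a single expression using the new helper (data is any Span<const unsigned char>)
    uint160 h160_new{RIPEMD160(data)};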
- CKey() : fValid(false), fCompressed(false) + CKey() { // Important: vch must be 32 bytes in length to not break serialization keydata.resize(32); diff --git a/src/mapport.h b/src/mapport.h index 279d65167f..6f55c46f6c 100644 --- a/src/mapport.h +++ b/src/mapport.h @@ -5,17 +5,9 @@ #ifndef BITCOIN_MAPPORT_H #define BITCOIN_MAPPORT_H -#ifdef USE_UPNP -static constexpr bool DEFAULT_UPNP = USE_UPNP; -#else static constexpr bool DEFAULT_UPNP = false; -#endif // USE_UPNP -#ifdef USE_NATPMP -static constexpr bool DEFAULT_NATPMP = USE_NATPMP; -#else static constexpr bool DEFAULT_NATPMP = false; -#endif // USE_NATPMP enum MapPortProtoFlag : unsigned int { NONE = 0x00, diff --git a/src/net.cpp b/src/net.cpp index 960d0ee841..98500bd30e 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1305,15 +1305,14 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes, RecordBytesRecv(nBytes); if (notify) { size_t nSizeAdded = 0; - auto it(pnode->vRecvMsg.begin()); - for (; it != pnode->vRecvMsg.end(); ++it) { + for (const auto& msg : pnode->vRecvMsg) { // vRecvMsg contains only completed CNetMessage // the single possible partially deserialized message are held by TransportDeserializer - nSizeAdded += it->m_raw_message_size; + nSizeAdded += msg.m_raw_message_size; } { LOCK(pnode->cs_vProcessMsg); - pnode->vProcessMsg.splice(pnode->vProcessMsg.end(), pnode->vRecvMsg, pnode->vRecvMsg.begin(), it); + pnode->vProcessMsg.splice(pnode->vProcessMsg.end(), pnode->vRecvMsg); pnode->nProcessQueueSize += nSizeAdded; pnode->fPauseRecv = pnode->nProcessQueueSize > nReceiveFloodSize; } @@ -1399,7 +1398,7 @@ void CConnman::ThreadDNSAddressSeed() if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) { // When -forcednsseed is provided, query all. seeds_right_now = seeds.size(); - } else if (addrman.size() == 0) { + } else if (addrman.Size() == 0) { // If we have no known peers, query all. // This will occur on the first run, or if peers.dat has been // deleted. @@ -1418,13 +1417,13 @@ void CConnman::ThreadDNSAddressSeed() // * If we continue having problems, eventually query all the // DNS seeds, and if that fails too, also try the fixed seeds. // (done in ThreadOpenConnections) - const std::chrono::seconds seeds_wait_time = (addrman.size() >= DNSSEEDS_DELAY_PEER_THRESHOLD ? DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS); + const std::chrono::seconds seeds_wait_time = (addrman.Size() >= DNSSEEDS_DELAY_PEER_THRESHOLD ? 
DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS); for (const std::string& seed : seeds) { if (seeds_right_now == 0) { seeds_right_now += DNSSEEDS_TO_QUERY_AT_ONCE; - if (addrman.size() > 0) { + if (addrman.Size() > 0) { LogPrintf("Waiting %d seconds before querying DNS seeds.\n", seeds_wait_time.count()); std::chrono::seconds to_wait = seeds_wait_time; while (to_wait.count() > 0) { @@ -1504,7 +1503,7 @@ void CConnman::DumpAddresses() DumpPeerAddresses(::gArgs, addrman); LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n", - addrman.size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start)); + addrman.Size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start)); } void CConnman::ProcessAddrFetch() @@ -1575,6 +1574,19 @@ int CConnman::GetExtraBlockRelayCount() const return std::max(block_relay_peers - m_max_outbound_block_relay, 0); } +std::unordered_set<Network> CConnman::GetReachableEmptyNetworks() const +{ + std::unordered_set<Network> networks{}; + for (int n = 0; n < NET_MAX; n++) { + enum Network net = (enum Network)n; + if (net == NET_UNROUTABLE || net == NET_INTERNAL) continue; + if (IsReachable(net) && addrman.Size(net, std::nullopt) == 0) { + networks.insert(net); + } + } + return networks; +} + void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) { SetSyscallSandboxPolicy(SyscallSandboxPolicy::NET_OPEN_CONNECTION); @@ -1624,7 +1636,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) if (interruptNet) return; - if (add_fixed_seeds && addrman.size() == 0) { + const std::unordered_set<Network> fixed_seed_networks{GetReachableEmptyNetworks()}; + if (add_fixed_seeds && !fixed_seed_networks.empty()) { // When the node starts with an empty peers.dat, there are a few other sources of peers before // we fallback on to fixed seeds: -dnsseed, -seednode, -addnode // If none of those are available, we fallback on to fixed seeds immediately, else we allow @@ -1633,7 +1646,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // It is cheapest to check if enough time has passed first. if (GetTime<std::chrono::seconds>() > start + std::chrono::minutes{1}) { add_fixed_seeds_now = true; - LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty\n"); + LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty for at least one reachable network\n"); } // Checking !dnsseed is cheaper before locking 2 mutexes. @@ -1650,14 +1663,12 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // We will not make outgoing connections to peers that are unreachable // (e.g. because of -onlynet configuration). // Therefore, we do not add them to addrman in the first place. - // Note that if you change -onlynet setting from one network to another, - // peers.dat will contain only peers of unreachable networks and - // manual intervention will be needed (either delete peers.dat after - // configuration change or manually add some reachable peer using addnode), - // see <https://github.com/bitcoin/bitcoin/issues/26035> for details. + // In case previously unreachable networks become reachable + // (e.g. in case of -onlynet changes by the user), fixed seeds will + // be loaded only for networks for which we have no addressses. 
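A concrete scenario for the comment above (an assumed configuration, not part of the patch): a node that ran with -onlynet=onion for a while and is then restarted without that option has a peers.dat containing only onion addresses. Previously that required deleting peers.dat or adding peers manually (issue 26035, referenced by the removed comment); with this change the per-network decision looks roughly like:

    // IsReachable(NET_ONION) && addrman.Size(NET_ONION) > 0  -> onion not in GetReachableEmptyNetworks()
    // IsReachable(NET_IPV4)  && addrman.Size(NET_IPV4) == 0  -> IPv4 gets fixed seeds
    // IsReachable(NET_IPV6)  && addrman.Size(NET_IPV6) == 0  -> IPv6 gets fixed seeds
    // Fixed seeds are loaded only for reachable networks that addrman knows nothing about.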
seed_addrs.erase(std::remove_if(seed_addrs.begin(), seed_addrs.end(), - [](const CAddress& addr) { return !IsReachable(addr); }), - seed_addrs.end()); + [&fixed_seed_networks](const CAddress& addr) { return fixed_seed_networks.count(addr.GetNetwork()) == 0; }), + seed_addrs.end()); CNetAddr local; local.SetInternal("fixedseeds"); addrman.Add(seed_addrs, local); @@ -39,6 +39,7 @@ #include <memory> #include <optional> #include <thread> +#include <unordered_set> #include <vector> class AddrMan; @@ -970,6 +971,12 @@ private: void RecordBytesSent(uint64_t bytes) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex); /** + Return reachable networks for which we have no addresses in addrman and therefore + may require loading fixed seeds. + */ + std::unordered_set<Network> GetReachableEmptyNetworks() const; + + /** * Return vector of current BLOCK_RELAY peers. */ std::vector<CAddress> GetCurrentBlockRelayOnlyConns() const; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index b5e1222db5..a659300a0d 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -53,9 +53,6 @@ using node::ReadBlockFromDisk; using node::ReadRawBlockFromDisk; -using node::fImporting; -using node::fPruneMode; -using node::fReindex; /** How long to cache transactions in mapRelay for normal relay */ static constexpr auto RELAY_TX_CACHE_TIME = 15min; @@ -1739,8 +1736,7 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index) { - if (fImporting) return "Importing..."; - if (fReindex) return "Reindexing..."; + if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ..."; // Ensure this peer exists and hasn't been disconnected PeerRef peer = GetPeerRef(peer_id); @@ -1849,7 +1845,7 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { - LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", new_timeout.count()); + LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); } } } @@ -3697,7 +3693,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); UpdateBlockAvailability(pfrom.GetId(), inv.hash); - if (!fAlreadyHave && !fImporting && !fReindex && !IsBlockRequested(inv.hash)) { + if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) { // Headers-first is the primary method of announcement on // the network. If a node fell back to sending blocks by // inv, it may be for a re-org, or because we haven't @@ -3830,8 +3826,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // If pruning, don't inv blocks unless we have on disk and are likely to still have // for some reasonable time window (1 hour) that block relay might require. 
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing; - if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) - { + if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) { LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } @@ -3907,7 +3902,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - if (fImporting || fReindex) { + if (m_chainman.m_blockman.LoadingBlocks()) { LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId()); return; } @@ -4186,7 +4181,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (msg_type == NetMsgType::CMPCTBLOCK) { // Ignore cmpctblock received while importing - if (fImporting || fReindex) { + if (m_chainman.m_blockman.LoadingBlocks()) { LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId()); return; } @@ -4402,7 +4397,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (msg_type == NetMsgType::BLOCKTXN) { // Ignore blocktxn received while importing - if (fImporting || fReindex) { + if (m_chainman.m_blockman.LoadingBlocks()) { LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId()); return; } @@ -4477,7 +4472,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (msg_type == NetMsgType::HEADERS) { // Ignore headers received while importing - if (fImporting || fReindex) { + if (m_chainman.m_blockman.LoadingBlocks()) { LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId()); return; } @@ -4522,7 +4517,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (msg_type == NetMsgType::BLOCK) { // Ignore block received while importing - if (fImporting || fReindex) { + if (m_chainman.m_blockman.LoadingBlocks()) { LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId()); return; } @@ -5109,7 +5104,7 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers() if (now > m_stale_tip_check_time) { // Check whether our tip is stale, and if so, allow using an extra // outbound peer - if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { + if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", count_seconds(now - m_last_tip_update.load())); m_connman.SetTryNewOutboundPeer(true); @@ -5416,7 +5411,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) } } - if (!state.fSyncStarted && CanServeBlocks(*peer) && !fImporting && !fReindex) { + if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) { // Only actively request headers from a single peer, unless we're close to today. 
if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) { const CBlockIndex* pindexStart = m_chainman.m_best_header; @@ -5741,7 +5736,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // bandwidth is insufficient. const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX); if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { - LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", m_block_stalling_timeout.load().count()); + LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout)); } return true; } diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index b8a57acf80..a81099a26c 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -352,7 +352,7 @@ bool BlockManager::LoadBlockIndexDB(const Consensus::Params& consensus_params) } for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) { FlatFilePos pos(*it, 0); - if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) { + if (AutoFile{OpenBlockFile(pos, true)}.IsNull()) { return false; } } @@ -454,13 +454,13 @@ CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n) static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart) { // Open history file to append - CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); + AutoFile fileout{OpenUndoFile(pos)}; if (fileout.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Write index header - unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion()); + unsigned int nSize = GetSerializeSize(blockundo, CLIENT_VERSION); fileout << messageStart << nSize; // Write undo data @@ -489,14 +489,14 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex) } // Open history file to read - CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); + AutoFile filein{OpenUndoFile(pos, true)}; if (filein.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Read block uint256 hashChecksum; - CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data + HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a try { verifier << pindex->pprev->GetBlockHash(); verifier >> blockundo; @@ -768,7 +768,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c { FlatFilePos hpos = pos; hpos.nPos -= 8; // Seek back 8 bytes for meta header - CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION); + AutoFile filein{OpenBlockFile(hpos, true)}; if (filein.IsNull()) { return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString()); } diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index cdf667c754..b6007897df 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -48,10 +48,7 @@ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = CMessageHeader::MESSAG extern std::atomic_bool fImporting; extern std::atomic_bool fReindex; -/** Pruning-related variables and constants */ -/** True if we're running in -prune mode. */ extern bool fPruneMode; -/** Number of bytes of block files that we're trying to stay below. 
*/ extern uint64_t nPruneTarget; // Because validation code takes pointers to the map's CBlockIndex objects, if @@ -176,6 +173,17 @@ public: /** Store block on disk. If dbp is not nullptr, then it provides the known position of the block within a block file on disk. */ FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp); + /** Whether running in -prune mode. */ + [[nodiscard]] bool IsPruneMode() const { return fPruneMode; } + + /** Attempt to stay below this number of bytes of block files. */ + [[nodiscard]] uint64_t GetPruneTarget() const { return nPruneTarget; } + + [[nodiscard]] bool LoadingBlocks() const + { + return fImporting || fReindex; + } + /** Calculate the amount of disk space the block & undo files currently use */ uint64_t CalculateCurrentUsage(); diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp index 99dc319ec0..ba1024d22e 100644 --- a/src/node/chainstate.cpp +++ b/src/node/chainstate.cpp @@ -44,10 +44,10 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize if (chainman.MinimumChainWork() < UintToArith256(chainman.GetConsensus().nMinimumChainWork)) { LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainman.GetConsensus().nMinimumChainWork.GetHex()); } - if (nPruneTarget == std::numeric_limits<uint64_t>::max()) { + if (chainman.m_blockman.GetPruneTarget() == std::numeric_limits<uint64_t>::max()) { LogPrintf("Block pruning enabled. Use RPC call pruneblockchain(height) to manually prune block and undo files.\n"); - } else if (nPruneTarget) { - LogPrintf("Prune configured to target %u MiB on disk for block and undo files.\n", nPruneTarget / 1024 / 1024); + } else if (chainman.m_blockman.GetPruneTarget()) { + LogPrintf("Prune configured to target %u MiB on disk for block and undo files.\n", chainman.m_blockman.GetPruneTarget() / 1024 / 1024); } LOCK(cs_main); diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 4f3dc99bbf..eda359568f 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -711,8 +711,9 @@ public: LOCK(::cs_main); return chainman().m_blockman.m_have_pruned; } - bool isReadyToBroadcast() override { return !node::fImporting && !node::fReindex && !isInitialBlockDownload(); } - bool isInitialBlockDownload() override { + bool isReadyToBroadcast() override { return !chainman().m_blockman.LoadingBlocks() && !isInitialBlockDownload(); } + bool isInitialBlockDownload() override + { return chainman().ActiveChainstate().IsInitialBlockDownload(); } bool shutdownRequested() override { return ShutdownRequested(); } diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp index bab1b75211..cccf95e552 100644 --- a/src/node/utxo_snapshot.cpp +++ b/src/node/utxo_snapshot.cpp @@ -7,12 +7,17 @@ #include <fs.h> #include <logging.h> #include <streams.h> +#include <sync.h> +#include <tinyformat.h> +#include <txdb.h> #include <uint256.h> #include <util/system.h> #include <validation.h> +#include <cassert> #include <cstdio> #include <optional> +#include <string> namespace node { diff --git a/src/node/utxo_snapshot.h b/src/node/utxo_snapshot.h index b5ed9ef9fe..c5c018c9e8 100644 --- a/src/node/utxo_snapshot.h +++ b/src/node/utxo_snapshot.h @@ -7,13 +7,16 @@ #define BITCOIN_NODE_UTXO_SNAPSHOT_H #include <fs.h> -#include <uint256.h> +#include <kernel/cs_main.h> #include <serialize.h> -#include <validation.h> +#include <sync.h> +#include <uint256.h> +#include <cstdint> #include <optional> 
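A small worked example for the accessors introduced in the blockstorage.h hunk above, using an assumed -prune=550 setting (init converts the MiB argument to bytes):

    // blockman is a node::BlockManager on a node started with -prune=550 (illustrative value)
    blockman.IsPruneMode();    // true
    blockman.GetPruneTarget(); // 550 * 1024 * 1024 = 576716800 bytes, logged as "target 550 MiB"
    blockman.LoadingBlocks();  // true exactly while fImporting or fReindex is set

With -prune=1 (manual pruning) the target is std::numeric_limits<uint64_t>::max(), which is why the node/chainstate.cpp hunk above special-cases that value before logging a MiB figure.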
+#include <string_view> -extern RecursiveMutex cs_main; +class Chainstate; namespace node { //! Metadata describing a serialized version of a UTXO set from which an diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index e4eb932e5c..d244de1bb2 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -528,7 +528,7 @@ bool CBlockPolicyEstimator::_removeTx(const uint256& hash, bool inBlock) } CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath) - : m_estimation_filepath{estimation_filepath}, nBestSeenHeight{0}, firstRecordedHeight{0}, historicalFirst{0}, historicalBest{0}, trackedTxs{0}, untrackedTxs{0} + : m_estimation_filepath{estimation_filepath} { static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero"); size_t bucketIndex = 0; diff --git a/src/policy/fees.h b/src/policy/fees.h index dd4f031180..1c24b8c7c3 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -242,16 +242,16 @@ public: private: mutable Mutex m_cs_fee_estimator; - unsigned int nBestSeenHeight GUARDED_BY(m_cs_fee_estimator); - unsigned int firstRecordedHeight GUARDED_BY(m_cs_fee_estimator); - unsigned int historicalFirst GUARDED_BY(m_cs_fee_estimator); - unsigned int historicalBest GUARDED_BY(m_cs_fee_estimator); + unsigned int nBestSeenHeight GUARDED_BY(m_cs_fee_estimator){0}; + unsigned int firstRecordedHeight GUARDED_BY(m_cs_fee_estimator){0}; + unsigned int historicalFirst GUARDED_BY(m_cs_fee_estimator){0}; + unsigned int historicalBest GUARDED_BY(m_cs_fee_estimator){0}; struct TxStatsInfo { - unsigned int blockHeight; - unsigned int bucketIndex; - TxStatsInfo() : blockHeight(0), bucketIndex(0) {} + unsigned int blockHeight{0}; + unsigned int bucketIndex{0}; + TxStatsInfo() {} }; // map of txids to information about that transaction @@ -262,8 +262,8 @@ private: std::unique_ptr<TxConfirmStats> shortStats PT_GUARDED_BY(m_cs_fee_estimator); std::unique_ptr<TxConfirmStats> longStats PT_GUARDED_BY(m_cs_fee_estimator); - unsigned int trackedTxs GUARDED_BY(m_cs_fee_estimator); - unsigned int untrackedTxs GUARDED_BY(m_cs_fee_estimator); + unsigned int trackedTxs GUARDED_BY(m_cs_fee_estimator){0}; + unsigned int untrackedTxs GUARDED_BY(m_cs_fee_estimator){0}; std::vector<double> buckets GUARDED_BY(m_cs_fee_estimator); // The upper-bound of the range for the bucket (inclusive) std::map<double, unsigned int> bucketMap GUARDED_BY(m_cs_fee_estimator); // Map of bucket upper-bound to index into all vectors by bucket diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 9e0bc312b8..8bee066ab8 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -460,7 +460,7 @@ static RPCHelpMan getblockfrompeer() // Fetching blocks before the node has syncing past their height can prevent block files from // being pruned, so we avoid it if the node is in prune mode. 
- if (node::fPruneMode && index->nHeight > WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()->nHeight)) { + if (chainman.m_blockman.IsPruneMode() && index->nHeight > WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()->nHeight)) { throw JSONRPCError(RPC_MISC_ERROR, "In prune mode, only blocks that the node has already synced previously can be fetched from a peer"); } @@ -775,10 +775,11 @@ static RPCHelpMan pruneblockchain() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - if (!node::fPruneMode) + ChainstateManager& chainman = EnsureAnyChainman(request.context); + if (!chainman.m_blockman.IsPruneMode()) { throw JSONRPCError(RPC_MISC_ERROR, "Cannot prune blocks because node is not in prune mode."); + } - ChainstateManager& chainman = EnsureAnyChainman(request.context); LOCK(cs_main); Chainstate& active_chainstate = chainman.ActiveChainstate(); CChain& active_chain = active_chainstate.m_chain; @@ -1266,15 +1267,15 @@ RPCHelpMan getblockchaininfo() obj.pushKV("initialblockdownload", active_chainstate.IsInitialBlockDownload()); obj.pushKV("chainwork", tip.nChainWork.GetHex()); obj.pushKV("size_on_disk", chainman.m_blockman.CalculateCurrentUsage()); - obj.pushKV("pruned", node::fPruneMode); - if (node::fPruneMode) { + obj.pushKV("pruned", chainman.m_blockman.IsPruneMode()); + if (chainman.m_blockman.IsPruneMode()) { obj.pushKV("pruneheight", chainman.m_blockman.GetFirstStoredBlock(tip)->nHeight); // if 0, execution bypasses the whole if block. bool automatic_pruning{args.GetIntArg("-prune", 0) != 1}; obj.pushKV("automatic_pruning", automatic_pruning); if (automatic_pruning) { - obj.pushKV("prune_target_size", node::nPruneTarget); + obj.pushKV("prune_target_size", chainman.m_blockman.GetPruneTarget()); } } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 39f3260ca6..7704496c10 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -54,8 +54,11 @@ using node::PSBTAnalysis; using node::ReadBlockFromDisk; using node::UndoReadFromDisk; -static void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry, Chainstate& active_chainstate, const CTxUndo* txundo = nullptr, TxVerbosity verbosity = TxVerbosity::SHOW_TXID) +static void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry, + Chainstate& active_chainstate, const CTxUndo* txundo = nullptr, + TxVerbosity verbosity = TxVerbosity::SHOW_DETAILS) { + CHECK_NONFATAL(verbosity >= TxVerbosity::SHOW_DETAILS); // Call into TxToUniv() in bitcoin-common to decode the transaction hex. 
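The CHECK_NONFATAL added here mirrors the one added to TxToUniv() in the core_write.cpp hunk earlier in this diff; both rely on TxVerbosity being an ordered enum. Roughly, with the enumerator names as used elsewhere in the codebase:

    // TxVerbosity: SHOW_TXID < SHOW_DETAILS < SHOW_DETAILS_AND_PREVOUT
    // TxToJSON()/TxToUniv() always emit full transaction details, so a caller asking for
    // SHOW_TXID would be a programming error; the check turns that into a non-fatal failure:
    CHECK_NONFATAL(verbosity >= TxVerbosity::SHOW_DETAILS);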
// // Blockchain contextual information (confirmations and blocktime) is not diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 5815a059ae..72b9f2230e 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -4,11 +4,13 @@ #include <script/descriptor.h> +#include <hash.h> #include <key_io.h> #include <pubkey.h> #include <script/miniscript.h> #include <script/script.h> #include <script/standard.h> +#include <uint256.h> #include <span.h> #include <util/bip32.h> @@ -1618,8 +1620,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo } } if (txntype == TxoutType::WITNESS_V0_SCRIPTHASH && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH)) { - CScriptID scriptid; - CRIPEMD160().Write(data[0].data(), data[0].size()).Finalize(scriptid.begin()); + CScriptID scriptid{RIPEMD160(data[0])}; CScript subscript; if (provider.GetCScript(scriptid, subscript)) { auto sub = InferScript(subscript, ParseScriptContext::P2WSH, provider); diff --git a/src/script/sign.cpp b/src/script/sign.cpp index d369d4960d..70df9ee62c 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -286,7 +286,6 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator std::vector<valtype>& ret, TxoutType& whichTypeRet, SigVersion sigversion, SignatureData& sigdata) { CScript scriptRet; - uint160 h160; ret.clear(); std::vector<unsigned char> sig; @@ -315,8 +314,8 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator ret.push_back(ToByteVector(pubkey)); return true; } - case TxoutType::SCRIPTHASH: - h160 = uint160(vSolutions[0]); + case TxoutType::SCRIPTHASH: { + uint160 h160{vSolutions[0]}; if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); return true; @@ -324,7 +323,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator // Could not find redeemScript, add to missing sigdata.missing_redeem_script = h160; return false; - + } case TxoutType::MULTISIG: { size_t required = vSolutions.front()[0]; ret.push_back(valtype()); // workaround CHECKMULTISIG bug @@ -350,8 +349,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator return true; case TxoutType::WITNESS_V0_SCRIPTHASH: - CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(h160.begin()); - if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) { + if (GetCScript(provider, sigdata, CScriptID{RIPEMD160(vSolutions[0])}, scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); return true; } diff --git a/src/script/sign.h b/src/script/sign.h index b32bb55dd3..263fb61fc5 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -13,6 +13,7 @@ #include <script/interpreter.h> #include <script/keyorigin.h> #include <script/standard.h> +#include <uint256.h> class CKey; class CKeyID; diff --git a/src/serialize.h b/src/serialize.h index f1edc54031..7bc7b10779 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1005,11 +1005,11 @@ struct CSerActionUnserialize class CSizeComputer { protected: - size_t nSize; + size_t nSize{0}; const int nVersion; public: - explicit CSizeComputer(int nVersionIn) : nSize(0), nVersion(nVersionIn) {} + explicit CSizeComputer(int nVersionIn) : nVersion(nVersionIn) {} void write(Span<const std::byte> src) { diff --git a/src/span.h b/src/span.h index 4d00bbc244..4692eca7fb 100644 --- a/src/span.h +++ 
b/src/span.h @@ -96,7 +96,7 @@ template<typename C> class Span { C* m_data; - std::size_t m_size; + std::size_t m_size{0}; template <class T> struct is_Span_int : public std::false_type {}; @@ -107,7 +107,7 @@ class Span public: - constexpr Span() noexcept : m_data(nullptr), m_size(0) {} + constexpr Span() noexcept : m_data(nullptr) {} /** Construct a span from a begin pointer and a size. * diff --git a/src/streams.h b/src/streams.h index c12ba8777a..ed9af308c9 100644 --- a/src/streams.h +++ b/src/streams.h @@ -628,8 +628,8 @@ private: const int nVersion; FILE *src; //!< source file - uint64_t nSrcPos; //!< how many bytes have been read from source - uint64_t m_read_pos; //!< how many bytes have been read from this + uint64_t nSrcPos{0}; //!< how many bytes have been read from source + uint64_t m_read_pos{0}; //!< how many bytes have been read from this uint64_t nReadLimit; //!< up to which position we're allowed to read uint64_t nRewind; //!< how many bytes we guarantee to rewind std::vector<std::byte> vchBuf; //!< the buffer @@ -675,7 +675,7 @@ private: public: CBufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) - : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), m_read_pos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0}) + : nType(nTypeIn), nVersion(nVersionIn), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0}) { if (nRewindIn >= nBufSize) throw std::ios_base::failure("Rewind limit must be less than buffer size"); diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index 8b05743330..24ae4bdd1e 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -280,8 +280,8 @@ size_t PosixLockedPageAllocator::GetLimit() /*******************************************************************************/ // Implementation: LockedPool -LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in): - allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0) +LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in) + : allocator(std::move(allocator_in)), lf_cb(lf_cb_in) { } diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h index 66fbc218ab..1bba459377 100644 --- a/src/support/lockedpool.h +++ b/src/support/lockedpool.h @@ -198,7 +198,7 @@ private: std::list<LockedPageArena> arenas; LockingFailed_Callback lf_cb; - size_t cumulative_bytes_locked; + size_t cumulative_bytes_locked{0}; /** Mutex protects access to this pool's data structures, including arenas. */ mutable std::mutex mutex; diff --git a/src/sync.h b/src/sync.h index 8ce2e7b124..7242a793ab 100644 --- a/src/sync.h +++ b/src/sync.h @@ -11,7 +11,7 @@ #include <logging/timer.h> #endif -#include <threadsafety.h> +#include <threadsafety.h> // IWYU pragma: export #include <util/macros.h> #include <condition_variable> diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index b15df43e8c..cf7bb776cc 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -67,14 +67,14 @@ BOOST_AUTO_TEST_CASE(addrman_simple) CNetAddr source = ResolveIP("252.2.2.2"); // Test: Does Addrman respond correctly when empty. 
- BOOST_CHECK_EQUAL(addrman->size(), 0U); + BOOST_CHECK_EQUAL(addrman->Size(), 0U); auto addr_null = addrman->Select().first; BOOST_CHECK_EQUAL(addr_null.ToString(), "[::]:0"); // Test: Does Addrman::Add work as expected. CService addr1 = ResolveService("250.1.1.1", 8333); BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source)); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); auto addr_ret1 = addrman->Select().first; BOOST_CHECK_EQUAL(addr_ret1.ToString(), "250.1.1.1:8333"); @@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple) // Expected dup IP should not be added. CService addr1_dup = ResolveService("250.1.1.1", 8333); BOOST_CHECK(!addrman->Add({CAddress(addr1_dup, NODE_NONE)}, source)); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); // Test: New table has one addr and we add a diff addr we should @@ -93,7 +93,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple) CService addr2 = ResolveService("250.1.1.2", 8333); BOOST_CHECK(addrman->Add({CAddress(addr2, NODE_NONE)}, source)); - BOOST_CHECK(addrman->size() >= 1); + BOOST_CHECK(addrman->Size() >= 1); // Test: reset addrman and test AddrMan::Add multiple addresses works as expected addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node)); @@ -101,7 +101,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple) vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE)); vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE)); BOOST_CHECK(addrman->Add(vAddr, source)); - BOOST_CHECK(addrman->size() >= 1); + BOOST_CHECK(addrman->Size() >= 1); } BOOST_AUTO_TEST_CASE(addrman_ports) @@ -110,23 +110,23 @@ BOOST_AUTO_TEST_CASE(addrman_ports) CNetAddr source = ResolveIP("252.2.2.2"); - BOOST_CHECK_EQUAL(addrman->size(), 0U); + BOOST_CHECK_EQUAL(addrman->Size(), 0U); // Test 7; Addr with same IP but diff port does not replace existing addr. CService addr1 = ResolveService("250.1.1.1", 8333); BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source)); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); CService addr1_port = ResolveService("250.1.1.1", 8334); BOOST_CHECK(addrman->Add({CAddress(addr1_port, NODE_NONE)}, source)); - BOOST_CHECK_EQUAL(addrman->size(), 2U); + BOOST_CHECK_EQUAL(addrman->Size(), 2U); auto addr_ret2 = addrman->Select().first; BOOST_CHECK(addr_ret2.ToString() == "250.1.1.1:8333" || addr_ret2.ToString() == "250.1.1.1:8334"); // Test: Add same IP but diff port to tried table; this converts the entry with // the specified port to tried, but not the other. addrman->Good(CAddress(addr1_port, NODE_NONE)); - BOOST_CHECK_EQUAL(addrman->size(), 2U); + BOOST_CHECK_EQUAL(addrman->Size(), 2U); bool newOnly = true; auto addr_ret3 = addrman->Select(newOnly).first; BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333"); @@ -142,7 +142,7 @@ BOOST_AUTO_TEST_CASE(addrman_select) // Test: Select from new with 1 addr in new. CService addr1 = ResolveService("250.1.1.1", 8333); BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source)); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); bool newOnly = true; auto addr_ret1 = addrman->Select(newOnly).first; @@ -150,14 +150,14 @@ BOOST_AUTO_TEST_CASE(addrman_select) // Test: move addr to tried, select from new expected nothing returned. 
BOOST_CHECK(addrman->Good(CAddress(addr1, NODE_NONE))); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); auto addr_ret2 = addrman->Select(newOnly).first; BOOST_CHECK_EQUAL(addr_ret2.ToString(), "[::]:0"); auto addr_ret3 = addrman->Select().first; BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333"); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); // Add three addresses to new table. @@ -182,7 +182,7 @@ BOOST_AUTO_TEST_CASE(addrman_select) BOOST_CHECK(addrman->Good(CAddress(addr7, NODE_NONE))); // Test: 6 addrs + 1 addr from last test = 7. - BOOST_CHECK_EQUAL(addrman->size(), 7U); + BOOST_CHECK_EQUAL(addrman->Size(), 7U); // Test: Select pulls from new and tried regardless of port number. std::set<uint16_t> ports; @@ -200,25 +200,25 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions) uint32_t num_addrs{0}; - BOOST_CHECK_EQUAL(addrman->size(), num_addrs); + BOOST_CHECK_EQUAL(addrman->Size(), num_addrs); while (num_addrs < 22) { // Magic number! 250.1.1.1 - 250.1.1.22 do not collide with deterministic key = 1 CService addr = ResolveService("250.1.1." + ToString(++num_addrs)); BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source)); // Test: No collision in new table yet. - BOOST_CHECK_EQUAL(addrman->size(), num_addrs); + BOOST_CHECK_EQUAL(addrman->Size(), num_addrs); } // Test: new table collision! CService addr1 = ResolveService("250.1.1." + ToString(++num_addrs)); uint32_t collisions{1}; BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source)); - BOOST_CHECK_EQUAL(addrman->size(), num_addrs - collisions); + BOOST_CHECK_EQUAL(addrman->Size(), num_addrs - collisions); CService addr2 = ResolveService("250.1.1." + ToString(++num_addrs)); BOOST_CHECK(addrman->Add({CAddress(addr2, NODE_NONE)}, source)); - BOOST_CHECK_EQUAL(addrman->size(), num_addrs - collisions); + BOOST_CHECK_EQUAL(addrman->Size(), num_addrs - collisions); } BOOST_AUTO_TEST_CASE(addrman_new_multiplicity) @@ -236,7 +236,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_multiplicity) } AddressPosition addr_pos = addrman->FindAddressEntry(addr).value(); BOOST_CHECK_EQUAL(addr_pos.multiplicity, 1U); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); // if nTime increases, an addr can occur in up to 8 buckets // The acceptance probability decreases exponentially with existing multiplicity - @@ -250,7 +250,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_multiplicity) AddressPosition addr_pos_multi = addrman->FindAddressEntry(addr).value(); BOOST_CHECK_EQUAL(addr_pos_multi.multiplicity, 8U); // multiplicity doesn't affect size - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); } BOOST_AUTO_TEST_CASE(addrman_tried_collisions) @@ -261,7 +261,7 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions) uint32_t num_addrs{0}; - BOOST_CHECK_EQUAL(addrman->size(), num_addrs); + BOOST_CHECK_EQUAL(addrman->Size(), num_addrs); while (num_addrs < 35) { // Magic number! 250.1.1.1 - 250.1.1.35 do not collide in tried with deterministic key = 1 CService addr = ResolveService("250.1.1." + ToString(++num_addrs)); @@ -290,7 +290,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) // Test: Sanity check, GetAddr should never return anything if addrman // is empty. 
- BOOST_CHECK_EQUAL(addrman->size(), 0U); + BOOST_CHECK_EQUAL(addrman->Size(), 0U); std::vector<CAddress> vAddr1 = addrman->GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt); BOOST_CHECK_EQUAL(vAddr1.size(), 0U); @@ -336,11 +336,11 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) } std::vector<CAddress> vAddr = addrman->GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt); - size_t percent23 = (addrman->size() * 23) / 100; + size_t percent23 = (addrman->Size() * 23) / 100; BOOST_CHECK_EQUAL(vAddr.size(), percent23); BOOST_CHECK_EQUAL(vAddr.size(), 461U); - // (Addrman.size() < number of addresses added) due to address collisions. - BOOST_CHECK_EQUAL(addrman->size(), 2006U); + // (addrman.Size() < number of addresses added) due to address collisions. + BOOST_CHECK_EQUAL(addrman->Size(), 2006U); } @@ -681,7 +681,7 @@ BOOST_AUTO_TEST_CASE(remove_invalid) addrman->Add({new1, tried1, new2, tried2}, CNetAddr{}); addrman->Good(tried1); addrman->Good(tried2); - BOOST_REQUIRE_EQUAL(addrman->size(), 4); + BOOST_REQUIRE_EQUAL(addrman->Size(), 4); stream << *addrman; @@ -704,14 +704,14 @@ BOOST_AUTO_TEST_CASE(remove_invalid) addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node)); stream >> *addrman; - BOOST_CHECK_EQUAL(addrman->size(), 2); + BOOST_CHECK_EQUAL(addrman->Size(), 2); } BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision) { auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node)); - BOOST_CHECK(addrman->size() == 0); + BOOST_CHECK(addrman->Size() == 0); // Empty addrman should return blank addrman info. BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0"); @@ -796,7 +796,7 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks) { auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node)); - BOOST_CHECK(addrman->size() == 0); + BOOST_CHECK(addrman->Size() == 0); // Empty addrman should return blank addrman info. BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0"); @@ -878,14 +878,14 @@ BOOST_AUTO_TEST_CASE(load_addrman) BOOST_CHECK(Lookup("252.5.1.1", source, 8333, false)); std::vector<CAddress> addresses{CAddress(addr1, NODE_NONE), CAddress(addr2, NODE_NONE), CAddress(addr3, NODE_NONE)}; BOOST_CHECK(addrman.Add(addresses, source)); - BOOST_CHECK(addrman.size() == 3); + BOOST_CHECK(addrman.Size() == 3); // Test that the de-serialization does not throw an exception. CDataStream ssPeers1 = AddrmanToStream(addrman); bool exceptionThrown = false; AddrMan addrman1{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)}; - BOOST_CHECK(addrman1.size() == 0); + BOOST_CHECK(addrman1.Size() == 0); try { unsigned char pchMsgTmp[4]; ssPeers1 >> pchMsgTmp; @@ -894,16 +894,16 @@ BOOST_AUTO_TEST_CASE(load_addrman) exceptionThrown = true; } - BOOST_CHECK(addrman1.size() == 3); + BOOST_CHECK(addrman1.Size() == 3); BOOST_CHECK(exceptionThrown == false); // Test that ReadFromStream creates an addrman with the correct number of addrs. CDataStream ssPeers2 = AddrmanToStream(addrman); AddrMan addrman2{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)}; - BOOST_CHECK(addrman2.size() == 0); + BOOST_CHECK(addrman2.Size() == 0); ReadFromStream(addrman2, ssPeers2); - BOOST_CHECK(addrman2.size() == 3); + BOOST_CHECK(addrman2.Size() == 3); } // Produce a corrupt peers.dat that claims 20 addrs when it only has one addr. 
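The size() -> Size() renames above, together with the addrman_size test added further down in this file, exercise an accessor that can optionally filter by network and by new/tried table. A minimal standalone sketch of that counting idea (the SizeTracker class below is hypothetical and only illustrates the query shape, not the real AddrMan internals):

    #include <cstddef>
    #include <map>
    #include <optional>
    #include <utility>

    enum Network { NET_IPV4, NET_IPV6, NET_ONION, NET_I2P };

    // Hypothetical illustration of per-network new/tried bookkeeping; not the real AddrMan.
    class SizeTracker
    {
        // (network, is-in-new-table) -> number of entries
        std::map<std::pair<Network, bool>, std::size_t> m_counts;

    public:
        void Added(Network net, bool in_new) { ++m_counts[{net, in_new}]; }
        void Removed(Network net, bool in_new) { --m_counts[{net, in_new}]; }

        // Count entries, optionally restricted to one network and/or one table,
        // mirroring the AddrMan::Size(net, in_new) calls in the addrman_size test.
        std::size_t Size(std::optional<Network> net = std::nullopt,
                         std::optional<bool> in_new = std::nullopt) const
        {
            std::size_t total{0};
            for (const auto& [key, count] : m_counts) {
                if (net && key.first != *net) continue;
                if (in_new && key.second != *in_new) continue;
                total += count;
            }
            return total;
        }
    };

With this shape, Size(NET_IPV4) counts IPv4 entries in both tables, while Size(std::nullopt, true) counts new-table entries across all networks, which matches the expectations checked in the test.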
@@ -939,7 +939,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted) CDataStream ssPeers1 = MakeCorruptPeersDat(); bool exceptionThrown = false; AddrMan addrman1{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)}; - BOOST_CHECK(addrman1.size() == 0); + BOOST_CHECK(addrman1.Size() == 0); try { unsigned char pchMsgTmp[4]; ssPeers1 >> pchMsgTmp; @@ -948,14 +948,14 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted) exceptionThrown = true; } // Even though de-serialization failed addrman is not left in a clean state. - BOOST_CHECK(addrman1.size() == 1); + BOOST_CHECK(addrman1.Size() == 1); BOOST_CHECK(exceptionThrown); // Test that ReadFromStream fails if peers.dat is corrupt CDataStream ssPeers2 = MakeCorruptPeersDat(); AddrMan addrman2{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)}; - BOOST_CHECK(addrman2.size() == 0); + BOOST_CHECK(addrman2.Size() == 0); BOOST_CHECK_THROW(ReadFromStream(addrman2, ssPeers2), std::ios_base::failure); } @@ -969,7 +969,7 @@ BOOST_AUTO_TEST_CASE(addrman_update_address) const auto start_time{Now<NodeSeconds>() - 10000s}; addr.nTime = start_time; BOOST_CHECK(addrman->Add({addr}, source)); - BOOST_CHECK_EQUAL(addrman->size(), 1U); + BOOST_CHECK_EQUAL(addrman->Size(), 1U); // Updating an addrman entry with a different port doesn't change it CAddress addr_diff_port{CAddress(ResolveService("250.1.1.1", 8334), NODE_NONE)}; @@ -990,4 +990,42 @@ BOOST_AUTO_TEST_CASE(addrman_update_address) BOOST_CHECK_EQUAL(vAddr2.at(0).nServices, NODE_NETWORK_LIMITED); } +BOOST_AUTO_TEST_CASE(addrman_size) +{ + auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node)); + const CNetAddr source = ResolveIP("252.2.2.2"); + + // empty addrman + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/std::nullopt), 0U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/std::nullopt), 0U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/true), 0U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/false), 0U); + + // add two ipv4 addresses, one to tried and new + const CAddress addr1{ResolveService("250.1.1.1", 8333), NODE_NONE}; + BOOST_CHECK(addrman->Add({addr1}, source)); + BOOST_CHECK(addrman->Good(addr1)); + const CAddress addr2{ResolveService("250.1.1.2", 8333), NODE_NONE}; + BOOST_CHECK(addrman->Add({addr2}, source)); + + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/std::nullopt), 2U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/std::nullopt), 2U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/true), 1U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/false), 1U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/true), 1U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/false), 1U); + + // add one i2p address to new + CService i2p_addr; + i2p_addr.SetSpecial("UDHDrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.I2P"); + const CAddress addr3{i2p_addr, NODE_NONE}; + BOOST_CHECK(addrman->Add({addr3}, source)); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/std::nullopt), 3U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/std::nullopt), 2U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_I2P, /*in_new=*/std::nullopt), 1U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_I2P, /*in_new=*/true), 1U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/true), 2U); + BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, 
/*in_new=*/false), 1U); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp index 31d437081a..312f417129 100644 --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -53,9 +53,9 @@ public: uint256 GetBestBlock() const override { return hashBestBlock_; } - bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock) override + bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock, bool erase = true) override { - for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); ) { + for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); it = erase ? mapCoins.erase(it) : std::next(it)) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Same optimization used in CCoinsViewDB is to only write dirty entries. map_[it->first] = it->second.coin; @@ -64,7 +64,6 @@ public: map_.erase(it->first); } } - mapCoins.erase(it++); } if (!hashBlock.IsNull()) hashBestBlock_ = hashBlock; @@ -126,6 +125,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block) bool found_an_entry = false; bool missed_an_entry = false; bool uncached_an_entry = false; + bool flushed_without_erase = false; // A simple map to track what we expect the cache stack to represent. std::map<COutPoint, Coin> result; @@ -154,9 +154,16 @@ void SimulationTest(CCoinsView* base, bool fake_best_block) bool test_havecoin_after = InsecureRandBits(2) == 0; bool result_havecoin = test_havecoin_before ? stack.back()->HaveCoin(COutPoint(txid, 0)) : false; - const Coin& entry = (InsecureRandRange(500) == 0) ? AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0)); + + // Infrequently, test usage of AccessByTxid instead of AccessCoin - the + // former just delegates to the latter and returns the first unspent in a txn. + const Coin& entry = (InsecureRandRange(500) == 0) ? + AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0)); BOOST_CHECK(coin == entry); - BOOST_CHECK(!test_havecoin_before || result_havecoin == !entry.IsSpent()); + + if (test_havecoin_before) { + BOOST_CHECK(result_havecoin == !entry.IsSpent()); + } if (test_havecoin_after) { bool ret = stack.back()->HaveCoin(COutPoint(txid, 0)); @@ -167,24 +174,29 @@ void SimulationTest(CCoinsView* base, bool fake_best_block) Coin newcoin; newcoin.out.nValue = InsecureRand32(); newcoin.nHeight = 1; + + // Infrequently test adding unspendable coins. if (InsecureRandRange(16) == 0 && coin.IsSpent()) { newcoin.out.scriptPubKey.assign(1 + InsecureRandBits(6), OP_RETURN); BOOST_CHECK(newcoin.out.scriptPubKey.IsUnspendable()); added_an_unspendable_entry = true; } else { - newcoin.out.scriptPubKey.assign(InsecureRandBits(6), 0); // Random sizes so we can test memory usage accounting + // Random sizes so we can test memory usage accounting + newcoin.out.scriptPubKey.assign(InsecureRandBits(6), 0); (coin.IsSpent() ? added_an_entry : updated_an_entry) = true; coin = newcoin; } - stack.back()->AddCoin(COutPoint(txid, 0), std::move(newcoin), !coin.IsSpent() || InsecureRand32() & 1); + bool is_overwrite = !coin.IsSpent() || InsecureRand32() & 1; + stack.back()->AddCoin(COutPoint(txid, 0), std::move(newcoin), is_overwrite); } else { + // Spend the coin. 
removed_an_entry = true; coin.Clear(); BOOST_CHECK(stack.back()->SpendCoin(COutPoint(txid, 0))); } } - // One every 10 iterations, remove a random entry from the cache + // Once every 10 iterations, remove a random entry from the cache if (InsecureRandRange(10) == 0) { COutPoint out(txids[InsecureRand32() % txids.size()], 0); int cacheid = InsecureRand32() % stack.size(); @@ -216,7 +228,9 @@ void SimulationTest(CCoinsView* base, bool fake_best_block) if (stack.size() > 1 && InsecureRandBool() == 0) { unsigned int flushIndex = InsecureRandRange(stack.size() - 1); if (fake_best_block) stack[flushIndex]->SetBestBlock(InsecureRand256()); - BOOST_CHECK(stack[flushIndex]->Flush()); + bool should_erase = InsecureRandRange(4) < 3; + BOOST_CHECK(should_erase ? stack[flushIndex]->Flush() : stack[flushIndex]->Sync()); + flushed_without_erase |= !should_erase; } } if (InsecureRandRange(100) == 0) { @@ -224,7 +238,9 @@ void SimulationTest(CCoinsView* base, bool fake_best_block) if (stack.size() > 0 && InsecureRandBool() == 0) { //Remove the top cache if (fake_best_block) stack.back()->SetBestBlock(InsecureRand256()); - BOOST_CHECK(stack.back()->Flush()); + bool should_erase = InsecureRandRange(4) < 3; + BOOST_CHECK(should_erase ? stack.back()->Flush() : stack.back()->Sync()); + flushed_without_erase |= !should_erase; delete stack.back(); stack.pop_back(); } @@ -260,6 +276,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block) BOOST_CHECK(found_an_entry); BOOST_CHECK(missed_an_entry); BOOST_CHECK(uncached_an_entry); + BOOST_CHECK(flushed_without_erase); } // Run the above simulation for multiple base types. @@ -589,9 +606,9 @@ static size_t InsertCoinsMapEntry(CCoinsMap& map, CAmount value, char flags) return inserted.first->second.coin.DynamicMemoryUsage(); } -void GetCoinsMapEntry(const CCoinsMap& map, CAmount& value, char& flags) +void GetCoinsMapEntry(const CCoinsMap& map, CAmount& value, char& flags, const COutPoint& outp = OUTPOINT) { - auto it = map.find(OUTPOINT); + auto it = map.find(outp); if (it == map.end()) { value = ABSENT; flags = NO_ENTRY; @@ -877,4 +894,205 @@ BOOST_AUTO_TEST_CASE(ccoins_write) CheckWriteCoins(parent_value, child_value, parent_value, parent_flags, child_flags, parent_flags); } + +Coin MakeCoin() +{ + Coin coin; + coin.out.nValue = InsecureRand32(); + coin.nHeight = InsecureRandRange(4096); + coin.fCoinBase = 0; + return coin; +} + + +//! For CCoinsViewCache instances backed by either another cache instance or +//! leveldb, test cache behavior and flag state (DIRTY/FRESH) by +//! +//! 1. Adding a random coin to the child-most cache, +//! 2. Flushing all caches (without erasing), +//! 3. Ensure the entry still exists in the cache and has been written to parent, +//! 4. (if `do_erasing_flush`) Flushing the caches again (with erasing), +//! 5. (if `do_erasing_flush`) Ensure the entry has been written to the parent and is no longer in the cache, +//! 6. Spend the coin, ensure it no longer exists in the parent. +//! +void TestFlushBehavior( + CCoinsViewCacheTest* view, + CCoinsViewDB& base, + std::vector<CCoinsViewCacheTest*>& all_caches, + bool do_erasing_flush) +{ + CAmount value; + char flags; + size_t cache_usage; + + auto flush_all = [&all_caches](bool erase) { + // Flush in reverse order to ensure that flushes happen from children up. + for (auto i = all_caches.rbegin(); i != all_caches.rend(); ++i) { + auto cache = *i; + // hashBlock must be filled before flushing to disk; value is + // unimportant here. This is normally done during connect/disconnect block. 
+ cache->SetBestBlock(InsecureRand256()); + erase ? cache->Flush() : cache->Sync(); + } + }; + + uint256 txid = InsecureRand256(); + COutPoint outp = COutPoint(txid, 0); + Coin coin = MakeCoin(); + // Ensure the coins views haven't seen this coin before. + BOOST_CHECK(!base.HaveCoin(outp)); + BOOST_CHECK(!view->HaveCoin(outp)); + + // --- 1. Adding a random coin to the child cache + // + view->AddCoin(outp, Coin(coin), false); + + cache_usage = view->DynamicMemoryUsage(); + // `base` shouldn't have coin (no flush yet) but `view` should have cached it. + BOOST_CHECK(!base.HaveCoin(outp)); + BOOST_CHECK(view->HaveCoin(outp)); + + GetCoinsMapEntry(view->map(), value, flags, outp); + BOOST_CHECK_EQUAL(value, coin.out.nValue); + BOOST_CHECK_EQUAL(flags, DIRTY|FRESH); + + // --- 2. Flushing all caches (without erasing) + // + flush_all(/*erase=*/ false); + + // CoinsMap usage should be unchanged since we didn't erase anything. + BOOST_CHECK_EQUAL(cache_usage, view->DynamicMemoryUsage()); + + // --- 3. Ensuring the entry still exists in the cache and has been written to parent + // + GetCoinsMapEntry(view->map(), value, flags, outp); + BOOST_CHECK_EQUAL(value, coin.out.nValue); + BOOST_CHECK_EQUAL(flags, 0); // Flags should have been wiped. + + // Both views should now have the coin. + BOOST_CHECK(base.HaveCoin(outp)); + BOOST_CHECK(view->HaveCoin(outp)); + + if (do_erasing_flush) { + // --- 4. Flushing the caches again (with erasing) + // + flush_all(/*erase=*/ true); + + // Memory usage should have gone down. + BOOST_CHECK(view->DynamicMemoryUsage() < cache_usage); + + // --- 5. Ensuring the entry is no longer in the cache + // + GetCoinsMapEntry(view->map(), value, flags, outp); + BOOST_CHECK_EQUAL(value, ABSENT); + BOOST_CHECK_EQUAL(flags, NO_ENTRY); + + view->AccessCoin(outp); + GetCoinsMapEntry(view->map(), value, flags, outp); + BOOST_CHECK_EQUAL(value, coin.out.nValue); + BOOST_CHECK_EQUAL(flags, 0); + } + + // Can't overwrite an entry without specifying that an overwrite is + // expected. + BOOST_CHECK_THROW( + view->AddCoin(outp, Coin(coin), /*possible_overwrite=*/ false), + std::logic_error); + + // --- 6. Spend the coin. + // + BOOST_CHECK(view->SpendCoin(outp)); + + // The coin should be in the cache, but spent and marked dirty. + GetCoinsMapEntry(view->map(), value, flags, outp); + BOOST_CHECK_EQUAL(value, SPENT); + BOOST_CHECK_EQUAL(flags, DIRTY); + BOOST_CHECK(!view->HaveCoin(outp)); // Coin should be considered spent in `view`. + BOOST_CHECK(base.HaveCoin(outp)); // But coin should still be unspent in `base`. + + flush_all(/*erase=*/ false); + + // Coin should be considered spent in both views. + BOOST_CHECK(!view->HaveCoin(outp)); + BOOST_CHECK(!base.HaveCoin(outp)); + + // Spent coin should not be spendable. + BOOST_CHECK(!view->SpendCoin(outp)); + + // --- Bonus check: ensure that a coin added to the base view via one cache + // can be spent by another cache which has never seen it. 
+ // + txid = InsecureRand256(); + outp = COutPoint(txid, 0); + coin = MakeCoin(); + BOOST_CHECK(!base.HaveCoin(outp)); + BOOST_CHECK(!all_caches[0]->HaveCoin(outp)); + BOOST_CHECK(!all_caches[1]->HaveCoin(outp)); + + all_caches[0]->AddCoin(outp, std::move(coin), false); + all_caches[0]->Sync(); + BOOST_CHECK(base.HaveCoin(outp)); + BOOST_CHECK(all_caches[0]->HaveCoin(outp)); + BOOST_CHECK(!all_caches[1]->HaveCoinInCache(outp)); + + BOOST_CHECK(all_caches[1]->SpendCoin(outp)); + flush_all(/*erase=*/ false); + BOOST_CHECK(!base.HaveCoin(outp)); + BOOST_CHECK(!all_caches[0]->HaveCoin(outp)); + BOOST_CHECK(!all_caches[1]->HaveCoin(outp)); + + flush_all(/*erase=*/ true); // Erase all cache content. + + // --- Bonus check 2: ensure that a FRESH, spent coin is deleted by Sync() + // + txid = InsecureRand256(); + outp = COutPoint(txid, 0); + coin = MakeCoin(); + CAmount coin_val = coin.out.nValue; + BOOST_CHECK(!base.HaveCoin(outp)); + BOOST_CHECK(!all_caches[0]->HaveCoin(outp)); + BOOST_CHECK(!all_caches[1]->HaveCoin(outp)); + + // Add and spend from same cache without flushing. + all_caches[0]->AddCoin(outp, std::move(coin), false); + + // Coin should be FRESH in the cache. + GetCoinsMapEntry(all_caches[0]->map(), value, flags, outp); + BOOST_CHECK_EQUAL(value, coin_val); + BOOST_CHECK_EQUAL(flags, DIRTY|FRESH); + + // Base shouldn't have seen coin. + BOOST_CHECK(!base.HaveCoin(outp)); + + BOOST_CHECK(all_caches[0]->SpendCoin(outp)); + all_caches[0]->Sync(); + + // Ensure there is no sign of the coin after spend/flush. + GetCoinsMapEntry(all_caches[0]->map(), value, flags, outp); + BOOST_CHECK_EQUAL(value, ABSENT); + BOOST_CHECK_EQUAL(flags, NO_ENTRY); + BOOST_CHECK(!all_caches[0]->HaveCoinInCache(outp)); + BOOST_CHECK(!base.HaveCoin(outp)); +} + +BOOST_AUTO_TEST_CASE(ccoins_flush_behavior) +{ + // Create two in-memory caches atop a leveldb view. + CCoinsViewDB base{"test", /*nCacheSize=*/ 1 << 23, /*fMemory=*/ true, /*fWipe=*/ false}; + std::vector<CCoinsViewCacheTest*> caches; + caches.push_back(new CCoinsViewCacheTest(&base)); + caches.push_back(new CCoinsViewCacheTest(caches.back())); + + for (CCoinsViewCacheTest* view : caches) { + TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ false); + TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ true); + } + + // Clean up the caches. 
+ while (caches.size() > 0) { + delete caches.back(); + caches.pop_back(); + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index 2953cf149d..a59e41dbb5 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -117,7 +117,7 @@ void FillAddrman(AddrMan& addrman, FuzzedDataProvider& fuzzed_data_provider) const std::chrono::seconds time_penalty{fast_random_context.randrange(100000001)}; addrman.Add({addr}, source, time_penalty); - if (n > 0 && addrman.size() % n == 0) { + if (n > 0 && addrman.Size() % n == 0) { addrman.Good(addr, Now<NodeSeconds>()); } @@ -304,7 +304,7 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman) /*max_pct=*/fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096), /*network=*/std::nullopt); (void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool()); - (void)const_addr_man.size(); + (void)const_addr_man.Size(); CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION); data_stream << const_addr_man; } diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp index 46026d8df3..e75dc3ce91 100644 --- a/src/test/fuzz/coins_view.cpp +++ b/src/test/fuzz/coins_view.cpp @@ -75,6 +75,9 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view) (void)coins_view_cache.Flush(); }, [&] { + (void)coins_view_cache.Sync(); + }, + [&] { coins_view_cache.SetBestBlock(ConsumeUInt256(fuzzed_data_provider)); }, [&] { diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp index 975aff13c0..ac5dfe9e73 100644 --- a/src/test/util/net.cpp +++ b/src/test/util/net.cpp @@ -67,15 +67,14 @@ void ConnmanTestMsg::NodeReceiveMsgBytes(CNode& node, Span<const uint8_t> msg_by assert(node.ReceiveMsgBytes(msg_bytes, complete)); if (complete) { size_t nSizeAdded = 0; - auto it(node.vRecvMsg.begin()); - for (; it != node.vRecvMsg.end(); ++it) { + for (const auto& msg : node.vRecvMsg) { // vRecvMsg contains only completed CNetMessage // the single possible partially deserialized message are held by TransportDeserializer - nSizeAdded += it->m_raw_message_size; + nSizeAdded += msg.m_raw_message_size; } { LOCK(node.cs_vProcessMsg); - node.vProcessMsg.splice(node.vProcessMsg.end(), node.vRecvMsg, node.vRecvMsg.begin(), it); + node.vProcessMsg.splice(node.vProcessMsg.end(), node.vRecvMsg); node.nProcessQueueSize += nSizeAdded; node.fPauseRecv = node.nProcessQueueSize > nReceiveFloodSize; } diff --git a/src/test/util/net.h b/src/test/util/net.h index 90c606306f..e6506b0d08 100644 --- a/src/test/util/net.h +++ b/src/test/util/net.h @@ -103,7 +103,7 @@ constexpr auto ALL_NETWORKS = std::array{ class StaticContentsSock : public Sock { public: - explicit StaticContentsSock(const std::string& contents) : m_contents{contents}, m_consumed{0} + explicit StaticContentsSock(const std::string& contents) : m_contents{contents} { // Just a dummy number that is not INVALID_SOCKET. 
m_socket = INVALID_SOCKET - 1; @@ -191,7 +191,7 @@ public: private: const std::string m_contents; - mutable size_t m_consumed; + mutable size_t m_consumed{0}; }; std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candidates, FastRandomContext& random_context); diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 1b28e5f2c0..6e72f69968 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -208,23 +208,24 @@ ChainTestingSetup::~ChainTestingSetup() void TestingSetup::LoadVerifyActivateChainstate() { + auto& chainman{*Assert(m_node.chainman)}; node::ChainstateLoadOptions options; options.mempool = Assert(m_node.mempool.get()); options.block_tree_db_in_memory = m_block_tree_db_in_memory; options.coins_db_in_memory = m_coins_db_in_memory; options.reindex = node::fReindex; options.reindex_chainstate = m_args.GetBoolArg("-reindex-chainstate", false); - options.prune = node::fPruneMode; + options.prune = chainman.m_blockman.IsPruneMode(); options.check_blocks = m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS); options.check_level = m_args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL); - auto [status, error] = LoadChainstate(*Assert(m_node.chainman), m_cache_sizes, options); + auto [status, error] = LoadChainstate(chainman, m_cache_sizes, options); assert(status == node::ChainstateLoadStatus::SUCCESS); - std::tie(status, error) = VerifyLoadedChainstate(*Assert(m_node.chainman), options); + std::tie(status, error) = VerifyLoadedChainstate(chainman, options); assert(status == node::ChainstateLoadStatus::SUCCESS); BlockValidationState state; - if (!m_node.chainman->ActiveChainstate().ActivateBestChain(state)) { + if (!chainman.ActiveChainstate().ActivateBestChain(state)) { throw std::runtime_error(strprintf("ActivateBestChain failed. 
(%s)", state.ToString())); } } diff --git a/src/tinyformat.h b/src/tinyformat.h index 8eded00add..3ec385bc95 100644 --- a/src/tinyformat.h +++ b/src/tinyformat.h @@ -508,9 +508,6 @@ class FormatArg { public: FormatArg() - : m_value(nullptr), - m_formatImpl(nullptr), - m_toIntImpl(nullptr) { } template<typename T> @@ -549,10 +546,10 @@ class FormatArg return convertToInt<T>::invoke(*static_cast<const T*>(value)); } - const void* m_value; + const void* m_value{nullptr}; void (*m_formatImpl)(std::ostream& out, const char* fmtBegin, - const char* fmtEnd, int ntrunc, const void* value); - int (*m_toIntImpl)(const void* value); + const char* fmtEnd, int ntrunc, const void* value){nullptr}; + int (*m_toIntImpl)(const void* value){nullptr}; }; diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index b5f1fa7138..ece77f9023 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -53,8 +53,8 @@ static const uint16_t DEFAULT_TOR_SOCKS_PORT = 9050; /****** Low-level TorControlConnection ********/ -TorControlConnection::TorControlConnection(struct event_base *_base): - base(_base), b_conn(nullptr) +TorControlConnection::TorControlConnection(struct event_base* _base) + : base(_base) { } diff --git a/src/torcontrol.h b/src/torcontrol.h index 81475aee74..6563a2ef42 100644 --- a/src/torcontrol.h +++ b/src/torcontrol.h @@ -93,7 +93,7 @@ private: /** Libevent event base */ struct event_base *base; /** Connection to control socket */ - struct bufferevent *b_conn; + struct bufferevent* b_conn{nullptr}; /** Message being received */ TorControlReply message; /** Response handlers */ diff --git a/src/txdb.cpp b/src/txdb.cpp index f04a4e9800..c12b540b9b 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -111,7 +111,7 @@ std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const { return vhashHeadBlocks; } -bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { +bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) { CDBBatch batch(*m_db); size_t count = 0; size_t changed = 0; @@ -146,8 +146,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { changed++; } count++; - CCoinsMap::iterator itOld = it++; - mapCoins.erase(itOld); + it = erase ? mapCoins.erase(it) : std::next(it); if (batch.SizeEstimate() > batch_size) { LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); m_db->WriteBatch(batch); diff --git a/src/txdb.h b/src/txdb.h index 5a409d7dcc..e3422846c0 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -62,7 +62,7 @@ public: bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; std::vector<uint256> GetHeadBlocks() const override; - bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; + bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override; std::unique_ptr<CCoinsViewCursor> Cursor() const override; //! Whether an unsupported database format is used. 
diff --git a/src/univalue/include/univalue_utffilter.h b/src/univalue/include/univalue_utffilter.h index f688eaaa30..41d8e6bb05 100644 --- a/src/univalue/include/univalue_utffilter.h +++ b/src/univalue/include/univalue_utffilter.h @@ -13,8 +13,8 @@ class JSONUTF8StringFilter { public: - explicit JSONUTF8StringFilter(std::string &s): - str(s), is_valid(true), codepoint(0), state(0), surpair(0) + explicit JSONUTF8StringFilter(std::string& s) + : str(s) { } // Write single 8-bit char (may be part of UTF-8 sequence) @@ -79,10 +79,10 @@ public: } private: std::string &str; - bool is_valid; + bool is_valid{true}; // Current UTF-8 decoding state - unsigned int codepoint; - int state; // Top bit to be filled in for next UTF-8 byte, or 0 + unsigned int codepoint{0}; + int state{0}; // Top bit to be filled in for next UTF-8 byte, or 0 // Keep track of the following state to handle the following section of // RFC4627: @@ -94,7 +94,7 @@ private: // "\uD834\uDD1E". // // Two subsequent \u.... may have to be replaced with one actual codepoint. - unsigned int surpair; // First half of open UTF-16 surrogate pair, or 0 + unsigned int surpair{0}; // First half of open UTF-16 surrogate pair, or 0 void append_codepoint(unsigned int codepoint_) { diff --git a/src/util/sock.h b/src/util/sock.h index adcca377e3..6bac2dfd34 100644 --- a/src/util/sock.h +++ b/src/util/sock.h @@ -181,9 +181,9 @@ public: * Auxiliary requested/occurred events to wait for in `WaitMany()`. */ struct Events { - explicit Events(Event req) : requested{req}, occurred{0} {} + explicit Events(Event req) : requested{req} {} Event requested; - Event occurred; + Event occurred{0}; }; struct HashSharedPtrSock { diff --git a/src/validation.cpp b/src/validation.cpp index b42b398619..62ce1f1162 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -76,8 +76,6 @@ using node::BlockManager; using node::BlockMap; using node::CBlockIndexHeightOnlyComparator; using node::CBlockIndexWorkComparator; -using node::fImporting; -using node::fPruneMode; using node::fReindex; using node::ReadBlockFromDisk; using node::SnapshotMetadata; @@ -1573,8 +1571,9 @@ bool Chainstate::IsInitialBlockDownload() const LOCK(cs_main); if (m_cached_finished_ibd.load(std::memory_order_relaxed)) return false; - if (fImporting || fReindex) + if (m_chainman.m_blockman.LoadingBlocks()) { return true; + } if (m_chain.Tip() == nullptr) return true; if (m_chain.Tip()->nChainWork < m_chainman.MinimumChainWork()) { @@ -2411,7 +2410,7 @@ bool Chainstate::FlushStateToDisk( CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(); LOCK(m_blockman.cs_LastBlockFile); - if (fPruneMode && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) { + if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) { // make sure we don't prune above any of the prune locks bestblocks // pruning is height-based int last_prune{m_chain.Height()}; // last height we can prune @@ -4097,7 +4096,7 @@ bool CVerifyDB::VerifyDB( if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) { break; } - if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) { + if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) { // If pruning or running under an assumeutxo snapshot, only go // back as far as we have data. 
LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight); diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index c619222b3e..653115aa81 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -308,7 +308,7 @@ BerkeleyDatabase::~BerkeleyDatabase() } } -BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_database(database) +BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, bool fFlushOnCloseIn) : m_database(database) { database.AddRef(); database.Open(); diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index 9d1d68ba43..06c98972b0 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -207,9 +207,9 @@ private: bool HasKey(DataStream&& key) override; protected: - Db* pdb; + Db* pdb{nullptr}; std::string strFile; - DbTxn* activeTxn; + DbTxn* activeTxn{nullptr}; bool fReadOnly; bool fFlushOnClose; BerkeleyEnvironment *env; diff --git a/src/wallet/db.h b/src/wallet/db.h index 287fb1d19e..d4c590fac7 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -123,7 +123,7 @@ class WalletDatabase { public: /** Create dummy DB handle */ - WalletDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0) {} + WalletDatabase() : nUpdateCounter(0) {} virtual ~WalletDatabase() {}; /** Open the database if it is not already opened. */ @@ -165,9 +165,9 @@ public: virtual std::string Format() = 0; std::atomic<unsigned int> nUpdateCounter; - unsigned int nLastSeen; - unsigned int nLastFlushed; - int64_t nLastWalletUpdate; + unsigned int nLastSeen{0}; + unsigned int nLastFlushed{0}; + int64_t nLastWalletUpdate{0}; /** Make a DatabaseBatch connected to this database */ virtual std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) = 0; diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index 4a5b9d59e5..93a6bbde20 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -6,6 +6,7 @@ #include <clientversion.h> #include <core_io.h> #include <fs.h> +#include <hash.h> #include <interfaces/chain.h> #include <key_io.h> #include <merkleblock.h> @@ -14,6 +15,7 @@ #include <script/script.h> #include <script/standard.h> #include <sync.h> +#include <uint256.h> #include <util/bip32.h> #include <util/system.h> #include <util/time.h> @@ -886,9 +888,7 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d } case TxoutType::WITNESS_V0_SCRIPTHASH: { if (script_ctx == ScriptContext::WITNESS_V0) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Trying to nest P2WSH inside another P2WSH"); - uint256 fullid(solverdata[0]); - CScriptID id; - CRIPEMD160().Write(fullid.begin(), fullid.size()).Finalize(id.begin()); + CScriptID id{RIPEMD160(solverdata[0])}; auto subscript = std::move(import_data.witnessscript); // Remove redeemscript from import_data to check for superfluous script later. if (!subscript) return "missing witnessscript"; if (CScriptID(*subscript) != id) return "witnessScript does not match the scriptPubKey or redeemScript"; diff --git a/src/wallet/rpc/coins.cpp b/src/wallet/rpc/coins.cpp index 32151d5b5c..4c386789f1 100644 --- a/src/wallet/rpc/coins.cpp +++ b/src/wallet/rpc/coins.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include <core_io.h> +#include <hash.h> #include <key_io.h> #include <rpc/util.h> #include <util/moneystr.h> @@ -679,8 +680,7 @@ RPCHelpMan listunspent() CHECK_NONFATAL(extracted); // Also return the witness script const WitnessV0ScriptHash& whash = std::get<WitnessV0ScriptHash>(witness_destination); - CScriptID id; - CRIPEMD160().Write(whash.begin(), whash.size()).Finalize(id.begin()); + CScriptID id{RIPEMD160(whash)}; CScript witnessScript; if (provider->GetCScript(id, witnessScript)) { entry.pushKV("witnessScript", HexStr(witnessScript)); @@ -689,8 +689,7 @@ RPCHelpMan listunspent() } } else if (scriptPubKey.IsPayToWitnessScriptHash()) { const WitnessV0ScriptHash& whash = std::get<WitnessV0ScriptHash>(address); - CScriptID id; - CRIPEMD160().Write(whash.begin(), whash.size()).Finalize(id.begin()); + CScriptID id{RIPEMD160(whash)}; CScript witnessScript; if (provider->GetCScript(id, witnessScript)) { entry.pushKV("witnessScript", HexStr(witnessScript)); diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index d8f34dd2b0..d8fb926c9a 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -2,6 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include <hash.h> #include <key_io.h> #include <logging.h> #include <outputtype.h> @@ -166,9 +167,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s if (sigversion == IsMineSigVersion::TOP && !keystore.HaveCScript(CScriptID(CScript() << OP_0 << vSolutions[0]))) { break; } - uint160 hash; - CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(hash.begin()); - CScriptID scriptID = CScriptID(hash); + CScriptID scriptID{RIPEMD160(vSolutions[0])}; CScript subscript; if (keystore.GetCScript(scriptID, subscript)) { ret = std::max(ret, recurse_scripthash ? 
IsMineInner(keystore, subscript, IsMineSigVersion::WITNESS_V0) : IsMineResult::SPENDABLE); diff --git a/src/wallet/transaction.h b/src/wallet/transaction.h index 6ad222864a..290ef4eaa9 100644 --- a/src/wallet/transaction.h +++ b/src/wallet/transaction.h @@ -293,6 +293,7 @@ public: bool isAbandoned() const { return state<TxStateInactive>() && state<TxStateInactive>()->abandoned; } bool isConflicted() const { return state<TxStateConflicted>(); } + bool isInactive() const { return state<TxStateInactive>(); } bool isUnconfirmed() const { return !isAbandoned() && !isConflicted() && !isConfirmed(); } bool isConfirmed() const { return state<TxStateConfirmed>(); } const uint256& GetHash() const { return tx->GetHash(); } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index e47a696188..aadad258a9 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1065,6 +1065,33 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const TxState& state, const } } + // Mark inactive coinbase transactions and their descendants as abandoned + if (wtx.IsCoinBase() && wtx.isInactive()) { + std::vector<CWalletTx*> txs{&wtx}; + + TxStateInactive inactive_state = TxStateInactive{/*abandoned=*/true}; + + while (!txs.empty()) { + CWalletTx* desc_tx = txs.back(); + txs.pop_back(); + desc_tx->m_state = inactive_state; + // Break caches since we have changed the state + desc_tx->MarkDirty(); + batch.WriteTx(*desc_tx); + MarkInputsDirty(desc_tx->tx); + for (unsigned int i = 0; i < desc_tx->tx->vout.size(); ++i) { + COutPoint outpoint(desc_tx->GetHash(), i); + std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(outpoint); + for (TxSpends::const_iterator it = range.first; it != range.second; ++it) { + const auto wit = mapWallet.find(it->second); + if (wit != mapWallet.end()) { + txs.push_back(&wit->second); + } + } + } + } + } + //// debug print WalletLogPrintf("AddToWallet %s %s%s\n", hash.ToString(), (fInsertedNew ? "new" : ""), (fUpdated ? "update" : "")); @@ -1274,7 +1301,11 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) wtx.MarkDirty(); batch.WriteTx(wtx); NotifyTransactionChanged(wtx.GetHash(), CT_UPDATED); - // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too + // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too. + // States are not permanent, so these transactions can become unabandoned if they are re-added to the + // mempool, or confirmed in a block, or conflicted. + // Note: If the reorged coinbase is re-added to the main chain, the descendants that have not had their + // states change will remain abandoned and will require manual broadcast if the user wants them. 
for (unsigned int i = 0; i < wtx.tx->vout.size(); ++i) { std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(COutPoint(now, i)); for (TxSpends::const_iterator iter = range.first; iter != range.second; ++iter) { diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 8b7e6dd526..108db11d32 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -954,10 +954,10 @@ private: using Clock = std::chrono::steady_clock; using NowFn = std::function<Clock::time_point()>; CWallet& m_wallet; - bool m_could_reserve; + bool m_could_reserve{false}; NowFn m_now; public: - explicit WalletRescanReserver(CWallet& w) : m_wallet(w), m_could_reserve(false) {} + explicit WalletRescanReserver(CWallet& w) : m_wallet(w) {} bool reserve() { diff --git a/src/zmq/zmqabstractnotifier.h b/src/zmq/zmqabstractnotifier.h index 6899ee8fa6..cf0ee48f47 100644 --- a/src/zmq/zmqabstractnotifier.h +++ b/src/zmq/zmqabstractnotifier.h @@ -20,7 +20,7 @@ class CZMQAbstractNotifier public: static const int DEFAULT_ZMQ_SNDHWM {1000}; - CZMQAbstractNotifier() : psocket(nullptr), outbound_message_high_water_mark(DEFAULT_ZMQ_SNDHWM) { } + CZMQAbstractNotifier() : outbound_message_high_water_mark(DEFAULT_ZMQ_SNDHWM) {} virtual ~CZMQAbstractNotifier(); template <typename T> @@ -57,7 +57,7 @@ public: virtual bool NotifyTransaction(const CTransaction &transaction); protected: - void *psocket; + void* psocket{nullptr}; std::string type; std::string address; int outbound_message_high_water_mark; // aka SNDHWM diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp index 6dc4737d0a..df129c0830 100644 --- a/src/zmq/zmqnotificationinterface.cpp +++ b/src/zmq/zmqnotificationinterface.cpp @@ -21,7 +21,7 @@ #include <utility> #include <vector> -CZMQNotificationInterface::CZMQNotificationInterface() : pcontext(nullptr) +CZMQNotificationInterface::CZMQNotificationInterface() { } diff --git a/src/zmq/zmqnotificationinterface.h b/src/zmq/zmqnotificationinterface.h index b24d4664da..a43f9bfef3 100644 --- a/src/zmq/zmqnotificationinterface.h +++ b/src/zmq/zmqnotificationinterface.h @@ -39,7 +39,7 @@ protected: private: CZMQNotificationInterface(); - void *pcontext; + void* pcontext{nullptr}; std::list<std::unique_ptr<CZMQAbstractNotifier>> notifiers; };
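Most of the remaining hunks (tinyformat, torcontrol, wallet/bdb, util/sock, zmq) apply the same mechanical refactor: trivial constructor-initializer-list assignments become in-class default member initializers. A minimal before/after sketch on a made-up Widget class, not taken from the diff:

    #include <string>

    // Before: every constructor must remember to initialize each member.
    class WidgetOld
    {
        void* m_handle;
        std::string m_name;
        int m_hwm;
    public:
        WidgetOld() : m_handle(nullptr), m_hwm(1000) {}
    };

    // After: defaults live next to the declarations; constructors only list
    // what genuinely varies, and no member can be forgotten.
    class WidgetNew
    {
        void* m_handle{nullptr};
        std::string m_name;
        int m_hwm{1000};
    public:
        WidgetNew() = default;
    };

The payoff is visible in constructors such as CZMQAbstractNotifier() and TorControlConnection(), which now list only the members that still take an argument.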