Diffstat (limited to 'src')
-rw-r--r-- src/bench/crypto_hash.cpp | 4
-rw-r--r-- src/bench/load_external.cpp | 2
-rw-r--r-- src/bench/prevector.cpp | 2
-rw-r--r-- src/bitcoin-util.cpp | 2
-rw-r--r-- src/blockencodings.cpp | 2
-rw-r--r-- src/coins.cpp | 52
-rw-r--r-- src/coins.h | 20
-rw-r--r-- src/common/bloom.cpp | 4
-rw-r--r-- src/core_read.cpp | 2
-rw-r--r-- src/dbwrapper.h | 14
-rw-r--r-- src/hash.h | 42
-rw-r--r-- src/index/base.cpp | 5
-rw-r--r-- src/init.cpp | 10
-rw-r--r-- src/kernel/coinstats.cpp | 5
-rw-r--r-- src/kernel/coinstats.h | 2
-rw-r--r-- src/mapport.h | 8
-rw-r--r-- src/net_processing.cpp | 59
-rw-r--r-- src/node/blockstorage.cpp | 12
-rw-r--r-- src/node/blockstorage.h | 14
-rw-r--r-- src/node/chainstate.cpp | 6
-rw-r--r-- src/node/interfaces.cpp | 5
-rw-r--r-- src/node/utxo_snapshot.cpp | 5
-rw-r--r-- src/node/utxo_snapshot.h | 9
-rw-r--r-- src/qt/recentrequeststablemodel.cpp | 4
-rw-r--r-- src/qt/test/wallettests.cpp | 2
-rw-r--r-- src/rest.cpp | 20
-rw-r--r-- src/rpc/blockchain.cpp | 15
-rw-r--r-- src/rpc/txoutproof.cpp | 4
-rw-r--r-- src/script/descriptor.cpp | 5
-rw-r--r-- src/script/sign.cpp | 10
-rw-r--r-- src/script/sign.h | 1
-rw-r--r-- src/streams.h | 82
-rw-r--r-- src/sync.h | 2
-rw-r--r-- src/test/blockencodings_tests.cpp | 6
-rw-r--r-- src/test/blockfilter_tests.cpp | 2
-rw-r--r-- src/test/bloom_tests.cpp | 8
-rw-r--r-- src/test/coins_tests.cpp | 254
-rw-r--r-- src/test/crypto_tests.cpp | 4
-rw-r--r-- src/test/fuzz/coins_view.cpp | 3
-rw-r--r-- src/test/fuzz/integer.cpp | 6
-rw-r--r-- src/test/fuzz/key.cpp | 2
-rw-r--r-- src/test/fuzz/prevector.cpp | 4
-rw-r--r-- src/test/fuzz/rpc.cpp | 2
-rw-r--r-- src/test/fuzz/string.cpp | 4
-rw-r--r-- src/test/fuzz/tx_in.cpp | 5
-rw-r--r-- src/test/fuzz/tx_out.cpp | 5
-rw-r--r-- src/test/key_tests.cpp | 4
-rw-r--r-- src/test/pmt_tests.cpp | 2
-rw-r--r-- src/test/prevector_tests.cpp | 4
-rw-r--r-- src/test/serfloat_tests.cpp | 2
-rw-r--r-- src/test/serialize_tests.cpp | 10
-rw-r--r-- src/test/streams_tests.cpp | 14
-rw-r--r-- src/test/uint256_tests.cpp | 2
-rw-r--r-- src/test/util/setup_common.cpp | 9
-rw-r--r-- src/txdb.cpp | 5
-rw-r--r-- src/txdb.h | 2
-rw-r--r-- src/validation.cpp | 9
-rw-r--r-- src/wallet/bdb.cpp | 16
-rw-r--r-- src/wallet/bdb.h | 10
-rw-r--r-- src/wallet/db.h | 28
-rw-r--r-- src/wallet/dump.cpp | 8
-rw-r--r-- src/wallet/rpc/backup.cpp | 8
-rw-r--r-- src/wallet/rpc/coins.cpp | 7
-rw-r--r-- src/wallet/salvage.cpp | 2
-rw-r--r-- src/wallet/scriptpubkeyman.cpp | 5
-rw-r--r-- src/wallet/sqlite.cpp | 10
-rw-r--r-- src/wallet/sqlite.h | 10
-rw-r--r-- src/wallet/test/util.cpp | 4
-rw-r--r-- src/wallet/test/wallet_tests.cpp | 10
-rw-r--r-- src/wallet/test/walletload_tests.cpp | 4
-rw-r--r-- src/wallet/transaction.h | 1
-rw-r--r-- src/wallet/wallet.cpp | 41
-rw-r--r-- src/wallet/walletdb.cpp | 14
-rw-r--r-- src/wallet/walletdb.h | 2
74 files changed, 682 insertions, 297 deletions
diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp
index bd524e7458..cf8d807d7b 100644
--- a/src/bench/crypto_hash.cpp
+++ b/src/bench/crypto_hash.cpp
@@ -18,7 +18,7 @@
/* Number of bytes to hash per iteration */
static const uint64_t BUFFER_SIZE = 1000*1000;
-static void RIPEMD160(benchmark::Bench& bench)
+static void BenchRIPEMD160(benchmark::Bench& bench)
{
uint8_t hash[CRIPEMD160::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
@@ -150,7 +150,7 @@ static void MuHashPrecompute(benchmark::Bench& bench)
});
}
-BENCHMARK(RIPEMD160, benchmark::PriorityLevel::HIGH);
+BENCHMARK(BenchRIPEMD160, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA1, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA512, benchmark::PriorityLevel::HIGH);
diff --git a/src/bench/load_external.cpp b/src/bench/load_external.cpp
index be01b2a483..0fd842c7c3 100644
--- a/src/bench/load_external.cpp
+++ b/src/bench/load_external.cpp
@@ -27,7 +27,7 @@ static void LoadExternalBlockFile(benchmark::Bench& bench)
// Create a single block as in the blocks files (magic bytes, block size,
// block data) as a stream object.
const fs::path blkfile{testing_setup.get()->m_path_root / "blk.dat"};
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
auto params{testing_setup->m_node.chainman->GetParams()};
ss << params.MessageStart();
ss << static_cast<uint32_t>(benchmark::data::block413567.size());
diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp
index ef1ea1162b..59c4af086e 100644
--- a/src/bench/prevector.cpp
+++ b/src/bench/prevector.cpp
@@ -61,7 +61,7 @@ static void PrevectorResize(benchmark::Bench& bench)
template <typename T>
static void PrevectorDeserialize(benchmark::Bench& bench)
{
- CDataStream s0(SER_NETWORK, 0);
+ DataStream s0{};
prevector<28, T> t0;
t0.resize(28);
for (auto x = 0; x < 900; ++x) {
diff --git a/src/bitcoin-util.cpp b/src/bitcoin-util.cpp
index 7327875b64..61d4b9c6f1 100644
--- a/src/bitcoin-util.cpp
+++ b/src/bitcoin-util.cpp
@@ -139,7 +139,7 @@ static int Grind(const std::vector<std::string>& args, std::string& strPrint)
return EXIT_FAILURE;
}
- CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ss{};
ss << header;
strPrint = HexStr(ss);
return EXIT_SUCCESS;
diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp
index 0d5575e5d5..a29e4f794e 100644
--- a/src/blockencodings.cpp
+++ b/src/blockencodings.cpp
@@ -29,7 +29,7 @@ CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) :
}
void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << header << nonce;
CSHA256 hasher;
hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
diff --git a/src/coins.cpp b/src/coins.cpp
index 976118e23c..31ac67674a 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -13,7 +13,7 @@
bool CCoinsView::GetCoin(const COutPoint &outpoint, Coin &coin) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
std::vector<uint256> CCoinsView::GetHeadBlocks() const { return std::vector<uint256>(); }
-bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return false; }
+bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) { return false; }
std::unique_ptr<CCoinsViewCursor> CCoinsView::Cursor() const { return nullptr; }
bool CCoinsView::HaveCoin(const COutPoint &outpoint) const
@@ -28,7 +28,7 @@ bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
std::vector<uint256> CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
-bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return base->BatchWrite(mapCoins, hashBlock); }
+bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) { return base->BatchWrite(mapCoins, hashBlock, erase); }
std::unique_ptr<CCoinsViewCursor> CCoinsViewBacked::Cursor() const { return base->Cursor(); }
size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); }
@@ -176,8 +176,10 @@ void CCoinsViewCache::SetBestBlock(const uint256 &hashBlockIn) {
hashBlock = hashBlockIn;
}
-bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn) {
- for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); it = mapCoins.erase(it)) {
+bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn, bool erase) {
+ for (CCoinsMap::iterator it = mapCoins.begin();
+ it != mapCoins.end();
+ it = erase ? mapCoins.erase(it) : std::next(it)) {
// Ignore non-dirty entries (optimization).
if (!(it->second.flags & CCoinsCacheEntry::DIRTY)) {
continue;
@@ -190,7 +192,14 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn
// Create the coin in the parent cache, move the data up
// and mark it as dirty.
CCoinsCacheEntry& entry = cacheCoins[it->first];
- entry.coin = std::move(it->second.coin);
+ if (erase) {
+ // The `move` call here is purely an optimization; we rely on the
+ // `mapCoins.erase` call in the `for` expression to actually remove
+ // the entry from the child map.
+ entry.coin = std::move(it->second.coin);
+ } else {
+ entry.coin = it->second.coin;
+ }
cachedCoinsUsage += entry.coin.DynamicMemoryUsage();
entry.flags = CCoinsCacheEntry::DIRTY;
// We can mark it FRESH in the parent if it was FRESH in the child
@@ -218,7 +227,14 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn
} else {
// A normal modification.
cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage();
- itUs->second.coin = std::move(it->second.coin);
+ if (erase) {
+ // The `move` call here is purely an optimization; we rely on the
+ // `mapCoins.erase` call in the `for` expression to actually remove
+ // the entry from the child map.
+ itUs->second.coin = std::move(it->second.coin);
+ } else {
+ itUs->second.coin = it->second.coin;
+ }
cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage();
itUs->second.flags |= CCoinsCacheEntry::DIRTY;
// NOTE: It isn't safe to mark the coin as FRESH in the parent
@@ -233,12 +249,32 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn
}
bool CCoinsViewCache::Flush() {
- bool fOk = base->BatchWrite(cacheCoins, hashBlock);
- cacheCoins.clear();
+ bool fOk = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/true);
+ if (fOk && !cacheCoins.empty()) {
+ /* BatchWrite must erase all cacheCoins elements when erase=true. */
+ throw std::logic_error("Not all cached coins were erased");
+ }
cachedCoinsUsage = 0;
return fOk;
}
+bool CCoinsViewCache::Sync()
+{
+ bool fOk = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/false);
+ // Instead of clearing `cacheCoins` as we would in Flush(), just clear the
+ // FRESH/DIRTY flags of any coin that isn't spent.
+ for (auto it = cacheCoins.begin(); it != cacheCoins.end(); ) {
+ if (it->second.coin.IsSpent()) {
+ cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
+ it = cacheCoins.erase(it);
+ } else {
+ it->second.flags = 0;
+ ++it;
+ }
+ }
+ return fOk;
+}
+
void CCoinsViewCache::Uncache(const COutPoint& hash)
{
CCoinsMap::iterator it = cacheCoins.find(hash);
diff --git a/src/coins.h b/src/coins.h
index b0d6bdf333..4edc146a14 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -176,7 +176,7 @@ public:
//! Do a bulk modification (multiple Coin changes + BestBlock change).
//! The passed mapCoins can be modified.
- virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock);
+ virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true);
//! Get a cursor to iterate over the whole state
virtual std::unique_ptr<CCoinsViewCursor> Cursor() const;
@@ -202,7 +202,7 @@ public:
uint256 GetBestBlock() const override;
std::vector<uint256> GetHeadBlocks() const override;
void SetBackend(CCoinsView &viewIn);
- bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
+ bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override;
std::unique_ptr<CCoinsViewCursor> Cursor() const override;
size_t EstimateSize() const override;
};
@@ -235,7 +235,7 @@ public:
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
void SetBestBlock(const uint256 &hashBlock);
- bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
+ bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override;
std::unique_ptr<CCoinsViewCursor> Cursor() const override {
throw std::logic_error("CCoinsViewCache cursor iteration not supported.");
}
@@ -282,13 +282,23 @@ public:
bool SpendCoin(const COutPoint &outpoint, Coin* moveto = nullptr);
/**
- * Push the modifications applied to this cache to its base.
- * Failure to call this method before destruction will cause the changes to be forgotten.
+ * Push the modifications applied to this cache to its base and wipe local state.
+ * Failure to call this method or Sync() before destruction will cause the changes
+ * to be forgotten.
* If false is returned, the state of this cache (and its backing view) will be undefined.
*/
bool Flush();
/**
+ * Push the modifications applied to this cache to its base while retaining
+ * the contents of this cache (except for spent coins, which we erase).
+ * Failure to call this method or Flush() before destruction will cause the changes
+ * to be forgotten.
+ * If false is returned, the state of this cache (and its backing view) will be undefined.
+ */
+ bool Sync();
+
+ /**
* Removes the UTXO with the given outpoint from the cache, if it is
* not modified.
*/
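For orientation (not part of the patch), a minimal sketch of how a caller might choose between the two methods; `base_view` and `wipe_cache` are hypothetical names used only for illustration:

    // Hypothetical caller of the new Flush()/Sync() pair.
    CCoinsViewCache cache{&base_view};
    // ... AddCoin()/SpendCoin() calls mark entries DIRTY in the cache ...
    if (wipe_cache) {
        cache.Flush(); // write dirty entries to base_view, then drop the whole cache
    } else {
        cache.Sync();  // write the same entries, but keep unspent coins cached with
                       // their FRESH/DIRTY flags cleared; only spent entries are erased
    }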
diff --git a/src/common/bloom.cpp b/src/common/bloom.cpp
index 3ba0414b31..fd3276b5a7 100644
--- a/src/common/bloom.cpp
+++ b/src/common/bloom.cpp
@@ -60,7 +60,7 @@ void CBloomFilter::insert(Span<const unsigned char> vKey)
void CBloomFilter::insert(const COutPoint& outpoint)
{
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << outpoint;
insert(MakeUCharSpan(stream));
}
@@ -81,7 +81,7 @@ bool CBloomFilter::contains(Span<const unsigned char> vKey) const
bool CBloomFilter::contains(const COutPoint& outpoint) const
{
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << outpoint;
return contains(MakeUCharSpan(stream));
}
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 7bab171c89..84cd559b7f 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -207,7 +207,7 @@ bool DecodeHexBlockHeader(CBlockHeader& header, const std::string& hex_header)
if (!IsHex(hex_header)) return false;
const std::vector<unsigned char> header_data{ParseHex(hex_header)};
- CDataStream ser_header(header_data, SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ser_header{header_data};
try {
ser_header >> header;
} catch (const std::exception&) {
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index 3d3eee32ce..f47bd8188e 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -68,7 +68,7 @@ private:
const CDBWrapper &parent;
leveldb::WriteBatch batch;
- CDataStream ssKey;
+ DataStream ssKey{};
CDataStream ssValue;
size_t size_estimate;
@@ -77,7 +77,7 @@ public:
/**
* @param[in] _parent CDBWrapper that this batch is to be submitted to
*/
- explicit CDBBatch(const CDBWrapper &_parent) : parent(_parent), ssKey(SER_DISK, CLIENT_VERSION), ssValue(SER_DISK, CLIENT_VERSION), size_estimate(0) { };
+ explicit CDBBatch(const CDBWrapper& _parent) : parent(_parent), ssValue(SER_DISK, CLIENT_VERSION), size_estimate(0){};
void Clear()
{
@@ -151,7 +151,7 @@ public:
void SeekToFirst();
template<typename K> void Seek(const K& key) {
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size());
@@ -163,7 +163,7 @@ public:
template<typename K> bool GetKey(K& key) {
leveldb::Slice slKey = piter->key();
try {
- CDataStream ssKey{MakeByteSpan(slKey), SER_DISK, CLIENT_VERSION};
+ DataStream ssKey{MakeByteSpan(slKey)};
ssKey >> key;
} catch (const std::exception&) {
return false;
@@ -247,7 +247,7 @@ public:
template <typename K, typename V>
bool Read(const K& key, V& value) const
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size());
@@ -289,7 +289,7 @@ public:
template <typename K>
bool Exists(const K& key) const
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size());
@@ -331,7 +331,7 @@ public:
template<typename K>
size_t EstimateSize(const K& key_begin, const K& key_end) const
{
- CDataStream ssKey1(SER_DISK, CLIENT_VERSION), ssKey2(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey1{}, ssKey2{};
ssKey1.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey1 << key_begin;
diff --git a/src/hash.h b/src/hash.h
index 6500f1c709..2e3ed11b43 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -12,6 +12,7 @@
#include <crypto/sha256.h>
#include <prevector.h>
#include <serialize.h>
+#include <span.h>
#include <uint256.h>
#include <version.h>
@@ -166,6 +167,39 @@ public:
};
/** Reads data from an underlying stream, while hashing the read data. */
+template <typename Source>
+class HashVerifier : public HashWriter
+{
+private:
+ Source& m_source;
+
+public:
+ explicit HashVerifier(Source& source LIFETIMEBOUND) : m_source{source} {}
+
+ void read(Span<std::byte> dst)
+ {
+ m_source.read(dst);
+ this->write(dst);
+ }
+
+ void ignore(size_t num_bytes)
+ {
+ std::byte data[1024];
+ while (num_bytes > 0) {
+ size_t now = std::min<size_t>(num_bytes, 1024);
+ read({data, now});
+ num_bytes -= now;
+ }
+ }
+
+ template <typename T>
+ HashVerifier<Source>& operator>>(T&& obj)
+ {
+ ::Unserialize(*this, obj);
+ return *this;
+ }
+};
+
template<typename Source>
class CHashVerifier : public CHashWriter
{
@@ -248,4 +282,12 @@ void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char he
*/
HashWriter TaggedHash(const std::string& tag);
+/** Compute the 160-bit RIPEMD-160 hash of an array. */
+inline uint160 RIPEMD160(Span<const unsigned char> data)
+{
+ uint160 result;
+ CRIPEMD160().Write(data.data(), data.size()).Finalize(result.begin());
+ return result;
+}
+
#endif // BITCOIN_HASH_H
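As a rough illustration of the two helpers added to hash.h above (the names `pos`, `prev_block_hash`, `undo`, and `witness_program` are placeholders, not identifiers from the patch):

    // HashVerifier hashes every byte it reads from the wrapped source, so a
    // caller can unserialize data and compute its checksum in a single pass.
    AutoFile file{OpenUndoFile(pos, true)};
    HashVerifier verifier{file};       // Source type deduced from the argument
    verifier << prev_block_hash;       // extra data folded into the hash
    verifier >> undo;                  // read and hash the serialized payload
    uint256 checksum{verifier.GetHash()};

    // RIPEMD160() wraps the CRIPEMD160 Write()/Finalize() sequence in one call.
    uint160 program_hash{RIPEMD160(witness_program)};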
diff --git a/src/index/base.cpp b/src/index/base.cpp
index a8b8cbe8a9..1d5c0dbe24 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -415,8 +415,9 @@ IndexSummary BaseIndex::GetSummary() const
return summary;
}
-void BaseIndex::SetBestBlockIndex(const CBlockIndex* block) {
- assert(!node::fPruneMode || AllowPrune());
+void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
+{
+ assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
if (AllowPrune() && block) {
node::PruneLockInfo prune_lock;
diff --git a/src/init.cpp b/src/init.cpp
index 75eb114163..5b486854e0 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1499,7 +1499,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
options.mempool = Assert(node.mempool.get());
options.reindex = node::fReindex;
options.reindex_chainstate = fReindexChainState;
- options.prune = node::fPruneMode;
+ options.prune = chainman.m_blockman.IsPruneMode();
options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
options.check_level = args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
options.check_interrupt = ShutdownRequested;
@@ -1609,7 +1609,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// if pruning, perform the initial blockstore prune
// after any wallet rescanning has taken place.
- if (fPruneMode) {
+ if (chainman.m_blockman.IsPruneMode()) {
if (!fReindex) {
LOCK(cs_main);
for (Chainstate* chainstate : chainman.GetAll()) {
@@ -1637,8 +1637,10 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// On first startup, warn on low block storage space
if (!fReindex && !fReindexChainState && chain_active_height <= 1) {
- uint64_t additional_bytes_needed = fPruneMode ? nPruneTarget
- : chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024;
+ uint64_t additional_bytes_needed{
+ chainman.m_blockman.IsPruneMode() ?
+ chainman.m_blockman.GetPruneTarget() :
+ chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024};
if (!CheckDiskSpace(args.GetBlocksDirPath(), additional_bytes_needed)) {
InitWarning(strprintf(_(
diff --git a/src/kernel/coinstats.cpp b/src/kernel/coinstats.cpp
index 06a4b8c974..82d7d8c46b 100644
--- a/src/kernel/coinstats.cpp
+++ b/src/kernel/coinstats.cpp
@@ -48,8 +48,9 @@ uint64_t GetBogoSize(const CScript& script_pub_key)
script_pub_key.size() /* scriptPubKey */;
}
-CDataStream TxOutSer(const COutPoint& outpoint, const Coin& coin) {
- CDataStream ss(SER_DISK, PROTOCOL_VERSION);
+DataStream TxOutSer(const COutPoint& outpoint, const Coin& coin)
+{
+ DataStream ss{};
ss << outpoint;
ss << static_cast<uint32_t>(coin.nHeight * 2 + coin.fCoinBase);
ss << coin.out;
diff --git a/src/kernel/coinstats.h b/src/kernel/coinstats.h
index b7c1328e93..54d0e4f664 100644
--- a/src/kernel/coinstats.h
+++ b/src/kernel/coinstats.h
@@ -72,7 +72,7 @@ struct CCoinsStats {
uint64_t GetBogoSize(const CScript& script_pub_key);
-CDataStream TxOutSer(const COutPoint& outpoint, const Coin& coin);
+DataStream TxOutSer(const COutPoint& outpoint, const Coin& coin);
std::optional<CCoinsStats> ComputeUTXOStats(CoinStatsHashType hash_type, CCoinsView* view, node::BlockManager& blockman, const std::function<void()>& interruption_point = {});
} // namespace kernel
diff --git a/src/mapport.h b/src/mapport.h
index 279d65167f..6f55c46f6c 100644
--- a/src/mapport.h
+++ b/src/mapport.h
@@ -5,17 +5,9 @@
#ifndef BITCOIN_MAPPORT_H
#define BITCOIN_MAPPORT_H
-#ifdef USE_UPNP
-static constexpr bool DEFAULT_UPNP = USE_UPNP;
-#else
static constexpr bool DEFAULT_UPNP = false;
-#endif // USE_UPNP
-#ifdef USE_NATPMP
-static constexpr bool DEFAULT_NATPMP = USE_NATPMP;
-#else
static constexpr bool DEFAULT_NATPMP = false;
-#endif // USE_NATPMP
enum MapPortProtoFlag : unsigned int {
NONE = 0x00,
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index c764b61d7a..a659300a0d 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -53,9 +53,6 @@
using node::ReadBlockFromDisk;
using node::ReadRawBlockFromDisk;
-using node::fImporting;
-using node::fPruneMode;
-using node::fReindex;
/** How long to cache transactions in mapRelay for normal relay */
static constexpr auto RELAY_TX_CACHE_TIME = 15min;
@@ -113,8 +110,11 @@ static constexpr auto GETDATA_TX_INTERVAL{60s};
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
-/** Time during which a peer must stall block download progress before being disconnected. */
-static constexpr auto BLOCK_STALLING_TIMEOUT{2s};
+/** Default time during which a peer must stall block download progress before being disconnected.
+ * The actual timeout is increased temporarily if peers are disconnected for hitting the timeout. */
+static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
+/** Maximum timeout for stalling block download. */
+static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
@@ -774,6 +774,9 @@ private:
/** Number of preferable block download peers. */
int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
+ /** Stalling timeout for blocks in IBD */
+ std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
+
bool AlreadyHaveTx(const GenTxid& gtxid)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);
@@ -1733,8 +1736,7 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
{
- if (fImporting) return "Importing...";
- if (fReindex) return "Reindexing...";
+ if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";
// Ensure this peer exists and hasn't been disconnected
PeerRef peer = GetPeerRef(peer_id);
@@ -1812,7 +1814,8 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
/**
* Evict orphan txn pool entries based on a newly connected
* block, remember the recently confirmed transactions, and delete tracked
- * announcements for them. Also save the time of the last tip update.
+ * announcements for them. Also save the time of the last tip update and
+ * possibly reduce dynamic block stalling timeout.
*/
void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
{
@@ -1835,6 +1838,16 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock
m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
}
}
+
+ // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
+ auto stalling_timeout = m_block_stalling_timeout.load();
+ Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
+ if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
+ const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
+ if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
+ LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
+ }
+ }
}
void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
@@ -3362,7 +3375,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// If the peer is old enough to have the old alert system, send it the final alert.
if (greatest_common_version <= 70012) {
- CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
+ DataStream finalAlert{ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50")};
m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", finalAlert));
}
@@ -3680,7 +3693,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
UpdateBlockAvailability(pfrom.GetId(), inv.hash);
- if (!fAlreadyHave && !fImporting && !fReindex && !IsBlockRequested(inv.hash)) {
+ if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
// Headers-first is the primary method of announcement on
// the network. If a node fell back to sending blocks by
// inv, it may be for a re-org, or because we haven't
@@ -3813,8 +3826,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// If pruning, don't inv blocks unless we have on disk and are likely to still have
// for some reasonable time window (1 hour) that block relay might require.
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
- if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave))
- {
+ if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
@@ -3890,7 +3902,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
return;
}
@@ -4169,7 +4181,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::CMPCTBLOCK)
{
// Ignore cmpctblock received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
return;
}
@@ -4385,7 +4397,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::BLOCKTXN)
{
// Ignore blocktxn received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
return;
}
@@ -4460,7 +4472,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::HEADERS)
{
// Ignore headers received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
return;
}
@@ -4505,7 +4517,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::BLOCK)
{
// Ignore block received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
return;
}
@@ -5092,7 +5104,7 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
if (now > m_stale_tip_check_time) {
// Check whether our tip is stale, and if so, allow using an extra
// outbound peer
- if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
+ if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
count_seconds(now - m_last_tip_update.load()));
m_connman.SetTryNewOutboundPeer(true);
@@ -5399,7 +5411,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
}
}
- if (!state.fSyncStarted && CanServeBlocks(*peer) && !fImporting && !fReindex) {
+ if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
const CBlockIndex* pindexStart = m_chainman.m_best_header;
@@ -5713,12 +5725,19 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
// Detect whether we're stalling
- if (state.m_stalling_since.count() && state.m_stalling_since < current_time - BLOCK_STALLING_TIMEOUT) {
+ auto stalling_timeout = m_block_stalling_timeout.load();
+ if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
pto->fDisconnect = true;
+ // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
+ // bandwidth is insufficient.
+ const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
+ if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
+ LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
+ }
return true;
}
// In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
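To make the new adaptive behaviour concrete, here is a small standalone sketch (not code from the patch) of the two adjustment rules: each stalling disconnect doubles the timeout up to the 64s cap, and each connected block shrinks it by 15% back toward the 2s default.

    #include <algorithm>
    #include <chrono>

    using namespace std::chrono_literals;

    // Doubling rule applied when a peer is disconnected for stalling.
    std::chrono::seconds OnStallingDisconnect(std::chrono::seconds cur)
    {
        return std::min(2 * cur, 64s); // 2s -> 4s -> 8s -> ... -> 64s
    }

    // Decay rule applied when a new block is connected.
    std::chrono::seconds OnBlockConnected(std::chrono::seconds cur)
    {
        const auto decayed = std::chrono::duration_cast<std::chrono::seconds>(cur * 0.85);
        return std::max(decayed, 2s);  // never drops below the default
    }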
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index b8a57acf80..a81099a26c 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -352,7 +352,7 @@ bool BlockManager::LoadBlockIndexDB(const Consensus::Params& consensus_params)
}
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
FlatFilePos pos(*it, 0);
- if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
+ if (AutoFile{OpenBlockFile(pos, true)}.IsNull()) {
return false;
}
}
@@ -454,13 +454,13 @@ CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
- CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
+ AutoFile fileout{OpenUndoFile(pos)};
if (fileout.IsNull()) {
return error("%s: OpenUndoFile failed", __func__);
}
// Write index header
- unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
+ unsigned int nSize = GetSerializeSize(blockundo, CLIENT_VERSION);
fileout << messageStart << nSize;
// Write undo data
@@ -489,14 +489,14 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
}
// Open history file to read
- CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
+ AutoFile filein{OpenUndoFile(pos, true)};
if (filein.IsNull()) {
return error("%s: OpenUndoFile failed", __func__);
}
// Read block
uint256 hashChecksum;
- CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
+ HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
try {
verifier << pindex->pprev->GetBlockHash();
verifier >> blockundo;
@@ -768,7 +768,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
{
FlatFilePos hpos = pos;
hpos.nPos -= 8; // Seek back 8 bytes for meta header
- CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
+ AutoFile filein{OpenBlockFile(hpos, true)};
if (filein.IsNull()) {
return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
}
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index cdf667c754..b6007897df 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -48,10 +48,7 @@ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = CMessageHeader::MESSAG
extern std::atomic_bool fImporting;
extern std::atomic_bool fReindex;
-/** Pruning-related variables and constants */
-/** True if we're running in -prune mode. */
extern bool fPruneMode;
-/** Number of bytes of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
// Because validation code takes pointers to the map's CBlockIndex objects, if
@@ -176,6 +173,17 @@ public:
/** Store block on disk. If dbp is not nullptr, then it provides the known position of the block within a block file on disk. */
FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp);
+ /** Whether running in -prune mode. */
+ [[nodiscard]] bool IsPruneMode() const { return fPruneMode; }
+
+ /** Attempt to stay below this number of bytes of block files. */
+ [[nodiscard]] uint64_t GetPruneTarget() const { return nPruneTarget; }
+
+ [[nodiscard]] bool LoadingBlocks() const
+ {
+ return fImporting || fReindex;
+ }
+
/** Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage();
diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp
index 99dc319ec0..ba1024d22e 100644
--- a/src/node/chainstate.cpp
+++ b/src/node/chainstate.cpp
@@ -44,10 +44,10 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
if (chainman.MinimumChainWork() < UintToArith256(chainman.GetConsensus().nMinimumChainWork)) {
LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainman.GetConsensus().nMinimumChainWork.GetHex());
}
- if (nPruneTarget == std::numeric_limits<uint64_t>::max()) {
+ if (chainman.m_blockman.GetPruneTarget() == std::numeric_limits<uint64_t>::max()) {
LogPrintf("Block pruning enabled. Use RPC call pruneblockchain(height) to manually prune block and undo files.\n");
- } else if (nPruneTarget) {
- LogPrintf("Prune configured to target %u MiB on disk for block and undo files.\n", nPruneTarget / 1024 / 1024);
+ } else if (chainman.m_blockman.GetPruneTarget()) {
+ LogPrintf("Prune configured to target %u MiB on disk for block and undo files.\n", chainman.m_blockman.GetPruneTarget() / 1024 / 1024);
}
LOCK(cs_main);
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 4f3dc99bbf..eda359568f 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -711,8 +711,9 @@ public:
LOCK(::cs_main);
return chainman().m_blockman.m_have_pruned;
}
- bool isReadyToBroadcast() override { return !node::fImporting && !node::fReindex && !isInitialBlockDownload(); }
- bool isInitialBlockDownload() override {
+ bool isReadyToBroadcast() override { return !chainman().m_blockman.LoadingBlocks() && !isInitialBlockDownload(); }
+ bool isInitialBlockDownload() override
+ {
return chainman().ActiveChainstate().IsInitialBlockDownload();
}
bool shutdownRequested() override { return ShutdownRequested(); }
diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp
index bab1b75211..cccf95e552 100644
--- a/src/node/utxo_snapshot.cpp
+++ b/src/node/utxo_snapshot.cpp
@@ -7,12 +7,17 @@
#include <fs.h>
#include <logging.h>
#include <streams.h>
+#include <sync.h>
+#include <tinyformat.h>
+#include <txdb.h>
#include <uint256.h>
#include <util/system.h>
#include <validation.h>
+#include <cassert>
#include <cstdio>
#include <optional>
+#include <string>
namespace node {
diff --git a/src/node/utxo_snapshot.h b/src/node/utxo_snapshot.h
index b5ed9ef9fe..c5c018c9e8 100644
--- a/src/node/utxo_snapshot.h
+++ b/src/node/utxo_snapshot.h
@@ -7,13 +7,16 @@
#define BITCOIN_NODE_UTXO_SNAPSHOT_H
#include <fs.h>
-#include <uint256.h>
+#include <kernel/cs_main.h>
#include <serialize.h>
-#include <validation.h>
+#include <sync.h>
+#include <uint256.h>
+#include <cstdint>
#include <optional>
+#include <string_view>
-extern RecursiveMutex cs_main;
+class Chainstate;
namespace node {
//! Metadata describing a serialized version of a UTXO set from which an
diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp
index 85ade624cf..52d4e45d49 100644
--- a/src/qt/recentrequeststablemodel.cpp
+++ b/src/qt/recentrequeststablemodel.cpp
@@ -175,7 +175,7 @@ void RecentRequestsTableModel::addNewRequest(const SendCoinsRecipient &recipient
newEntry.date = QDateTime::currentDateTime();
newEntry.recipient = recipient;
- CDataStream ss(SER_DISK, CLIENT_VERSION);
+ DataStream ss{};
ss << newEntry;
if (!walletModel->wallet().setAddressReceiveRequest(DecodeDestination(recipient.address.toStdString()), ToString(newEntry.id), ss.str()))
@@ -188,7 +188,7 @@ void RecentRequestsTableModel::addNewRequest(const SendCoinsRecipient &recipient
void RecentRequestsTableModel::addNewRequest(const std::string &recipient)
{
std::vector<uint8_t> data(recipient.begin(), recipient.end());
- CDataStream ss(data, SER_DISK, CLIENT_VERSION);
+ DataStream ss{data};
RecentRequestEntry entry;
ss >> entry;
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index 59a5934890..15fe37c164 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -289,7 +289,7 @@ void TestGUI(interfaces::Node& node)
std::vector<std::string> requests = walletModel.wallet().getAddressReceiveRequests();
QCOMPARE(requests.size(), size_t{1});
RecentRequestEntry entry;
- CDataStream{MakeUCharSpan(requests[0]), SER_DISK, CLIENT_VERSION} >> entry;
+ DataStream{MakeUCharSpan(requests[0])} >> entry;
QCOMPARE(entry.nVersion, int{1});
QCOMPARE(entry.id, int64_t{1});
QVERIFY(entry.date.isValid());
diff --git a/src/rest.cpp b/src/rest.cpp
index add2bb73b0..a874f4eb6d 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -236,7 +236,7 @@ static bool rest_headers(const std::any& context,
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssHeader{};
for (const CBlockIndex *pindex : headers) {
ssHeader << pindex->GetBlockHeader();
}
@@ -248,7 +248,7 @@ static bool rest_headers(const std::any& context,
}
case RESTResponseFormat::HEX: {
- CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssHeader{};
for (const CBlockIndex *pindex : headers) {
ssHeader << pindex->GetBlockHeader();
}
@@ -435,7 +435,7 @@ static bool rest_filter_header(const std::any& context, HTTPRequest* req, const
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ssHeader{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssHeader{};
for (const uint256& header : filter_headers) {
ssHeader << header;
}
@@ -446,7 +446,7 @@ static bool rest_filter_header(const std::any& context, HTTPRequest* req, const
return true;
}
case RESTResponseFormat::HEX: {
- CDataStream ssHeader{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssHeader{};
for (const uint256& header : filter_headers) {
ssHeader << header;
}
@@ -534,7 +534,7 @@ static bool rest_block_filter(const std::any& context, HTTPRequest* req, const s
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ssResp{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssResp{};
ssResp << filter;
std::string binaryResp = ssResp.str();
@@ -543,7 +543,7 @@ static bool rest_block_filter(const std::any& context, HTTPRequest* req, const s
return true;
}
case RESTResponseFormat::HEX: {
- CDataStream ssResp{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssResp{};
ssResp << filter;
std::string strHex = HexStr(ssResp) + "\n";
@@ -793,7 +793,7 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
if (fInputParsed) //don't allow sending input over URI and HTTP RAW DATA
return RESTERR(req, HTTP_BAD_REQUEST, "Combination of URI scheme inputs and raw post data is not allowed");
- CDataStream oss(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream oss{};
oss << strRequestMutable;
oss >> fCheckMemPool;
oss >> vOutPoints;
@@ -866,7 +866,7 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
case RESTResponseFormat::BINARY: {
// serialize data
// use exact same output as mentioned in Bip64
- CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssGetUTXOResponse{};
ssGetUTXOResponse << active_height << active_hash << bitmap << outs;
std::string ssGetUTXOResponseString = ssGetUTXOResponse.str();
@@ -876,7 +876,7 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
}
case RESTResponseFormat::HEX: {
- CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssGetUTXOResponse{};
ssGetUTXOResponse << active_height << active_hash << bitmap << outs;
std::string strHex = HexStr(ssGetUTXOResponse) + "\n";
@@ -946,7 +946,7 @@ static bool rest_blockhash_by_height(const std::any& context, HTTPRequest* req,
}
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ss_blockhash(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ss_blockhash{};
ss_blockhash << pblockindex->GetBlockHash();
req->WriteHeader("Content-Type", "application/octet-stream");
req->WriteReply(HTTP_OK, ss_blockhash.str());
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 1b2543c77a..8bee066ab8 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -460,7 +460,7 @@ static RPCHelpMan getblockfrompeer()
// Fetching blocks before the node has synced past their height can prevent block files from
// being pruned, so we avoid it if the node is in prune mode.
- if (node::fPruneMode && index->nHeight > WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()->nHeight)) {
+ if (chainman.m_blockman.IsPruneMode() && index->nHeight > WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()->nHeight)) {
throw JSONRPCError(RPC_MISC_ERROR, "In prune mode, only blocks that the node has already synced previously can be fetched from a peer");
}
@@ -565,7 +565,7 @@ static RPCHelpMan getblockheader()
if (!fVerbose)
{
- CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssBlock{};
ssBlock << pblockindex->GetBlockHeader();
std::string strHex = HexStr(ssBlock);
return strHex;
@@ -775,10 +775,11 @@ static RPCHelpMan pruneblockchain()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- if (!node::fPruneMode)
+ ChainstateManager& chainman = EnsureAnyChainman(request.context);
+ if (!chainman.m_blockman.IsPruneMode()) {
throw JSONRPCError(RPC_MISC_ERROR, "Cannot prune blocks because node is not in prune mode.");
+ }
- ChainstateManager& chainman = EnsureAnyChainman(request.context);
LOCK(cs_main);
Chainstate& active_chainstate = chainman.ActiveChainstate();
CChain& active_chain = active_chainstate.m_chain;
@@ -1266,15 +1267,15 @@ RPCHelpMan getblockchaininfo()
obj.pushKV("initialblockdownload", active_chainstate.IsInitialBlockDownload());
obj.pushKV("chainwork", tip.nChainWork.GetHex());
obj.pushKV("size_on_disk", chainman.m_blockman.CalculateCurrentUsage());
- obj.pushKV("pruned", node::fPruneMode);
- if (node::fPruneMode) {
+ obj.pushKV("pruned", chainman.m_blockman.IsPruneMode());
+ if (chainman.m_blockman.IsPruneMode()) {
obj.pushKV("pruneheight", chainman.m_blockman.GetFirstStoredBlock(tip)->nHeight);
// if 0, execution bypasses the whole if block.
bool automatic_pruning{args.GetIntArg("-prune", 0) != 1};
obj.pushKV("automatic_pruning", automatic_pruning);
if (automatic_pruning) {
- obj.pushKV("prune_target_size", node::nPruneTarget);
+ obj.pushKV("prune_target_size", chainman.m_blockman.GetPruneTarget());
}
}
diff --git a/src/rpc/txoutproof.cpp b/src/rpc/txoutproof.cpp
index 8eae2ef884..24b5d04115 100644
--- a/src/rpc/txoutproof.cpp
+++ b/src/rpc/txoutproof.cpp
@@ -112,7 +112,7 @@ static RPCHelpMan gettxoutproof()
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Not all transactions found in specified or retrieved block");
}
- CDataStream ssMB(SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS);
+ DataStream ssMB{};
CMerkleBlock mb(block, setTxids);
ssMB << mb;
std::string strHex = HexStr(ssMB);
@@ -138,7 +138,7 @@ static RPCHelpMan verifytxoutproof()
RPCExamples{""},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- CDataStream ssMB(ParseHexV(request.params[0], "proof"), SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS);
+ DataStream ssMB{ParseHexV(request.params[0], "proof")};
CMerkleBlock merkleBlock;
ssMB >> merkleBlock;
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 5815a059ae..72b9f2230e 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -4,11 +4,13 @@
#include <script/descriptor.h>
+#include <hash.h>
#include <key_io.h>
#include <pubkey.h>
#include <script/miniscript.h>
#include <script/script.h>
#include <script/standard.h>
+#include <uint256.h>
#include <span.h>
#include <util/bip32.h>
@@ -1618,8 +1620,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo
}
}
if (txntype == TxoutType::WITNESS_V0_SCRIPTHASH && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH)) {
- CScriptID scriptid;
- CRIPEMD160().Write(data[0].data(), data[0].size()).Finalize(scriptid.begin());
+ CScriptID scriptid{RIPEMD160(data[0])};
CScript subscript;
if (provider.GetCScript(scriptid, subscript)) {
auto sub = InferScript(subscript, ParseScriptContext::P2WSH, provider);
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index d369d4960d..70df9ee62c 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -286,7 +286,6 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
std::vector<valtype>& ret, TxoutType& whichTypeRet, SigVersion sigversion, SignatureData& sigdata)
{
CScript scriptRet;
- uint160 h160;
ret.clear();
std::vector<unsigned char> sig;
@@ -315,8 +314,8 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
ret.push_back(ToByteVector(pubkey));
return true;
}
- case TxoutType::SCRIPTHASH:
- h160 = uint160(vSolutions[0]);
+ case TxoutType::SCRIPTHASH: {
+ uint160 h160{vSolutions[0]};
if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) {
ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end()));
return true;
@@ -324,7 +323,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
// Could not find redeemScript, add to missing
sigdata.missing_redeem_script = h160;
return false;
-
+ }
case TxoutType::MULTISIG: {
size_t required = vSolutions.front()[0];
ret.push_back(valtype()); // workaround CHECKMULTISIG bug
@@ -350,8 +349,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
return true;
case TxoutType::WITNESS_V0_SCRIPTHASH:
- CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(h160.begin());
- if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) {
+ if (GetCScript(provider, sigdata, CScriptID{RIPEMD160(vSolutions[0])}, scriptRet)) {
ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end()));
return true;
}
diff --git a/src/script/sign.h b/src/script/sign.h
index b32bb55dd3..263fb61fc5 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -13,6 +13,7 @@
#include <script/interpreter.h>
#include <script/keyorigin.h>
#include <script/standard.h>
+#include <uint256.h>
class CKey;
class CKeyID;
diff --git a/src/streams.h b/src/streams.h
index 4f2c3ffe76..c12ba8777a 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -182,16 +182,13 @@ public:
* >> and << read and write unformatted data using the above serialization templates.
* Fills with data in linear time; some stringstream implementations take N^2 time.
*/
-class CDataStream
+class DataStream
{
protected:
using vector_type = SerializeData;
vector_type vch;
vector_type::size_type m_read_pos{0};
- int nType;
- int nVersion;
-
public:
typedef vector_type::allocator_type allocator_type;
typedef vector_type::size_type size_type;
@@ -203,23 +200,9 @@ public:
typedef vector_type::const_iterator const_iterator;
typedef vector_type::reverse_iterator reverse_iterator;
- explicit CDataStream(int nTypeIn, int nVersionIn)
- : nType{nTypeIn},
- nVersion{nVersionIn} {}
-
- explicit CDataStream(Span<const uint8_t> sp, int type, int version) : CDataStream{AsBytes(sp), type, version} {}
- explicit CDataStream(Span<const value_type> sp, int nTypeIn, int nVersionIn)
- : vch(sp.data(), sp.data() + sp.size()),
- nType{nTypeIn},
- nVersion{nVersionIn} {}
-
- template <typename... Args>
- CDataStream(int nTypeIn, int nVersionIn, Args&&... args)
- : nType{nTypeIn},
- nVersion{nVersionIn}
- {
- ::SerializeMany(*this, std::forward<Args>(args)...);
- }
+ explicit DataStream() {}
+ explicit DataStream(Span<const uint8_t> sp) : DataStream{AsBytes(sp)} {}
+ explicit DataStream(Span<const value_type> sp) : vch(sp.data(), sp.data() + sp.size()) {}
std::string str() const
{
@@ -271,11 +254,6 @@ public:
bool eof() const { return size() == 0; }
int in_avail() const { return size(); }
- void SetType(int n) { nType = n; }
- int GetType() const { return nType; }
- void SetVersion(int n) { nVersion = n; }
- int GetVersion() const { return nVersion; }
-
void read(Span<value_type> dst)
{
if (dst.size() == 0) return;
@@ -283,7 +261,7 @@ public:
// Read from the beginning of the buffer
auto next_read_pos{CheckedAdd(m_read_pos, dst.size())};
if (!next_read_pos.has_value() || next_read_pos.value() > vch.size()) {
- throw std::ios_base::failure("CDataStream::read(): end of data");
+ throw std::ios_base::failure("DataStream::read(): end of data");
}
memcpy(dst.data(), &vch[m_read_pos], dst.size());
if (next_read_pos.value() == vch.size()) {
@@ -299,7 +277,7 @@ public:
// Ignore from the beginning of the buffer
auto next_read_pos{CheckedAdd(m_read_pos, num_ignore)};
if (!next_read_pos.has_value() || next_read_pos.value() > vch.size()) {
- throw std::ios_base::failure("CDataStream::ignore(): end of data");
+ throw std::ios_base::failure("DataStream::ignore(): end of data");
}
if (next_read_pos.value() == vch.size()) {
m_read_pos = 0;
@@ -324,7 +302,7 @@ public:
}
template<typename T>
- CDataStream& operator<<(const T& obj)
+ DataStream& operator<<(const T& obj)
{
// Serialize to this stream
::Serialize(*this, obj);
@@ -332,7 +310,7 @@ public:
}
template<typename T>
- CDataStream& operator>>(T&& obj)
+ DataStream& operator>>(T&& obj)
{
// Unserialize from this stream
::Unserialize(*this, obj);
@@ -363,6 +341,50 @@ public:
}
};
+class CDataStream : public DataStream
+{
+private:
+ int nType;
+ int nVersion;
+
+public:
+ explicit CDataStream(int nTypeIn, int nVersionIn)
+ : nType{nTypeIn},
+ nVersion{nVersionIn} {}
+
+ explicit CDataStream(Span<const uint8_t> sp, int type, int version) : CDataStream{AsBytes(sp), type, version} {}
+ explicit CDataStream(Span<const value_type> sp, int nTypeIn, int nVersionIn)
+ : DataStream{sp},
+ nType{nTypeIn},
+ nVersion{nVersionIn} {}
+
+ template <typename... Args>
+ CDataStream(int nTypeIn, int nVersionIn, Args&&... args)
+ : nType{nTypeIn},
+ nVersion{nVersionIn}
+ {
+ ::SerializeMany(*this, std::forward<Args>(args)...);
+ }
+
+ int GetType() const { return nType; }
+ void SetVersion(int n) { nVersion = n; }
+ int GetVersion() const { return nVersion; }
+
+ template <typename T>
+ CDataStream& operator<<(const T& obj)
+ {
+ ::Serialize(*this, obj);
+ return *this;
+ }
+
+ template <typename T>
+ CDataStream& operator>>(T&& obj)
+ {
+ ::Unserialize(*this, obj);
+ return *this;
+ }
+};
+
template <typename IStream>
class BitStreamReader
{
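A hedged before/after sketch of what the split means for callers (illustrative snippets, not taken from the patch): serialization that never consulted the type/version pair migrates to the new DataStream, while code whose serialization still branches on the stream version keeps CDataStream.

    // Before: a (type, version) pair had to be supplied even when unused.
    CDataStream old_style{SER_NETWORK, PROTOCOL_VERSION};
    old_style << COutPoint{};

    // After: version-independent (de)serialization uses the plain DataStream.
    DataStream new_style{};
    new_style << COutPoint{};

    // CDataStream survives as a thin subclass for call sites that still
    // query GetType()/GetVersion() during (un)serialization.
    CDataStream versioned{SER_DISK, CLIENT_VERSION};
    const int version{versioned.GetVersion()};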
diff --git a/src/sync.h b/src/sync.h
index 8ce2e7b124..7242a793ab 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -11,7 +11,7 @@
#include <logging/timer.h>
#endif
-#include <threadsafety.h>
+#include <threadsafety.h> // IWYU pragma: export
#include <util/macros.h>
#include <condition_variable>
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
index e1dafc6bac..e23b7228e7 100644
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -310,7 +310,7 @@ BOOST_AUTO_TEST_CASE(TransactionsRequestSerializationTest) {
req1.indexes[2] = 3;
req1.indexes[3] = 4;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << req1;
BlockTransactionsRequest req2;
@@ -330,7 +330,7 @@ BOOST_AUTO_TEST_CASE(TransactionsRequestDeserializationMaxTest) {
req0.blockhash = InsecureRand256();
req0.indexes.resize(1);
req0.indexes[0] = 0xffff;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << req0;
BlockTransactionsRequest req1;
@@ -350,7 +350,7 @@ BOOST_AUTO_TEST_CASE(TransactionsRequestDeserializationOverflowTest) {
req0.indexes[0] = 0x7000;
req0.indexes[1] = 0x10000 - 0x7000 - 2;
req0.indexes[2] = 0;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << req0.blockhash;
WriteCompactSize(stream, req0.indexes.size());
WriteCompactSize(stream, req0.indexes[0]);
diff --git a/src/test/blockfilter_tests.cpp b/src/test/blockfilter_tests.cpp
index 43dca57217..9388b4c96a 100644
--- a/src/test/blockfilter_tests.cpp
+++ b/src/test/blockfilter_tests.cpp
@@ -110,7 +110,7 @@ BOOST_AUTO_TEST_CASE(blockfilter_basic_test)
// Test serialization/unserialization.
BlockFilter block_filter2;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << block_filter;
stream >> block_filter2;
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 3d6e103c9f..4888041204 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << filter;
std::vector<uint8_t> expected = ParseHex("03614e9b050000000000000001");
@@ -66,7 +66,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << filter;
std::vector<uint8_t> expected = ParseHex("03ce4299050000000100008001");
@@ -87,7 +87,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
uint160 hash = pubkey.GetID();
filter.insert(hash);
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << filter;
std::vector<unsigned char> expected = ParseHex("038fc16b080000000000000001");
@@ -340,7 +340,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize)
for (unsigned int i = 0; i < vMatched.size(); i++)
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
- CDataStream merkleStream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream merkleStream{};
merkleStream << merkleBlock;
std::vector<uint8_t> expected = ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101");
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index b5f961a239..312f417129 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -53,9 +53,9 @@ public:
uint256 GetBestBlock() const override { return hashBestBlock_; }
- bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock) override
+ bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock, bool erase = true) override
{
- for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); ) {
+ for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); it = erase ? mapCoins.erase(it) : std::next(it)) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) {
// Same optimization used in CCoinsViewDB is to only write dirty entries.
map_[it->first] = it->second.coin;
@@ -64,7 +64,6 @@ public:
map_.erase(it->first);
}
}
- mapCoins.erase(it++);
}
if (!hashBlock.IsNull())
hashBestBlock_ = hashBlock;
@@ -126,6 +125,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
bool found_an_entry = false;
bool missed_an_entry = false;
bool uncached_an_entry = false;
+ bool flushed_without_erase = false;
// A simple map to track what we expect the cache stack to represent.
std::map<COutPoint, Coin> result;
@@ -154,9 +154,16 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
bool test_havecoin_after = InsecureRandBits(2) == 0;
bool result_havecoin = test_havecoin_before ? stack.back()->HaveCoin(COutPoint(txid, 0)) : false;
- const Coin& entry = (InsecureRandRange(500) == 0) ? AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0));
+
+ // Infrequently, test usage of AccessByTxid instead of AccessCoin - the
+ // former just delegates to the latter and returns the first unspent in a txn.
+ const Coin& entry = (InsecureRandRange(500) == 0) ?
+ AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0));
BOOST_CHECK(coin == entry);
- BOOST_CHECK(!test_havecoin_before || result_havecoin == !entry.IsSpent());
+
+ if (test_havecoin_before) {
+ BOOST_CHECK(result_havecoin == !entry.IsSpent());
+ }
if (test_havecoin_after) {
bool ret = stack.back()->HaveCoin(COutPoint(txid, 0));
@@ -167,24 +174,29 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
Coin newcoin;
newcoin.out.nValue = InsecureRand32();
newcoin.nHeight = 1;
+
+ // Infrequently test adding unspendable coins.
if (InsecureRandRange(16) == 0 && coin.IsSpent()) {
newcoin.out.scriptPubKey.assign(1 + InsecureRandBits(6), OP_RETURN);
BOOST_CHECK(newcoin.out.scriptPubKey.IsUnspendable());
added_an_unspendable_entry = true;
} else {
- newcoin.out.scriptPubKey.assign(InsecureRandBits(6), 0); // Random sizes so we can test memory usage accounting
+ // Random sizes so we can test memory usage accounting
+ newcoin.out.scriptPubKey.assign(InsecureRandBits(6), 0);
(coin.IsSpent() ? added_an_entry : updated_an_entry) = true;
coin = newcoin;
}
- stack.back()->AddCoin(COutPoint(txid, 0), std::move(newcoin), !coin.IsSpent() || InsecureRand32() & 1);
+ bool is_overwrite = !coin.IsSpent() || InsecureRand32() & 1;
+ stack.back()->AddCoin(COutPoint(txid, 0), std::move(newcoin), is_overwrite);
} else {
+ // Spend the coin.
removed_an_entry = true;
coin.Clear();
BOOST_CHECK(stack.back()->SpendCoin(COutPoint(txid, 0)));
}
}
- // One every 10 iterations, remove a random entry from the cache
+ // Once every 10 iterations, remove a random entry from the cache
if (InsecureRandRange(10) == 0) {
COutPoint out(txids[InsecureRand32() % txids.size()], 0);
int cacheid = InsecureRand32() % stack.size();
@@ -216,7 +228,9 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
if (stack.size() > 1 && InsecureRandBool() == 0) {
unsigned int flushIndex = InsecureRandRange(stack.size() - 1);
if (fake_best_block) stack[flushIndex]->SetBestBlock(InsecureRand256());
- BOOST_CHECK(stack[flushIndex]->Flush());
+ bool should_erase = InsecureRandRange(4) < 3;
+ BOOST_CHECK(should_erase ? stack[flushIndex]->Flush() : stack[flushIndex]->Sync());
+ flushed_without_erase |= !should_erase;
}
}
if (InsecureRandRange(100) == 0) {
@@ -224,7 +238,9 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
if (stack.size() > 0 && InsecureRandBool() == 0) {
//Remove the top cache
if (fake_best_block) stack.back()->SetBestBlock(InsecureRand256());
- BOOST_CHECK(stack.back()->Flush());
+ bool should_erase = InsecureRandRange(4) < 3;
+ BOOST_CHECK(should_erase ? stack.back()->Flush() : stack.back()->Sync());
+ flushed_without_erase |= !should_erase;
delete stack.back();
stack.pop_back();
}
@@ -260,6 +276,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
BOOST_CHECK(found_an_entry);
BOOST_CHECK(missed_an_entry);
BOOST_CHECK(uncached_an_entry);
+ BOOST_CHECK(flushed_without_erase);
}
// Run the above simulation for multiple base types.
@@ -498,7 +515,7 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
BOOST_AUTO_TEST_CASE(ccoins_serialization)
{
// Good example
- CDataStream ss1(ParseHex("97f23c835800816115944e077fe7c803cfa57f29b36bf87c1d35"), SER_DISK, CLIENT_VERSION);
+ DataStream ss1{ParseHex("97f23c835800816115944e077fe7c803cfa57f29b36bf87c1d35")};
Coin cc1;
ss1 >> cc1;
BOOST_CHECK_EQUAL(cc1.fCoinBase, false);
@@ -507,7 +524,7 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
BOOST_CHECK_EQUAL(HexStr(cc1.out.scriptPubKey), HexStr(GetScriptForDestination(PKHash(uint160(ParseHex("816115944e077fe7c803cfa57f29b36bf87c1d35"))))));
// Good example
- CDataStream ss2(ParseHex("8ddf77bbd123008c988f1a4a4de2161e0f50aac7f17e7f9555caa4"), SER_DISK, CLIENT_VERSION);
+ DataStream ss2{ParseHex("8ddf77bbd123008c988f1a4a4de2161e0f50aac7f17e7f9555caa4")};
Coin cc2;
ss2 >> cc2;
BOOST_CHECK_EQUAL(cc2.fCoinBase, true);
@@ -516,7 +533,7 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
BOOST_CHECK_EQUAL(HexStr(cc2.out.scriptPubKey), HexStr(GetScriptForDestination(PKHash(uint160(ParseHex("8c988f1a4a4de2161e0f50aac7f17e7f9555caa4"))))));
// Smallest possible example
- CDataStream ss3(ParseHex("000006"), SER_DISK, CLIENT_VERSION);
+ DataStream ss3{ParseHex("000006")};
Coin cc3;
ss3 >> cc3;
BOOST_CHECK_EQUAL(cc3.fCoinBase, false);
@@ -525,7 +542,7 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
BOOST_CHECK_EQUAL(cc3.out.scriptPubKey.size(), 0U);
// scriptPubKey that ends beyond the end of the stream
- CDataStream ss4(ParseHex("000007"), SER_DISK, CLIENT_VERSION);
+ DataStream ss4{ParseHex("000007")};
try {
Coin cc4;
ss4 >> cc4;
@@ -534,11 +551,11 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
}
// Very large scriptPubKey (3*10^9 bytes) past the end of the stream
- CDataStream tmp(SER_DISK, CLIENT_VERSION);
+ DataStream tmp{};
uint64_t x = 3000000000ULL;
tmp << VARINT(x);
BOOST_CHECK_EQUAL(HexStr(tmp), "8a95c0bb00");
- CDataStream ss5(ParseHex("00008a95c0bb00"), SER_DISK, CLIENT_VERSION);
+ DataStream ss5{ParseHex("00008a95c0bb00")};
try {
Coin cc5;
ss5 >> cc5;
@@ -589,9 +606,9 @@ static size_t InsertCoinsMapEntry(CCoinsMap& map, CAmount value, char flags)
return inserted.first->second.coin.DynamicMemoryUsage();
}
-void GetCoinsMapEntry(const CCoinsMap& map, CAmount& value, char& flags)
+void GetCoinsMapEntry(const CCoinsMap& map, CAmount& value, char& flags, const COutPoint& outp = OUTPOINT)
{
- auto it = map.find(OUTPOINT);
+ auto it = map.find(outp);
if (it == map.end()) {
value = ABSENT;
flags = NO_ENTRY;
@@ -877,4 +894,205 @@ BOOST_AUTO_TEST_CASE(ccoins_write)
CheckWriteCoins(parent_value, child_value, parent_value, parent_flags, child_flags, parent_flags);
}
+
+Coin MakeCoin()
+{
+ Coin coin;
+ coin.out.nValue = InsecureRand32();
+ coin.nHeight = InsecureRandRange(4096);
+ coin.fCoinBase = 0;
+ return coin;
+}
+
+
+//! For CCoinsViewCache instances backed by either another cache instance or
+//! leveldb, test cache behavior and flag state (DIRTY/FRESH) by
+//!
+//! 1. Adding a random coin to the child-most cache,
+//! 2. Flushing all caches (without erasing),
+//! 3. Ensuring the entry still exists in the cache and has been written to the parent,
+//! 4. (if `do_erasing_flush`) Flushing the caches again (with erasing),
+//! 5. (if `do_erasing_flush`) Ensuring the entry has been written to the parent and is no longer in the cache,
+//! 6. Spending the coin, and ensuring it no longer exists in the parent.
+//!
+void TestFlushBehavior(
+ CCoinsViewCacheTest* view,
+ CCoinsViewDB& base,
+ std::vector<CCoinsViewCacheTest*>& all_caches,
+ bool do_erasing_flush)
+{
+ CAmount value;
+ char flags;
+ size_t cache_usage;
+
+ auto flush_all = [&all_caches](bool erase) {
+ // Flush in reverse order to ensure that flushes happen from children up.
+ for (auto i = all_caches.rbegin(); i != all_caches.rend(); ++i) {
+ auto cache = *i;
+ // hashBlock must be filled before flushing to disk; value is
+ // unimportant here. This is normally done during connect/disconnect block.
+ cache->SetBestBlock(InsecureRand256());
+ erase ? cache->Flush() : cache->Sync();
+ }
+ };
+
+ uint256 txid = InsecureRand256();
+ COutPoint outp = COutPoint(txid, 0);
+ Coin coin = MakeCoin();
+ // Ensure the coins views haven't seen this coin before.
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!view->HaveCoin(outp));
+
+ // --- 1. Adding a random coin to the child cache
+ //
+ view->AddCoin(outp, Coin(coin), false);
+
+ cache_usage = view->DynamicMemoryUsage();
+ // `base` shouldn't have coin (no flush yet) but `view` should have cached it.
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(view->HaveCoin(outp));
+
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin.out.nValue);
+ BOOST_CHECK_EQUAL(flags, DIRTY|FRESH);
+
+ // --- 2. Flushing all caches (without erasing)
+ //
+ flush_all(/*erase=*/ false);
+
+ // CoinsMap usage should be unchanged since we didn't erase anything.
+ BOOST_CHECK_EQUAL(cache_usage, view->DynamicMemoryUsage());
+
+ // --- 3. Ensuring the entry still exists in the cache and has been written to parent
+ //
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin.out.nValue);
+ BOOST_CHECK_EQUAL(flags, 0); // Flags should have been wiped.
+
+ // Both views should now have the coin.
+ BOOST_CHECK(base.HaveCoin(outp));
+ BOOST_CHECK(view->HaveCoin(outp));
+
+ if (do_erasing_flush) {
+ // --- 4. Flushing the caches again (with erasing)
+ //
+ flush_all(/*erase=*/ true);
+
+ // Memory usage should have gone down.
+ BOOST_CHECK(view->DynamicMemoryUsage() < cache_usage);
+
+ // --- 5. Ensuring the entry is no longer in the cache
+ //
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, ABSENT);
+ BOOST_CHECK_EQUAL(flags, NO_ENTRY);
+
+ view->AccessCoin(outp);
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin.out.nValue);
+ BOOST_CHECK_EQUAL(flags, 0);
+ }
+
+ // Can't overwrite an entry without specifying that an overwrite is
+ // expected.
+ BOOST_CHECK_THROW(
+ view->AddCoin(outp, Coin(coin), /*possible_overwrite=*/ false),
+ std::logic_error);
+
+ // --- 6. Spend the coin.
+ //
+ BOOST_CHECK(view->SpendCoin(outp));
+
+ // The coin should be in the cache, but spent and marked dirty.
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, SPENT);
+ BOOST_CHECK_EQUAL(flags, DIRTY);
+ BOOST_CHECK(!view->HaveCoin(outp)); // Coin should be considered spent in `view`.
+ BOOST_CHECK(base.HaveCoin(outp)); // But coin should still be unspent in `base`.
+
+ flush_all(/*erase=*/ false);
+
+ // Coin should be considered spent in both views.
+ BOOST_CHECK(!view->HaveCoin(outp));
+ BOOST_CHECK(!base.HaveCoin(outp));
+
+ // Spent coin should not be spendable.
+ BOOST_CHECK(!view->SpendCoin(outp));
+
+ // --- Bonus check: ensure that a coin added to the base view via one cache
+ // can be spent by another cache which has never seen it.
+ //
+ txid = InsecureRand256();
+ outp = COutPoint(txid, 0);
+ coin = MakeCoin();
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoin(outp));
+
+ all_caches[0]->AddCoin(outp, std::move(coin), false);
+ all_caches[0]->Sync();
+ BOOST_CHECK(base.HaveCoin(outp));
+ BOOST_CHECK(all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoinInCache(outp));
+
+ BOOST_CHECK(all_caches[1]->SpendCoin(outp));
+ flush_all(/*erase=*/ false);
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoin(outp));
+
+ flush_all(/*erase=*/ true); // Erase all cache content.
+
+ // --- Bonus check 2: ensure that a FRESH, spent coin is deleted by Sync()
+ //
+ txid = InsecureRand256();
+ outp = COutPoint(txid, 0);
+ coin = MakeCoin();
+ CAmount coin_val = coin.out.nValue;
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoin(outp));
+
+ // Add and spend from same cache without flushing.
+ all_caches[0]->AddCoin(outp, std::move(coin), false);
+
+ // Coin should be FRESH in the cache.
+ GetCoinsMapEntry(all_caches[0]->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin_val);
+ BOOST_CHECK_EQUAL(flags, DIRTY|FRESH);
+
+ // Base shouldn't have seen coin.
+ BOOST_CHECK(!base.HaveCoin(outp));
+
+ BOOST_CHECK(all_caches[0]->SpendCoin(outp));
+ all_caches[0]->Sync();
+
+ // Ensure there is no sign of the coin after spend/flush.
+ GetCoinsMapEntry(all_caches[0]->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, ABSENT);
+ BOOST_CHECK_EQUAL(flags, NO_ENTRY);
+ BOOST_CHECK(!all_caches[0]->HaveCoinInCache(outp));
+ BOOST_CHECK(!base.HaveCoin(outp));
+}
+
+BOOST_AUTO_TEST_CASE(ccoins_flush_behavior)
+{
+ // Create two in-memory caches atop a leveldb view.
+ CCoinsViewDB base{"test", /*nCacheSize=*/ 1 << 23, /*fMemory=*/ true, /*fWipe=*/ false};
+ std::vector<CCoinsViewCacheTest*> caches;
+ caches.push_back(new CCoinsViewCacheTest(&base));
+ caches.push_back(new CCoinsViewCacheTest(caches.back()));
+
+ for (CCoinsViewCacheTest* view : caches) {
+ TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ false);
+ TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ true);
+ }
+
+ // Clean up the caches.
+ while (caches.size() > 0) {
+ delete caches.back();
+ caches.pop_back();
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
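For context, the Flush()/Sync() split exercised by TestFlushBehavior lives in coins.cpp/coins.h, which this excerpt does not show. A plausible sketch of the distinction, assuming both calls funnel into BatchWrite() with the new erase flag (the member names are taken from CCoinsViewCache, but the bodies here are illustrative, not the actual hunk):

// Illustrative sketch only; not the actual coins.cpp change.
bool CCoinsViewCache::Flush()
{
    // Write all DIRTY entries to the parent and erase them from this cache.
    bool ok = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/true);
    if (ok) cachedCoinsUsage = 0; // The cache map is now empty.
    return ok;
}

bool CCoinsViewCache::Sync()
{
    // Write all DIRTY entries to the parent but keep them cached; when erase is
    // false, BatchWrite() clears their DIRTY/FRESH flags instead of erasing them.
    return base->BatchWrite(cacheCoins, hashBlock, /*erase=*/false);
}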
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index 9b369a5c50..d3eef7beb7 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -925,7 +925,7 @@ BOOST_AUTO_TEST_CASE(muhash_tests)
// Test MuHash3072 serialization
MuHash3072 serchk = FromInt(1); serchk *= FromInt(2);
std::string ser_exp = "1fa093295ea30a6a3acdc7b3f770fa538eff537528e990e2910e40bbcfd7f6696b1256901929094694b56316de342f593303dd12ac43e06dce1be1ff8301c845beb15468fff0ef002dbf80c29f26e6452bccc91b5cb9437ad410d2a67ea847887fa3c6a6553309946880fe20db2c73fe0641adbd4e86edfee0d9f8cd0ee1230898873dc13ed8ddcaf045c80faa082774279007a2253f8922ee3ef361d378a6af3ddaf180b190ac97e556888c36b3d1fb1c85aab9ccd46e3deaeb7b7cf5db067a7e9ff86b658cf3acd6662bbcce37232daa753c48b794356c020090c831a8304416e2aa7ad633c0ddb2f11be1be316a81be7f7e472071c042cb68faef549c221ebff209273638b741aba5a81675c45a5fa92fea4ca821d7a324cb1e1a2ccd3b76c4228ec8066dad2a5df6e1bd0de45c7dd5de8070bdb46db6c554cf9aefc9b7b2bbf9f75b1864d9f95005314593905c0109b71f703d49944ae94477b51dac10a816bb6d1c700bafabc8bd86fac8df24be519a2f2836b16392e18036cb13e48c5c010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000";
- CDataStream ss_chk(SER_DISK, PROTOCOL_VERSION);
+ DataStream ss_chk{};
ss_chk << serchk;
BOOST_CHECK_EQUAL(ser_exp, HexStr(ss_chk.str()));
@@ -938,7 +938,7 @@ BOOST_AUTO_TEST_CASE(muhash_tests)
BOOST_CHECK_EQUAL(HexStr(out), HexStr(out3));
// Test MuHash3072 overflow, meaning the internal data is larger than the modulus.
- CDataStream ss_max(ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), SER_DISK, PROTOCOL_VERSION);
+ DataStream ss_max{ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")};
MuHash3072 overflowchk;
ss_max >> overflowchk;
diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp
index 46026d8df3..e75dc3ce91 100644
--- a/src/test/fuzz/coins_view.cpp
+++ b/src/test/fuzz/coins_view.cpp
@@ -75,6 +75,9 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view)
(void)coins_view_cache.Flush();
},
[&] {
+ (void)coins_view_cache.Sync();
+ },
+ [&] {
coins_view_cache.SetBestBlock(ConsumeUInt256(fuzzed_data_provider));
},
[&] {
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 7965f90dc7..c0aefe6067 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -152,7 +152,7 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
const CScriptID script_id{u160};
{
- CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream stream{};
uint256 deserialized_u256;
stream << u256;
@@ -217,7 +217,7 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
}
{
- CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream stream{};
ser_writedata64(stream, u64);
const uint64_t deserialized_u64 = ser_readdata64(stream);
@@ -245,7 +245,7 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
}
{
- CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream stream{};
WriteCompactSize(stream, u64);
try {
diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp
index e83606f032..ea6883c08d 100644
--- a/src/test/fuzz/key.cpp
+++ b/src/test/fuzz/key.cpp
@@ -111,7 +111,7 @@ FUZZ_TARGET_INIT(key, initialize_key)
}
{
- CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream data_stream{};
pubkey.Serialize(data_stream);
CPubKey pubkey_deserialized;
diff --git a/src/test/fuzz/prevector.cpp b/src/test/fuzz/prevector.cpp
index c8fd9aca30..9cea32e304 100644
--- a/src/test/fuzz/prevector.cpp
+++ b/src/test/fuzz/prevector.cpp
@@ -59,8 +59,8 @@ public:
--pos;
assert(v == real_vector[pos]);
}
- CDataStream ss1(SER_DISK, 0);
- CDataStream ss2(SER_DISK, 0);
+ DataStream ss1{};
+ DataStream ss2{};
ss1 << real_vector;
ss2 << pre_vector;
assert(ss1.size() == ss2.size());
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index 361cfa6cb6..2578137471 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -253,7 +253,7 @@ std::string ConsumeScalarRPCArgument(FuzzedDataProvider& fuzzed_data_provider)
if (!opt_block_header) {
return;
}
- CDataStream data_stream{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream data_stream{};
data_stream << *opt_block_header;
r = HexStr(data_stream);
},
diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp
index 3c427b9bef..9890e4c0e5 100644
--- a/src/test/fuzz/string.cpp
+++ b/src/test/fuzz/string.cpp
@@ -196,7 +196,7 @@ FUZZ_TARGET(string)
}
{
- CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream data_stream{};
std::string s;
auto limited_string = LIMITED_STRING(s, 10);
data_stream << random_string_1;
@@ -212,7 +212,7 @@ FUZZ_TARGET(string)
}
}
{
- CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream data_stream{};
const auto limited_string = LIMITED_STRING(random_string_1, 10);
data_stream << limited_string;
std::string deserialized_string;
diff --git a/src/test/fuzz/tx_in.cpp b/src/test/fuzz/tx_in.cpp
index f8247c1fa4..fc16f80cde 100644
--- a/src/test/fuzz/tx_in.cpp
+++ b/src/test/fuzz/tx_in.cpp
@@ -14,12 +14,9 @@
FUZZ_TARGET(tx_in)
{
- CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream ds{buffer};
CTxIn tx_in;
try {
- int version;
- ds >> version;
- ds.SetVersion(version);
ds >> tx_in;
} catch (const std::ios_base::failure&) {
return;
diff --git a/src/test/fuzz/tx_out.cpp b/src/test/fuzz/tx_out.cpp
index 337b8e2771..806216fbf5 100644
--- a/src/test/fuzz/tx_out.cpp
+++ b/src/test/fuzz/tx_out.cpp
@@ -13,12 +13,9 @@
FUZZ_TARGET(tx_out)
{
- CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream ds{buffer};
CTxOut tx_out;
try {
- int version;
- ds >> version;
- ds.SetVersion(version);
ds >> tx_out;
} catch (const std::ios_base::failure&) {
return;
diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp
index 21ed2f1080..d14fda7351 100644
--- a/src/test/key_tests.cpp
+++ b/src/test/key_tests.cpp
@@ -233,7 +233,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation)
static CPubKey UnserializePubkey(const std::vector<uint8_t>& data)
{
- CDataStream stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream stream{};
stream << data;
CPubKey pubkey;
stream >> pubkey;
@@ -251,7 +251,7 @@ static unsigned int GetLen(unsigned char chHeader)
static void CmpSerializationPubkey(const CPubKey& pubkey)
{
- CDataStream stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream stream{};
stream << pubkey;
CPubKey pubkey2;
stream >> pubkey2;
diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp
index d6aee472a8..21e0dd2fc5 100644
--- a/src/test/pmt_tests.cpp
+++ b/src/test/pmt_tests.cpp
@@ -69,7 +69,7 @@ BOOST_AUTO_TEST_CASE(pmt_test1)
CPartialMerkleTree pmt1(vTxid, vMatch);
// serialize
- CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ss{};
ss << pmt1;
// verify CPartialMerkleTree's size guarantees
diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp
index 4068775cfa..5f4d307048 100644
--- a/src/test/prevector_tests.cpp
+++ b/src/test/prevector_tests.cpp
@@ -66,8 +66,8 @@ class prevector_tester {
for (const T& v : reverse_iterate(const_pre_vector)) {
local_check(v == real_vector[--pos]);
}
- CDataStream ss1(SER_DISK, 0);
- CDataStream ss2(SER_DISK, 0);
+ DataStream ss1{};
+ DataStream ss2{};
ss1 << real_vector;
ss2 << pre_vector;
local_check_equal(ss1.size(), ss2.size());
diff --git a/src/test/serfloat_tests.cpp b/src/test/serfloat_tests.cpp
index ed1f081913..f6af32cf6c 100644
--- a/src/test/serfloat_tests.cpp
+++ b/src/test/serfloat_tests.cpp
@@ -111,7 +111,7 @@ Python code to generate the below hashes:
*/
BOOST_AUTO_TEST_CASE(doubles)
{
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
// encode
for (int i = 0; i < 1000; i++) {
ss << EncodeDouble(i);
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index c90ae38ae8..f583109e16 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -90,8 +90,8 @@ BOOST_AUTO_TEST_CASE(varints)
{
// encode
- CDataStream ss(SER_DISK, 0);
- CDataStream::size_type size = 0;
+ DataStream ss{};
+ DataStream::size_type size = 0;
for (int i = 0; i < 100000; i++) {
ss << VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED);
size += ::GetSerializeSize(VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED), 0);
@@ -120,7 +120,7 @@ BOOST_AUTO_TEST_CASE(varints)
BOOST_AUTO_TEST_CASE(varints_bitpatterns)
{
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
ss << VARINT_MODE(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear();
ss << VARINT_MODE(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
ss << VARINT_MODE(int8_t{0x7f}, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(varints_bitpatterns)
BOOST_AUTO_TEST_CASE(compactsize)
{
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
std::vector<char>::size_type i, j;
for (i = 1; i <= MAX_SIZE; i *= 2)
@@ -182,7 +182,7 @@ BOOST_AUTO_TEST_CASE(noncanonical)
{
// Write some non-canonical CompactSize encodings, and
// make sure an exception is thrown when read back.
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
std::vector<char>::size_type n;
// zero encoded with three bytes:
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 03117f76ae..b7c1ce5066 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -128,9 +128,9 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader_rvalue)
BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
{
- CDataStream data(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream data{};
- BitStreamWriter<CDataStream> bit_writer(data);
+ BitStreamWriter bit_writer{data};
bit_writer.Write(0, 1);
bit_writer.Write(2, 2);
bit_writer.Write(6, 3);
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
bit_writer.Write(30497, 16);
bit_writer.Flush();
- CDataStream data_copy(data);
+ DataStream data_copy{data};
uint32_t serialized_int1;
data >> serialized_int1;
BOOST_CHECK_EQUAL(serialized_int1, uint32_t{0x7700C35A}); // NOTE: Serialized as LE
@@ -149,7 +149,7 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
data >> serialized_int2;
BOOST_CHECK_EQUAL(serialized_int2, uint16_t{0x1072}); // NOTE: Serialized as LE
- BitStreamReader<CDataStream> bit_reader(data_copy);
+ BitStreamReader bit_reader{data_copy};
BOOST_CHECK_EQUAL(bit_reader.Read(1), 0U);
BOOST_CHECK_EQUAL(bit_reader.Read(2), 2U);
BOOST_CHECK_EQUAL(bit_reader.Read(3), 6U);
@@ -167,7 +167,7 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
// Degenerate case
{
- CDataStream ds{in, 0, 0};
+ DataStream ds{in};
ds.Xor({0x00, 0x00});
BOOST_CHECK_EQUAL(""s, ds.str());
}
@@ -177,7 +177,7 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
// Single character key
{
- CDataStream ds{in, 0, 0};
+ DataStream ds{in};
ds.Xor({0xff});
BOOST_CHECK_EQUAL("\xf0\x0f"s, ds.str());
}
@@ -189,7 +189,7 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
in.push_back(std::byte{0x0f});
{
- CDataStream ds{in, 0, 0};
+ DataStream ds{in};
ds.Xor({0xff, 0x0f});
BOOST_CHECK_EQUAL("\x0f\x00"s, ds.str());
}
diff --git a/src/test/uint256_tests.cpp b/src/test/uint256_tests.cpp
index bc206fc945..9caefe43e2 100644
--- a/src/test/uint256_tests.cpp
+++ b/src/test/uint256_tests.cpp
@@ -187,7 +187,7 @@ BOOST_AUTO_TEST_CASE( methods ) // GetHex SetHex begin() end() size() GetLow64 G
BOOST_CHECK(GetSerializeSize(R1L, PROTOCOL_VERSION) == 32);
BOOST_CHECK(GetSerializeSize(ZeroL, PROTOCOL_VERSION) == 32);
- CDataStream ss(0, PROTOCOL_VERSION);
+ DataStream ss{};
ss << R1L;
BOOST_CHECK(ss.str() == std::string(R1Array,R1Array+32));
ss >> TmpL;
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 1b28e5f2c0..6e72f69968 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -208,23 +208,24 @@ ChainTestingSetup::~ChainTestingSetup()
void TestingSetup::LoadVerifyActivateChainstate()
{
+ auto& chainman{*Assert(m_node.chainman)};
node::ChainstateLoadOptions options;
options.mempool = Assert(m_node.mempool.get());
options.block_tree_db_in_memory = m_block_tree_db_in_memory;
options.coins_db_in_memory = m_coins_db_in_memory;
options.reindex = node::fReindex;
options.reindex_chainstate = m_args.GetBoolArg("-reindex-chainstate", false);
- options.prune = node::fPruneMode;
+ options.prune = chainman.m_blockman.IsPruneMode();
options.check_blocks = m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
options.check_level = m_args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
- auto [status, error] = LoadChainstate(*Assert(m_node.chainman), m_cache_sizes, options);
+ auto [status, error] = LoadChainstate(chainman, m_cache_sizes, options);
assert(status == node::ChainstateLoadStatus::SUCCESS);
- std::tie(status, error) = VerifyLoadedChainstate(*Assert(m_node.chainman), options);
+ std::tie(status, error) = VerifyLoadedChainstate(chainman, options);
assert(status == node::ChainstateLoadStatus::SUCCESS);
BlockValidationState state;
- if (!m_node.chainman->ActiveChainstate().ActivateBestChain(state)) {
+ if (!chainman.ActiveChainstate().ActivateBestChain(state)) {
throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
}
}
diff --git a/src/txdb.cpp b/src/txdb.cpp
index f04a4e9800..c12b540b9b 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -111,7 +111,7 @@ std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
return vhashHeadBlocks;
}
-bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
+bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) {
CDBBatch batch(*m_db);
size_t count = 0;
size_t changed = 0;
@@ -146,8 +146,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
changed++;
}
count++;
- CCoinsMap::iterator itOld = it++;
- mapCoins.erase(itOld);
+ it = erase ? mapCoins.erase(it) : std::next(it);
if (batch.SizeEstimate() > batch_size) {
LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
m_db->WriteBatch(batch);
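The `it = erase ? mapCoins.erase(it) : std::next(it)` line above is the standard idiom for conditionally consuming map entries while iterating; erase() returns the next valid iterator, so the loop stays well-defined in both modes. A standalone sketch for reference (illustrative, not part of the patch):

#include <iterator>
#include <map>
#include <string>

// Walk a map, optionally erasing each visited entry as we go.
static void WalkAndMaybeErase(std::map<int, std::string>& m, bool erase)
{
    for (auto it = m.begin(); it != m.end(); it = erase ? m.erase(it) : std::next(it)) {
        // Inspect or copy *it here, before it is (possibly) erased.
    }
}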
diff --git a/src/txdb.h b/src/txdb.h
index 5a409d7dcc..e3422846c0 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -62,7 +62,7 @@ public:
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
std::vector<uint256> GetHeadBlocks() const override;
- bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
+ bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override;
std::unique_ptr<CCoinsViewCursor> Cursor() const override;
//! Whether an unsupported database format is used.
diff --git a/src/validation.cpp b/src/validation.cpp
index b42b398619..62ce1f1162 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -76,8 +76,6 @@ using node::BlockManager;
using node::BlockMap;
using node::CBlockIndexHeightOnlyComparator;
using node::CBlockIndexWorkComparator;
-using node::fImporting;
-using node::fPruneMode;
using node::fReindex;
using node::ReadBlockFromDisk;
using node::SnapshotMetadata;
@@ -1573,8 +1571,9 @@ bool Chainstate::IsInitialBlockDownload() const
LOCK(cs_main);
if (m_cached_finished_ibd.load(std::memory_order_relaxed))
return false;
- if (fImporting || fReindex)
+ if (m_chainman.m_blockman.LoadingBlocks()) {
return true;
+ }
if (m_chain.Tip() == nullptr)
return true;
if (m_chain.Tip()->nChainWork < m_chainman.MinimumChainWork()) {
@@ -2411,7 +2410,7 @@ bool Chainstate::FlushStateToDisk(
CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
LOCK(m_blockman.cs_LastBlockFile);
- if (fPruneMode && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
+ if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
// make sure we don't prune above any of the prune locks bestblocks
// pruning is height-based
int last_prune{m_chain.Height()}; // last height we can prune
@@ -4097,7 +4096,7 @@ bool CVerifyDB::VerifyDB(
if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
break;
}
- if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
+ if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
// If pruning or running under an assumeutxo snapshot, only go
// back as far as we have data.
LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 0d42f82f68..c619222b3e 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -479,8 +479,8 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip)
std::unique_ptr<DatabaseCursor> cursor = db.GetNewCursor();
if (cursor) {
while (fSuccess) {
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
+ DataStream ssValue{};
DatabaseCursor::Status ret1 = cursor->Next(ssKey, ssValue);
if (ret1 == DatabaseCursor::Status::DONE) {
break;
@@ -667,7 +667,7 @@ BerkeleyCursor::BerkeleyCursor(BerkeleyDatabase& database)
}
}
-DatabaseCursor::Status BerkeleyCursor::Next(CDataStream& ssKey, CDataStream& ssValue)
+DatabaseCursor::Status BerkeleyCursor::Next(DataStream& ssKey, DataStream& ssValue)
{
if (m_cursor == nullptr) return Status::FAIL;
// Read at cursor
@@ -682,10 +682,8 @@ DatabaseCursor::Status BerkeleyCursor::Next(CDataStream& ssKey, CDataStream& ssV
}
// Convert to streams
- ssKey.SetType(SER_DISK);
ssKey.clear();
ssKey.write({AsBytePtr(datKey.get_data()), datKey.get_size()});
- ssValue.SetType(SER_DISK);
ssValue.clear();
ssValue.write({AsBytePtr(datValue.get_data()), datValue.get_size()});
return Status::MORE;
@@ -755,7 +753,7 @@ std::string BerkeleyDatabaseVersion()
return DbEnv::version(nullptr, nullptr, nullptr);
}
-bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
+bool BerkeleyBatch::ReadKey(DataStream&& key, DataStream& value)
{
if (!pdb)
return false;
@@ -771,7 +769,7 @@ bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
return false;
}
-bool BerkeleyBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite)
+bool BerkeleyBatch::WriteKey(DataStream&& key, DataStream&& value, bool overwrite)
{
if (!pdb)
return false;
@@ -786,7 +784,7 @@ bool BerkeleyBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwr
return (ret == 0);
}
-bool BerkeleyBatch::EraseKey(CDataStream&& key)
+bool BerkeleyBatch::EraseKey(DataStream&& key)
{
if (!pdb)
return false;
@@ -799,7 +797,7 @@ bool BerkeleyBatch::EraseKey(CDataStream&& key)
return (ret == 0 || ret == DB_NOTFOUND);
}
-bool BerkeleyBatch::HasKey(CDataStream&& key)
+bool BerkeleyBatch::HasKey(DataStream&& key)
{
if (!pdb)
return false;
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index 3881419d45..9d1d68ba43 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -194,17 +194,17 @@ public:
explicit BerkeleyCursor(BerkeleyDatabase& database);
~BerkeleyCursor() override;
- Status Next(CDataStream& key, CDataStream& value) override;
+ Status Next(DataStream& key, DataStream& value) override;
};
/** RAII class that provides access to a Berkeley database */
class BerkeleyBatch : public DatabaseBatch
{
private:
- bool ReadKey(CDataStream&& key, CDataStream& value) override;
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite = true) override;
- bool EraseKey(CDataStream&& key) override;
- bool HasKey(CDataStream&& key) override;
+ bool ReadKey(DataStream&& key, DataStream& value) override;
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override;
+ bool EraseKey(DataStream&& key) override;
+ bool HasKey(DataStream&& key) override;
protected:
Db* pdb;
diff --git a/src/wallet/db.h b/src/wallet/db.h
index d040af0d14..287fb1d19e 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -38,17 +38,17 @@ public:
DONE,
};
- virtual Status Next(CDataStream& key, CDataStream& value) { return Status::FAIL; }
+ virtual Status Next(DataStream& key, DataStream& value) { return Status::FAIL; }
};
/** RAII class that provides access to a WalletDatabase */
class DatabaseBatch
{
private:
- virtual bool ReadKey(CDataStream&& key, CDataStream& value) = 0;
- virtual bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) = 0;
- virtual bool EraseKey(CDataStream&& key) = 0;
- virtual bool HasKey(CDataStream&& key) = 0;
+ virtual bool ReadKey(DataStream&& key, DataStream& value) = 0;
+ virtual bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) = 0;
+ virtual bool EraseKey(DataStream&& key) = 0;
+ virtual bool HasKey(DataStream&& key) = 0;
public:
explicit DatabaseBatch() {}
@@ -63,7 +63,7 @@ public:
template <typename K, typename T>
bool Read(const K& key, T& value)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
@@ -80,7 +80,7 @@ public:
template <typename K, typename T>
bool Write(const K& key, const T& value, bool fOverwrite = true)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
@@ -94,7 +94,7 @@ public:
template <typename K>
bool Erase(const K& key)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
@@ -104,7 +104,7 @@ public:
template <typename K>
bool Exists(const K& key)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
@@ -175,17 +175,17 @@ public:
class DummyCursor : public DatabaseCursor
{
- Status Next(CDataStream& key, CDataStream& value) override { return Status::FAIL; }
+ Status Next(DataStream& key, DataStream& value) override { return Status::FAIL; }
};
/** RAII class that provides access to a DummyDatabase. Never fails. */
class DummyBatch : public DatabaseBatch
{
private:
- bool ReadKey(CDataStream&& key, CDataStream& value) override { return true; }
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) override { return true; }
- bool EraseKey(CDataStream&& key) override { return true; }
- bool HasKey(CDataStream&& key) override { return true; }
+ bool ReadKey(DataStream&& key, DataStream& value) override { return true; }
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override { return true; }
+ bool EraseKey(DataStream&& key) override { return true; }
+ bool HasKey(DataStream&& key) override { return true; }
public:
void Flush() override {}
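The templated Read/Write/Erase/Exists helpers above are what make the CDataStream to DataStream switch transparent to callers: any serializable key and value type works unchanged. An illustrative usage sketch (the key and value shown here are hypothetical, not taken from walletdb.cpp):

#include <string>
#include <utility>

// Illustrative only: drive the templated DatabaseBatch helpers shown above.
static void BatchUsageSketch(DatabaseBatch& batch)
{
    const std::pair<std::string, std::string> key{"name", "mylabel"};

    batch.Write(key, std::string{"savings"}); // Key and value are serialized into DataStream internally.
    std::string label;
    if (batch.Read(key, label)) {
        // label now holds "savings".
    }
    if (batch.Exists(key)) {
        batch.Erase(key);
    }
}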
diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp
index 901e23b90a..403ec711ff 100644
--- a/src/wallet/dump.cpp
+++ b/src/wallet/dump.cpp
@@ -67,8 +67,8 @@ bool DumpWallet(const ArgsManager& args, CWallet& wallet, bilingual_str& error)
// Read the records
while (true) {
- CDataStream ss_key(SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(SER_DISK, CLIENT_VERSION);
+ DataStream ss_key{};
+ DataStream ss_value{};
DatabaseCursor::Status status = cursor->Next(ss_key, ss_value);
if (status == DatabaseCursor::Status::DONE) {
ret = true;
@@ -255,8 +255,8 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs::
std::vector<unsigned char> k = ParseHex(key);
std::vector<unsigned char> v = ParseHex(value);
- CDataStream ss_key(k, SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(v, SER_DISK, CLIENT_VERSION);
+ DataStream ss_key{k};
+ DataStream ss_value{v};
if (!batch->Write(ss_key, ss_value)) {
error = strprintf(_("Error: Unable to write record to new wallet"));
diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp
index 71a913c9e0..93a6bbde20 100644
--- a/src/wallet/rpc/backup.cpp
+++ b/src/wallet/rpc/backup.cpp
@@ -6,6 +6,7 @@
#include <clientversion.h>
#include <core_io.h>
#include <fs.h>
+#include <hash.h>
#include <interfaces/chain.h>
#include <key_io.h>
#include <merkleblock.h>
@@ -14,6 +15,7 @@
#include <script/script.h>
#include <script/standard.h>
#include <sync.h>
+#include <uint256.h>
#include <util/bip32.h>
#include <util/system.h>
#include <util/time.h>
@@ -334,7 +336,7 @@ RPCHelpMan importprunedfunds()
}
uint256 hashTx = tx.GetHash();
- CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssMB{ParseHexV(request.params[1], "proof")};
CMerkleBlock merkleBlock;
ssMB >> merkleBlock;
@@ -886,9 +888,7 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d
}
case TxoutType::WITNESS_V0_SCRIPTHASH: {
if (script_ctx == ScriptContext::WITNESS_V0) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Trying to nest P2WSH inside another P2WSH");
- uint256 fullid(solverdata[0]);
- CScriptID id;
- CRIPEMD160().Write(fullid.begin(), fullid.size()).Finalize(id.begin());
+ CScriptID id{RIPEMD160(solverdata[0])};
auto subscript = std::move(import_data.witnessscript); // Remove redeemscript from import_data to check for superfluous script later.
if (!subscript) return "missing witnessscript";
if (CScriptID(*subscript) != id) return "witnessScript does not match the scriptPubKey or redeemScript";
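hash.h appears in the diffstat but not in this excerpt, so the RIPEMD160() helper used by the call site above (and in wallet/rpc/coins.cpp and scriptpubkeyman.cpp below) is not visible here. A plausible sketch, assuming it simply wraps the CRIPEMD160 Write()/Finalize() sequence that the removed lines performed by hand:

#include <crypto/ripemd160.h>
#include <span.h>
#include <uint256.h>

// Plausible shape of the helper; not the actual hash.h hunk.
inline uint160 RIPEMD160(Span<const unsigned char> data)
{
    uint160 result;
    CRIPEMD160().Write(data.data(), data.size()).Finalize(result.begin());
    return result;
}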
diff --git a/src/wallet/rpc/coins.cpp b/src/wallet/rpc/coins.cpp
index 32151d5b5c..4c386789f1 100644
--- a/src/wallet/rpc/coins.cpp
+++ b/src/wallet/rpc/coins.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <core_io.h>
+#include <hash.h>
#include <key_io.h>
#include <rpc/util.h>
#include <util/moneystr.h>
@@ -679,8 +680,7 @@ RPCHelpMan listunspent()
CHECK_NONFATAL(extracted);
// Also return the witness script
const WitnessV0ScriptHash& whash = std::get<WitnessV0ScriptHash>(witness_destination);
- CScriptID id;
- CRIPEMD160().Write(whash.begin(), whash.size()).Finalize(id.begin());
+ CScriptID id{RIPEMD160(whash)};
CScript witnessScript;
if (provider->GetCScript(id, witnessScript)) {
entry.pushKV("witnessScript", HexStr(witnessScript));
@@ -689,8 +689,7 @@ RPCHelpMan listunspent()
}
} else if (scriptPubKey.IsPayToWitnessScriptHash()) {
const WitnessV0ScriptHash& whash = std::get<WitnessV0ScriptHash>(address);
- CScriptID id;
- CRIPEMD160().Write(whash.begin(), whash.size()).Finalize(id.begin());
+ CScriptID id{RIPEMD160(whash)};
CScript witnessScript;
if (provider->GetCScript(id, witnessScript)) {
entry.pushKV("witnessScript", HexStr(witnessScript));
diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp
index 9ba3c7fd2c..84f33e50b3 100644
--- a/src/wallet/salvage.cpp
+++ b/src/wallet/salvage.cpp
@@ -139,7 +139,7 @@ bool RecoverDatabaseFile(const ArgsManager& args, const fs::path& file_path, bil
for (KeyValPair& row : salvagedData)
{
/* Filter for only private key type KV pairs to be added to the salvaged wallet */
- CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{row.first};
CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION);
std::string strType, strErr;
bool fReadOK;
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index d8f34dd2b0..d8fb926c9a 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <hash.h>
#include <key_io.h>
#include <logging.h>
#include <outputtype.h>
@@ -166,9 +167,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s
if (sigversion == IsMineSigVersion::TOP && !keystore.HaveCScript(CScriptID(CScript() << OP_0 << vSolutions[0]))) {
break;
}
- uint160 hash;
- CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(hash.begin());
- CScriptID scriptID = CScriptID(hash);
+ CScriptID scriptID{RIPEMD160(vSolutions[0])};
CScript subscript;
if (keystore.GetCScript(scriptID, subscript)) {
ret = std::max(ret, recurse_scripthash ? IsMineInner(keystore, subscript, IsMineSigVersion::WITNESS_V0) : IsMineResult::SPENDABLE);
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index 7a0c4b1806..4af49db609 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -385,7 +385,7 @@ void SQLiteBatch::Close()
}
}
-bool SQLiteBatch::ReadKey(CDataStream&& key, CDataStream& value)
+bool SQLiteBatch::ReadKey(DataStream&& key, DataStream& value)
{
if (!m_database.m_db) return false;
assert(m_read_stmt);
@@ -412,7 +412,7 @@ bool SQLiteBatch::ReadKey(CDataStream&& key, CDataStream& value)
return true;
}
-bool SQLiteBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite)
+bool SQLiteBatch::WriteKey(DataStream&& key, DataStream&& value, bool overwrite)
{
if (!m_database.m_db) return false;
assert(m_insert_stmt && m_overwrite_stmt);
@@ -439,7 +439,7 @@ bool SQLiteBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrit
return res == SQLITE_DONE;
}
-bool SQLiteBatch::EraseKey(CDataStream&& key)
+bool SQLiteBatch::EraseKey(DataStream&& key)
{
if (!m_database.m_db) return false;
assert(m_delete_stmt);
@@ -457,7 +457,7 @@ bool SQLiteBatch::EraseKey(CDataStream&& key)
return res == SQLITE_DONE;
}
-bool SQLiteBatch::HasKey(CDataStream&& key)
+bool SQLiteBatch::HasKey(DataStream&& key)
{
if (!m_database.m_db) return false;
assert(m_read_stmt);
@@ -470,7 +470,7 @@ bool SQLiteBatch::HasKey(CDataStream&& key)
return res == SQLITE_ROW;
}
-DatabaseCursor::Status SQLiteCursor::Next(CDataStream& key, CDataStream& value)
+DatabaseCursor::Status SQLiteCursor::Next(DataStream& key, DataStream& value)
{
int res = sqlite3_step(m_cursor_stmt);
if (res == SQLITE_DONE) {
diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h
index b35d24fe46..c6745d7a7e 100644
--- a/src/wallet/sqlite.h
+++ b/src/wallet/sqlite.h
@@ -22,7 +22,7 @@ public:
explicit SQLiteCursor() {}
~SQLiteCursor() override;
- Status Next(CDataStream& key, CDataStream& value) override;
+ Status Next(DataStream& key, DataStream& value) override;
};
/** RAII class that provides access to a WalletDatabase */
@@ -38,10 +38,10 @@ private:
void SetupSQLStatements();
- bool ReadKey(CDataStream&& key, CDataStream& value) override;
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite = true) override;
- bool EraseKey(CDataStream&& key) override;
- bool HasKey(CDataStream&& key) override;
+ bool ReadKey(DataStream&& key, DataStream& value) override;
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override;
+ bool EraseKey(DataStream&& key) override;
+ bool HasKey(DataStream&& key) override;
public:
explicit SQLiteBatch(SQLiteDatabase& database);
diff --git a/src/wallet/test/util.cpp b/src/wallet/test/util.cpp
index f9fb99e11e..225871fd91 100644
--- a/src/wallet/test/util.cpp
+++ b/src/wallet/test/util.cpp
@@ -57,8 +57,8 @@ std::unique_ptr<WalletDatabase> DuplicateMockDatabase(WalletDatabase& database,
// Read all records from the original database and write them to the new one
while (true) {
- CDataStream key(SER_DISK, CLIENT_VERSION);
- CDataStream value(SER_DISK, CLIENT_VERSION);
+ DataStream key{};
+ DataStream value{};
DatabaseCursor::Status status = cursor->Next(key, value);
assert(status != DatabaseCursor::Status::FAIL);
if (status == DatabaseCursor::Status::DONE) break;
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 31fc3344cc..53321e98ee 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -910,7 +910,7 @@ BOOST_FIXTURE_TEST_CASE(ZapSelectTx, TestChain100Setup)
class FailCursor : public DatabaseCursor
{
public:
- Status Next(CDataStream& key, CDataStream& value) override { return Status::FAIL; }
+ Status Next(DataStream& key, DataStream& value) override { return Status::FAIL; }
};
/** RAII class that provides access to a FailDatabase. Which fails if needed. */
@@ -918,10 +918,10 @@ class FailBatch : public DatabaseBatch
{
private:
bool m_pass{true};
- bool ReadKey(CDataStream&& key, CDataStream& value) override { return m_pass; }
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) override { return m_pass; }
- bool EraseKey(CDataStream&& key) override { return m_pass; }
- bool HasKey(CDataStream&& key) override { return m_pass; }
+ bool ReadKey(DataStream&& key, DataStream& value) override { return m_pass; }
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override { return m_pass; }
+ bool EraseKey(DataStream&& key) override { return m_pass; }
+ bool HasKey(DataStream&& key) override { return m_pass; }
public:
explicit FailBatch(bool pass) : m_pass(pass) {}
diff --git a/src/wallet/test/walletload_tests.cpp b/src/wallet/test/walletload_tests.cpp
index f74bf54d9e..f1feb28e7d 100644
--- a/src/wallet/test/walletload_tests.cpp
+++ b/src/wallet/test/walletload_tests.cpp
@@ -58,8 +58,8 @@ bool HasAnyRecordOfType(WalletDatabase& db, const std::string& key)
std::unique_ptr<DatabaseCursor> cursor = batch->GetNewCursor();
BOOST_CHECK(cursor);
while (true) {
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
+ DataStream ssValue{};
DatabaseCursor::Status status = cursor->Next(ssKey, ssValue);
assert(status != DatabaseCursor::Status::FAIL);
if (status == DatabaseCursor::Status::DONE) break;
diff --git a/src/wallet/transaction.h b/src/wallet/transaction.h
index 6ad222864a..290ef4eaa9 100644
--- a/src/wallet/transaction.h
+++ b/src/wallet/transaction.h
@@ -293,6 +293,7 @@ public:
bool isAbandoned() const { return state<TxStateInactive>() && state<TxStateInactive>()->abandoned; }
bool isConflicted() const { return state<TxStateConflicted>(); }
+ bool isInactive() const { return state<TxStateInactive>(); }
bool isUnconfirmed() const { return !isAbandoned() && !isConflicted() && !isConfirmed(); }
bool isConfirmed() const { return state<TxStateConfirmed>(); }
const uint256& GetHash() const { return tx->GetHash(); }
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index b938858315..aadad258a9 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1065,6 +1065,33 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const TxState& state, const
}
}
+ // Mark inactive coinbase transactions and their descendants as abandoned
+ if (wtx.IsCoinBase() && wtx.isInactive()) {
+ std::vector<CWalletTx*> txs{&wtx};
+
+ TxStateInactive inactive_state = TxStateInactive{/*abandoned=*/true};
+
+ while (!txs.empty()) {
+ CWalletTx* desc_tx = txs.back();
+ txs.pop_back();
+ desc_tx->m_state = inactive_state;
+ // Break caches since we have changed the state
+ desc_tx->MarkDirty();
+ batch.WriteTx(*desc_tx);
+ MarkInputsDirty(desc_tx->tx);
+ for (unsigned int i = 0; i < desc_tx->tx->vout.size(); ++i) {
+ COutPoint outpoint(desc_tx->GetHash(), i);
+ std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(outpoint);
+ for (TxSpends::const_iterator it = range.first; it != range.second; ++it) {
+ const auto wit = mapWallet.find(it->second);
+ if (wit != mapWallet.end()) {
+ txs.push_back(&wit->second);
+ }
+ }
+ }
+ }
+ }
+
//// debug print
WalletLogPrintf("AddToWallet %s %s%s\n", hash.ToString(), (fInsertedNew ? "new" : ""), (fUpdated ? "update" : ""));
@@ -1274,7 +1301,11 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
wtx.MarkDirty();
batch.WriteTx(wtx);
NotifyTransactionChanged(wtx.GetHash(), CT_UPDATED);
- // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too
+ // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too.
+ // States are not permanent, so these transactions can become unabandoned if they are re-added to the
+ // mempool, confirmed in a block, or conflicted.
+ // Note: If the reorged coinbase is re-added to the main chain, any descendants whose states have not
+ // changed will remain abandoned and will require a manual rebroadcast if the user wants them.
for (unsigned int i = 0; i < wtx.tx->vout.size(); ++i) {
std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(COutPoint(now, i));
for (TxSpends::const_iterator iter = range.first; iter != range.second; ++iter) {
@@ -3783,8 +3814,8 @@ bool CWallet::MigrateToSQLite(bilingual_str& error)
}
DatabaseCursor::Status status = DatabaseCursor::Status::FAIL;
while (true) {
- CDataStream ss_key(SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(SER_DISK, CLIENT_VERSION);
+ DataStream ss_key{};
+ DataStream ss_value{};
status = cursor->Next(ss_key, ss_value);
if (status != DatabaseCursor::Status::MORE) {
break;
@@ -3821,8 +3852,8 @@ bool CWallet::MigrateToSQLite(bilingual_str& error)
bool began = batch->TxnBegin();
assert(began); // This is a critical error, the new db could not be written to. The original db exists as a backup, but we should not continue execution.
for (const auto& [key, value] : records) {
- CDataStream ss_key(key, SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(value, SER_DISK, CLIENT_VERSION);
+ DataStream ss_key{key};
+ DataStream ss_value{value};
if (!batch->Write(ss_key, ss_value)) {
batch->TxnAbort();
m_database->Close();
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 7c59b6aac0..2cd35ae40e 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -321,7 +321,7 @@ public:
};
static bool
-ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
+ReadKeyValue(CWallet* pwallet, DataStream& ssKey, CDataStream& ssValue,
CWalletScanState &wss, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn = nullptr) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
try {
@@ -759,7 +759,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
return true;
}
-bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn)
+bool ReadKeyValue(CWallet* pwallet, DataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn)
{
CWalletScanState dummy_wss;
LOCK(pwallet->cs_wallet);
@@ -822,7 +822,7 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
while (true)
{
// Read next record
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
CDataStream ssValue(SER_DISK, CLIENT_VERSION);
DatabaseCursor::Status status = cursor->Next(ssKey, ssValue);
if (status == DatabaseCursor::Status::DONE) {
@@ -993,8 +993,8 @@ DBErrors WalletBatch::FindWalletTxHashes(std::vector<uint256>& tx_hashes)
while (true)
{
// Read next record
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
+ DataStream ssValue{};
DatabaseCursor::Status status = cursor->Next(ssKey, ssValue);
if (status == DatabaseCursor::Status::DONE) {
break;
@@ -1116,8 +1116,8 @@ bool WalletBatch::EraseRecords(const std::unordered_set<std::string>& types)
while (true)
{
// Read next record
- CDataStream key(SER_DISK, CLIENT_VERSION);
- CDataStream value(SER_DISK, CLIENT_VERSION);
+ DataStream key{};
+ DataStream value{};
DatabaseCursor::Status status = cursor->Next(key, value);
if (status == DatabaseCursor::Status::DONE) {
break;
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 97e8fad278..c97356a71f 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -303,7 +303,7 @@ void MaybeCompactWalletDB(WalletContext& context);
using KeyFilterFn = std::function<bool(const std::string&)>;
//! Unserialize a given Key-Value pair and load it into the wallet
-bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn = nullptr);
+bool ReadKeyValue(CWallet* pwallet, DataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn = nullptr);
/** Return object for accessing dummy database with no read/write capabilities. */
std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase();