Diffstat (limited to 'src')
-rw-r--r--  src/chainparamsseeds.h                            8
-rw-r--r--  src/index/base.cpp                                2
-rw-r--r--  src/index/blockfilterindex.cpp                   12
-rw-r--r--  src/index/coinstatsindex.cpp                     10
-rw-r--r--  src/index/txindex.cpp                            18
-rw-r--r--  src/init.cpp                                     45
-rw-r--r--  src/node/blockstorage.cpp                       326
-rw-r--r--  src/node/blockstorage.h                          41
-rw-r--r--  src/rpc/net.cpp                                   4
-rw-r--r--  src/test/data/tx_invalid.json                     4
-rw-r--r--  src/test/fuzz/script_assets_test_minimizer.cpp   17
-rw-r--r--  src/validation.cpp                              285
-rw-r--r--  src/validation.h                                 35
-rw-r--r--  src/wallet/test/wallet_tests.cpp                  1
14 files changed, 429 insertions, 379 deletions
diff --git a/src/chainparamsseeds.h b/src/chainparamsseeds.h
index cd7b806621..2e31daea83 100644
--- a/src/chainparamsseeds.h
+++ b/src/chainparamsseeds.h
@@ -1195,6 +1195,14 @@ static const uint8_t chainparams_seed_main[] = {
0x04,0x20,0x98,0xc6,0x44,0x27,0x90,0x41,0xa6,0x98,0xf9,0x25,0x6c,0x59,0x0f,0x06,0x6d,0x44,0x59,0x0e,0xb2,0x46,0xb0,0xa4,0x37,0x88,0x69,0x8f,0xc1,0x32,0xcd,0x9f,0x15,0xd7,0x20,0x8d,
0x04,0x20,0xaa,0x3a,0x16,0x86,0xea,0x59,0x09,0x04,0x78,0xe5,0x10,0x92,0xe1,0x1d,0xad,0xf7,0x56,0x2b,0xac,0xb0,0x97,0x29,0x63,0x30,0xf4,0x1b,0xcf,0xde,0xf3,0x28,0x0a,0x29,0x20,0x8d,
0x04,0x20,0xbc,0x27,0xae,0x89,0xc1,0x67,0x73,0x0a,0x08,0x02,0xdf,0xb7,0xcc,0x94,0xc7,0x9f,0xf4,0x72,0x7a,0x9b,0x20,0x0c,0x5c,0x11,0x3d,0x22,0xd6,0x13,0x88,0x66,0x74,0xbf,0x20,0x8d,
+ 0x05,0x20,0xfe,0x97,0xba,0x09,0x2a,0xa4,0x85,0x10,0xa1,0x04,0x7b,0x88,0x7a,0x5a,0x06,0x53,0x71,0x93,0x3b,0xf9,0xa2,0x2f,0xd9,0xe3,0x8f,0xa5,0xa2,0xac,0x1e,0x6c,0x6c,0x8c,0x20,0x8d,
+ 0x05,0x20,0x17,0x0c,0x56,0xce,0x72,0xa5,0xa0,0xe6,0x23,0x06,0xa3,0xc7,0x08,0x43,0x18,0xee,0x3a,0x46,0x35,0x5d,0x17,0xf6,0x78,0x96,0xa0,0x9c,0x51,0xef,0xbe,0x23,0xfd,0x71,0x20,0x8d,
+ 0x05,0x20,0x31,0x0f,0x30,0x0b,0x9d,0x70,0x0c,0x7c,0xf7,0x98,0x7e,0x1c,0xf4,0x33,0xdc,0x64,0x17,0xf7,0x00,0x7a,0x0c,0x04,0xb5,0x83,0xfc,0x5f,0xa6,0x52,0x39,0x79,0x63,0x87,0x20,0x8d,
+ 0x05,0x20,0x3e,0xe3,0xe0,0xa9,0xbc,0xf4,0x2e,0x59,0xd9,0x20,0xee,0xdf,0x74,0x61,0x4d,0x99,0x0c,0x5c,0x15,0x30,0x9b,0x72,0x16,0x79,0x15,0xf4,0x7a,0xca,0x34,0xcc,0x81,0x99,0x20,0x8d,
+ 0x05,0x20,0x3b,0x42,0x1c,0x25,0xf7,0xbf,0x79,0xed,0x6d,0x7d,0xef,0x65,0x30,0x7d,0xee,0x16,0x37,0x22,0x72,0x43,0x33,0x28,0x40,0xa3,0xaa,0xf4,0x48,0x49,0x67,0xb1,0x4b,0xfd,0x20,0x8d,
+ 0x05,0x20,0x7a,0x65,0xf7,0x47,0x42,0x9d,0x66,0x42,0x3b,0xb3,0xa7,0x03,0x6c,0x46,0x78,0x19,0x28,0x78,0x1e,0xa3,0x7c,0x67,0x44,0xb7,0x83,0x05,0xe3,0xfe,0xa5,0xe4,0x0a,0x6e,0x20,0x8d,
+ 0x05,0x20,0xb5,0x83,0x6f,0xb6,0x11,0xd8,0x0e,0xa8,0x57,0xda,0x15,0x20,0x5b,0x1a,0x6d,0x21,0x15,0x5a,0xbd,0xb4,0x17,0x11,0xc2,0xfb,0x0e,0xfc,0xde,0xe8,0x26,0x56,0xa8,0xac,0x20,0x8d,
+ 0x05,0x20,0xcc,0xaf,0x6c,0x3b,0xd0,0x13,0x76,0x23,0xc3,0x36,0xbb,0x64,0x4a,0x4a,0x06,0x93,0x69,0x6d,0xb0,0x10,0x6e,0x66,0xa4,0x61,0xf8,0x2d,0xe7,0x80,0x72,0x4d,0x53,0x94,0x20,0x8d,
};
static const uint8_t chainparams_seed_test[] = {
diff --git a/src/index/base.cpp b/src/index/base.cpp
index 9e637c9c6f..357c4fbaf9 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -13,7 +13,7 @@
#include <validation.h> // For g_chainman
#include <warnings.h>
-constexpr char DB_BEST_BLOCK = 'B';
+constexpr uint8_t DB_BEST_BLOCK{'B'};
constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
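The index hunks here and below switch the one-byte database key prefixes from char to uint8_t, the type ser_readdata8 returns, and drop the static_cast<char> in coinstatsindex. A minimal standalone sketch, not taken from this diff, of the signedness trap the uint8_t prefix style sidesteps (assuming a platform where plain char is signed):

// Sketch only: a signed char prefix at or above 0x80 never compares equal to
// the uint8_t an 8-bit reader hands back, because both sides promote to int.
#include <cstdint>
#include <iostream>

int main()
{
    const char signed_prefix = '\xf7';    // typically -9 where char is signed
    const std::uint8_t from_disk = 0xf7;  // what a ser_readdata8-style reader returns

    std::cout << (signed_prefix == from_disk) << '\n';       // prints 0 (mismatch)
    std::cout << (std::uint8_t{0xf7} == from_disk) << '\n';  // prints 1 (match)
}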
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index 154d7a7027..b82b9915d5 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -24,9 +24,9 @@
* as big-endian so that sequential reads of filters by height are fast.
* Keys for the hash index have the type [DB_BLOCK_HASH, uint256].
*/
-constexpr char DB_BLOCK_HASH = 's';
-constexpr char DB_BLOCK_HEIGHT = 't';
-constexpr char DB_FILTER_POS = 'P';
+constexpr uint8_t DB_BLOCK_HASH{'s'};
+constexpr uint8_t DB_BLOCK_HEIGHT{'t'};
+constexpr uint8_t DB_FILTER_POS{'P'};
constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for fltr?????.dat files */
@@ -63,7 +63,7 @@ struct DBHeightKey {
template<typename Stream>
void Unserialize(Stream& s)
{
- char prefix = ser_readdata8(s);
+ const uint8_t prefix{ser_readdata8(s)};
if (prefix != DB_BLOCK_HEIGHT) {
throw std::ios_base::failure("Invalid format for block filter index DB height key");
}
@@ -77,7 +77,7 @@ struct DBHashKey {
explicit DBHashKey(const uint256& hash_in) : hash(hash_in) {}
SERIALIZE_METHODS(DBHashKey, obj) {
- char prefix = DB_BLOCK_HASH;
+ uint8_t prefix{DB_BLOCK_HASH};
READWRITE(prefix);
if (prefix != DB_BLOCK_HASH) {
throw std::ios_base::failure("Invalid format for block filter index DB hash key");
@@ -149,7 +149,7 @@ bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, BlockFilter& f
}
uint256 block_hash;
- std::vector<unsigned char> encoded_filter;
+ std::vector<uint8_t> encoded_filter;
try {
filein >> block_hash >> encoded_filter;
filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter));
diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp
index c7c1f4b533..7c8b2b186e 100644
--- a/src/index/coinstatsindex.cpp
+++ b/src/index/coinstatsindex.cpp
@@ -12,9 +12,9 @@
#include <undo.h>
#include <validation.h>
-static constexpr char DB_BLOCK_HASH = 's';
-static constexpr char DB_BLOCK_HEIGHT = 't';
-static constexpr char DB_MUHASH = 'M';
+static constexpr uint8_t DB_BLOCK_HASH{'s'};
+static constexpr uint8_t DB_BLOCK_HEIGHT{'t'};
+static constexpr uint8_t DB_MUHASH{'M'};
namespace {
@@ -66,7 +66,7 @@ struct DBHeightKey {
template <typename Stream>
void Unserialize(Stream& s)
{
- char prefix{static_cast<char>(ser_readdata8(s))};
+ const uint8_t prefix{ser_readdata8(s)};
if (prefix != DB_BLOCK_HEIGHT) {
throw std::ios_base::failure("Invalid format for coinstatsindex DB height key");
}
@@ -81,7 +81,7 @@ struct DBHashKey {
SERIALIZE_METHODS(DBHashKey, obj)
{
- char prefix{DB_BLOCK_HASH};
+ uint8_t prefix{DB_BLOCK_HASH};
READWRITE(prefix);
if (prefix != DB_BLOCK_HASH) {
throw std::ios_base::failure("Invalid format for coinstatsindex DB hash key");
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index f41985c344..3feefe8619 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -4,20 +4,20 @@
#include <index/disktxpos.h>
#include <index/txindex.h>
+#include <node/blockstorage.h>
#include <node/ui_interface.h>
#include <shutdown.h>
#include <util/system.h>
#include <util/translation.h>
#include <validation.h>
-constexpr char DB_BEST_BLOCK = 'B';
-constexpr char DB_TXINDEX = 't';
-constexpr char DB_TXINDEX_BLOCK = 'T';
+constexpr uint8_t DB_BEST_BLOCK{'B'};
+constexpr uint8_t DB_TXINDEX{'t'};
+constexpr uint8_t DB_TXINDEX_BLOCK{'T'};
std::unique_ptr<TxIndex> g_txindex;
-
/** Access to the txindex database (indexes/txindex/) */
class TxIndex::DB : public BaseIndex::DB
{
@@ -60,8 +60,8 @@ bool TxIndex::DB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_
*/
static void WriteTxIndexMigrationBatches(CDBWrapper& newdb, CDBWrapper& olddb,
CDBBatch& batch_newdb, CDBBatch& batch_olddb,
- const std::pair<unsigned char, uint256>& begin_key,
- const std::pair<unsigned char, uint256>& end_key)
+ const std::pair<uint8_t, uint256>& begin_key,
+ const std::pair<uint8_t, uint256>& end_key)
{
// Sync new DB changes to disk before deleting from old DB.
newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
@@ -113,9 +113,9 @@ bool TxIndex::DB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator&
CDBBatch batch_newdb(*this);
CDBBatch batch_olddb(block_tree_db);
- std::pair<unsigned char, uint256> key;
- std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
- std::pair<unsigned char, uint256> prev_key = begin_key;
+ std::pair<uint8_t, uint256> key;
+ std::pair<uint8_t, uint256> begin_key{DB_TXINDEX, uint256()};
+ std::pair<uint8_t, uint256> prev_key = begin_key;
bool interrupted = false;
std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
diff --git a/src/init.cpp b/src/init.cpp
index 10404f9887..dbc4ea30c5 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -420,7 +420,7 @@ void SetupServerArgs(NodeContext& node)
" If <type> is not supplied or if <type> = 1, indexes for all known types are enabled.",
ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-addnode=<ip>", strprintf("Add a node to connect to and attempt to keep the connection open (see the addnode RPC help for more info). This option can be specified multiple times to add multiple nodes; connections are limited to %u at a time and are counted separately from the -maxconnections limit.", MAX_ADDNODE_CONNECTIONS), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
argsman.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-bantime=<n>", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-bind=<addr>[:<port>][=onion]", strprintf("Bind to given address and always listen on it (default: 0.0.0.0). Use [host]:port notation for IPv6. Append =onion to tag any incoming connections to that address and port as incoming Tor connections (default: 127.0.0.1:%u=onion, testnet: 127.0.0.1:%u=onion, signet: 127.0.0.1:%u=onion, regtest: 127.0.0.1:%u=onion)", defaultBaseParams->OnionServiceTargetPort(), testnetBaseParams->OnionServiceTargetPort(), signetBaseParams->OnionServiceTargetPort(), regtestBaseParams->OnionServiceTargetPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
@@ -433,7 +433,7 @@ void SetupServerArgs(NodeContext& node)
argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u). This limit does not apply to connections manually added via -addnode or the addnode RPC, which have a separate limit of %u.", DEFAULT_MAX_PEER_CONNECTIONS, MAX_ADDNODE_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -602,47 +602,6 @@ static void BlockNotifyGenesisWait(const CBlockIndex* pBlockIndex)
}
}
-// If we're using -prune with -reindex, then delete block files that will be ignored by the
-// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
-// is missing, do the same here to delete any later block files after a gap. Also delete all
-// rev files since they'll be rewritten by the reindex anyway. This ensures that vinfoBlockFile
-// is in sync with what's actually on disk by the time we start downloading, so that pruning
-// works correctly.
-static void CleanupBlockRevFiles()
-{
- std::map<std::string, fs::path> mapBlockFiles;
-
- // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
- // Remove the rev files immediately and insert the blk file paths into an
- // ordered map keyed by block file index.
- LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
- fs::path blocksdir = gArgs.GetBlocksDirPath();
- for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
- if (fs::is_regular_file(*it) &&
- it->path().filename().string().length() == 12 &&
- it->path().filename().string().substr(8,4) == ".dat")
- {
- if (it->path().filename().string().substr(0,3) == "blk")
- mapBlockFiles[it->path().filename().string().substr(3,5)] = it->path();
- else if (it->path().filename().string().substr(0,3) == "rev")
- remove(it->path());
- }
- }
-
- // Remove all block files that aren't part of a contiguous set starting at
- // zero by walking the ordered map (keys are block file indices) by
- // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
- // start removing block files.
- int nContigCounter = 0;
- for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
- if (atoi(item.first) == nContigCounter) {
- nContigCounter++;
- continue;
- }
- remove(item.second);
- }
-}
-
#if HAVE_SYSTEM
static void StartupNotify(const ArgsManager& args)
{
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index daed6605e8..6c66c565ad 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -6,17 +6,310 @@
#include <chain.h>
#include <chainparams.h>
+#include <clientversion.h>
+#include <consensus/validation.h>
#include <flatfile.h>
#include <fs.h>
+#include <hash.h>
#include <pow.h>
#include <shutdown.h>
#include <signet.h>
#include <streams.h>
+#include <undo.h>
#include <util/system.h>
#include <validation.h>
-// From validation. TODO move here
-bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false);
+std::atomic_bool fImporting(false);
+std::atomic_bool fReindex(false);
+bool fHavePruned = false;
+bool fPruneMode = false;
+uint64_t nPruneTarget = 0;
+
+// TODO make namespace {
+RecursiveMutex cs_LastBlockFile;
+std::vector<CBlockFileInfo> vinfoBlockFile;
+int nLastBlockFile = 0;
+/** Global flag to indicate we should check to see if there are
+* block/undo files that should be deleted. Set on startup
+* or if we allocate more file space when we're in prune mode
+*/
+bool fCheckForPruning = false;
+
+/** Dirty block index entries. */
+std::set<CBlockIndex*> setDirtyBlockIndex;
+
+/** Dirty block file entries. */
+std::set<int> setDirtyFileInfo;
+// } // namespace
+
+static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
+static FlatFileSeq BlockFileSeq();
+static FlatFileSeq UndoFileSeq();
+
+bool IsBlockPruned(const CBlockIndex* pblockindex)
+{
+ return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
+}
+
+// If we're using -prune with -reindex, then delete block files that will be ignored by the
+// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
+// is missing, do the same here to delete any later block files after a gap. Also delete all
+// rev files since they'll be rewritten by the reindex anyway. This ensures that vinfoBlockFile
+// is in sync with what's actually on disk by the time we start downloading, so that pruning
+// works correctly.
+void CleanupBlockRevFiles()
+{
+ std::map<std::string, fs::path> mapBlockFiles;
+
+ // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
+ // Remove the rev files immediately and insert the blk file paths into an
+ // ordered map keyed by block file index.
+ LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
+ fs::path blocksdir = gArgs.GetBlocksDirPath();
+ for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
+ if (fs::is_regular_file(*it) &&
+ it->path().filename().string().length() == 12 &&
+ it->path().filename().string().substr(8,4) == ".dat")
+ {
+ if (it->path().filename().string().substr(0, 3) == "blk") {
+ mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path();
+ } else if (it->path().filename().string().substr(0, 3) == "rev") {
+ remove(it->path());
+ }
+ }
+ }
+
+ // Remove all block files that aren't part of a contiguous set starting at
+ // zero by walking the ordered map (keys are block file indices) by
+ // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
+ // start removing block files.
+ int nContigCounter = 0;
+ for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
+ if (atoi(item.first) == nContigCounter) {
+ nContigCounter++;
+ continue;
+ }
+ remove(item.second);
+ }
+}
+
+std::string CBlockFileInfo::ToString() const
+{
+ return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
+}
+
+CBlockFileInfo* GetBlockFileInfo(size_t n)
+{
+ LOCK(cs_LastBlockFile);
+
+ return &vinfoBlockFile.at(n);
+}
+
+static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
+{
+ // Open history file to append
+ CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
+ if (fileout.IsNull()) {
+ return error("%s: OpenUndoFile failed", __func__);
+ }
+
+ // Write index header
+ unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
+ fileout << messageStart << nSize;
+
+ // Write undo data
+ long fileOutPos = ftell(fileout.Get());
+ if (fileOutPos < 0) {
+ return error("%s: ftell failed", __func__);
+ }
+ pos.nPos = (unsigned int)fileOutPos;
+ fileout << blockundo;
+
+ // calculate & write checksum
+ CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
+ hasher << hashBlock;
+ hasher << blockundo;
+ fileout << hasher.GetHash();
+
+ return true;
+}
+
+bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
+{
+ FlatFilePos pos = pindex->GetUndoPos();
+ if (pos.IsNull()) {
+ return error("%s: no undo data available", __func__);
+ }
+
+ // Open history file to read
+ CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
+ if (filein.IsNull()) {
+ return error("%s: OpenUndoFile failed", __func__);
+ }
+
+ // Read block
+ uint256 hashChecksum;
+ CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
+ try {
+ verifier << pindex->pprev->GetBlockHash();
+ verifier >> blockundo;
+ filein >> hashChecksum;
+ } catch (const std::exception& e) {
+ return error("%s: Deserialize or I/O error - %s", __func__, e.what());
+ }
+
+ // Verify checksum
+ if (hashChecksum != verifier.GetHash()) {
+ return error("%s: Checksum mismatch", __func__);
+ }
+
+ return true;
+}
+
+static void FlushUndoFile(int block_file, bool finalize = false)
+{
+ FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
+ if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
+ AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
+ }
+}
+
+void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
+{
+ LOCK(cs_LastBlockFile);
+ FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
+ if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
+ AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
+ }
+ // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
+ // e.g. during IBD or a sync after a node going offline
+ if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
+}
+
+uint64_t CalculateCurrentUsage()
+{
+ LOCK(cs_LastBlockFile);
+
+ uint64_t retval = 0;
+ for (const CBlockFileInfo& file : vinfoBlockFile) {
+ retval += file.nSize + file.nUndoSize;
+ }
+ return retval;
+}
+
+void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
+{
+ for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
+ FlatFilePos pos(*it, 0);
+ fs::remove(BlockFileSeq().FileName(pos));
+ fs::remove(UndoFileSeq().FileName(pos));
+ LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
+ }
+}
+
+static FlatFileSeq BlockFileSeq()
+{
+ return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
+}
+
+static FlatFileSeq UndoFileSeq()
+{
+ return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE);
+}
+
+FILE* OpenBlockFile(const FlatFilePos& pos, bool fReadOnly)
+{
+ return BlockFileSeq().Open(pos, fReadOnly);
+}
+
+/** Open an undo file (rev?????.dat) */
+static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly)
+{
+ return UndoFileSeq().Open(pos, fReadOnly);
+}
+
+fs::path GetBlockPosFilename(const FlatFilePos& pos)
+{
+ return BlockFileSeq().FileName(pos);
+}
+
+bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false)
+{
+ LOCK(cs_LastBlockFile);
+
+ unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
+
+ bool finalize_undo = false;
+ if (!fKnown) {
+ while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
+ // when the undo file is keeping up with the block file, we want to flush it explicitly
+ // when it is lagging behind (more blocks arrive than are being connected), we let the
+ // undo block write case handle it
+ assert(std::addressof(::ChainActive()) == std::addressof(active_chain));
+ finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
+ nFile++;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
+ }
+ pos.nFile = nFile;
+ pos.nPos = vinfoBlockFile[nFile].nSize;
+ }
+
+ if ((int)nFile != nLastBlockFile) {
+ if (!fKnown) {
+ LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
+ }
+ FlushBlockFile(!fKnown, finalize_undo);
+ nLastBlockFile = nFile;
+ }
+
+ vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
+ if (fKnown) {
+ vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
+ } else {
+ vinfoBlockFile[nFile].nSize += nAddSize;
+ }
+
+ if (!fKnown) {
+ bool out_of_space;
+ size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode("Disk space is too low!", _("Disk space is too low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
+ }
+ }
+
+ setDirtyFileInfo.insert(nFile);
+ return true;
+}
+
+static bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
+{
+ pos.nFile = nFile;
+
+ LOCK(cs_LastBlockFile);
+
+ pos.nPos = vinfoBlockFile[nFile].nUndoSize;
+ vinfoBlockFile[nFile].nUndoSize += nAddSize;
+ setDirtyFileInfo.insert(nFile);
+
+ bool out_of_space;
+ size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
+ }
+
+ return true;
+}
static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
@@ -41,6 +334,35 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa
return true;
}
+bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
+{
+ // Write undo information to disk
+ if (pindex->GetUndoPos().IsNull()) {
+ FlatFilePos _pos;
+ if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
+ return error("ConnectBlock(): FindUndoPos failed");
+ }
+ if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart())) {
+ return AbortNode(state, "Failed to write undo data");
+ }
+ // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
+ // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
+ // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
+ // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
+ // the FindBlockPos function
+ if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
+ FlushUndoFile(_pos.nFile, true);
+ }
+
+ // update nUndoPos in block index
+ pindex->nUndoPos = _pos.nPos;
+ pindex->nStatus |= BLOCK_HAVE_UNDO;
+ setDirtyBlockIndex.insert(pindex);
+ }
+
+ return true;
+}
+
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
block.SetNull();
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 3b546f0719..faf99dea81 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -12,7 +12,9 @@
#include <protocol.h> // For CMessageHeader::MessageStartChars
class ArgsManager;
+class BlockValidationState;
class CBlock;
+class CBlockFileInfo;
class CBlockIndex;
class CBlockUndo;
class CChain;
@@ -25,6 +27,44 @@ struct Params;
static constexpr bool DEFAULT_STOPAFTERBLOCKIMPORT{false};
+/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
+static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
+/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
+static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
+/** The maximum size of a blk?????.dat file (since 0.8) */
+static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
+
+extern std::atomic_bool fImporting;
+extern std::atomic_bool fReindex;
+/** Pruning-related variables and constants */
+/** True if any block files have ever been pruned. */
+extern bool fHavePruned;
+/** True if we're running in -prune mode. */
+extern bool fPruneMode;
+/** Number of MiB of block files that we're trying to stay below. */
+extern uint64_t nPruneTarget;
+
+//! Check whether the block associated with this index entry is pruned or not.
+bool IsBlockPruned(const CBlockIndex* pblockindex);
+
+void CleanupBlockRevFiles();
+
+/** Open a block file (blk?????.dat) */
+FILE* OpenBlockFile(const FlatFilePos& pos, bool fReadOnly = false);
+/** Translation to a filesystem path */
+fs::path GetBlockPosFilename(const FlatFilePos& pos);
+
+/** Get block file info entry for one block file */
+CBlockFileInfo* GetBlockFileInfo(size_t n);
+
+/** Calculate the amount of disk space the block & undo files currently use */
+uint64_t CalculateCurrentUsage();
+
+/**
+ * Actually unlink the specified files
+ */
+void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
+
/** Functions for disk access for blocks */
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams);
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
@@ -32,6 +72,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start);
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex);
+bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams);
FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp);
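With these declarations moved into node/blockstorage.h, code that only needs block-file bookkeeping can include this header instead of validation.h. A hypothetical caller sketch under that assumption; LogBlockFileUsage is illustrative and not part of this patch:

#include <chain.h>              // full CBlockFileInfo definition (only forward-declared in the header above)
#include <cstdint>
#include <logging.h>
#include <node/blockstorage.h>

static void LogBlockFileUsage()
{
    // Disk usage of all blk*.dat / rev*.dat files, per CalculateCurrentUsage().
    const uint64_t used = CalculateCurrentUsage();
    // Per-file info for blk00000.dat; GetBlockFileInfo() would throw if no
    // block file entry exists yet, so a real caller would guard this.
    const CBlockFileInfo* info = GetBlockFileInfo(0);
    LogPrintf("Block and undo files use %u bytes; file 0: %s\n", used, info->ToString());
}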
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 2bfcaeafc9..382c6f11ff 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -277,7 +277,9 @@ static RPCHelpMan addnode()
"\nAttempts to add or remove a node from the addnode list.\n"
"Or try a connection to a node once.\n"
"Nodes added using addnode (or -connect) are protected from DoS disconnection and are not required to be\n"
- "full nodes/support SegWit as other outbound peers are (though such peers will not be synced from).\n",
+ "full nodes/support SegWit as other outbound peers are (though such peers will not be synced from).\n" +
+ strprintf("Addnode connections are limited to %u at a time", MAX_ADDNODE_CONNECTIONS) +
+ " and are counted separately from the -maxconnections limit.\n",
{
{"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The node (see getpeerinfo for nodes)"},
{"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once"},
diff --git a/src/test/data/tx_invalid.json b/src/test/data/tx_invalid.json
index 41bebab2a0..a47bc8f366 100644
--- a/src/test/data/tx_invalid.json
+++ b/src/test/data/tx_invalid.json
@@ -311,6 +311,10 @@
[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x20 0x34b6c399093e06cf9f0f7f660a1abcfe78fcf7b576f43993208edd9518a0ae9b", 1000]],
"0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff01e803000000000000015101045102010100000000", "P2SH,WITNESS"],
+["P2WSH with an empty redeem should fail due to empty stack"],
+[[["3d4da21b04a67a54c8a58df1c53a0534b0a7f0864fb3d19abd43b8f6934e785f", 0, "0x00 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 1337]],
+"020000000001015f784e93f6b843bd9ad1b34f86f0a7b034053ac5f18da5c8547aa6041ba24d3d0000000000ffffffff013905000000000000220020e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855010000000000", "P2SH,WITNESS"],
+
["33 bytes push should be considered a witness scriptPubKey"],
[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x60 0x21 0xff25429251b5a84f452230a3c75fd886b7fc5a7865ce4a7bb7a9d7c5be6da3dbff", 1000]],
"010000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff01e803000000000000015100000000", "P2SH,WITNESS,DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM"],
diff --git a/src/test/fuzz/script_assets_test_minimizer.cpp b/src/test/fuzz/script_assets_test_minimizer.cpp
index cec5212f42..a80338b965 100644
--- a/src/test/fuzz/script_assets_test_minimizer.cpp
+++ b/src/test/fuzz/script_assets_test_minimizer.cpp
@@ -133,8 +133,7 @@ unsigned int ParseScriptFlags(const std::string& str)
std::vector<std::string> words;
boost::algorithm::split(words, str, boost::algorithm::is_any_of(","));
- for (const std::string& word : words)
- {
+ for (const std::string& word : words) {
auto it = FLAG_NAMES.find(word);
if (it == FLAG_NAMES.end()) throw std::runtime_error("Unknown verification flag " + word);
flags |= it->second;
@@ -186,15 +185,19 @@ void Test(const std::string& str)
}
}
-ECCVerifyHandle handle;
-
-} // namespace
+void test_init()
+{
+ static ECCVerifyHandle handle;
+}
-FUZZ_TARGET_INIT_HIDDEN(script_assets_test_minimizer, FuzzFrameworkEmptyInitFun, /* hidden */ true)
+FUZZ_TARGET_INIT_HIDDEN(script_assets_test_minimizer, test_init, /* hidden */ true)
{
if (buffer.size() < 2 || buffer.back() != '\n' || buffer[buffer.size() - 2] != ',') return;
const std::string str((const char*)buffer.data(), buffer.size() - 2);
try {
Test(str);
- } catch (const std::runtime_error&) {}
+ } catch (const std::runtime_error&) {
+ }
}
+
+} // namespace
diff --git a/src/validation.cpp b/src/validation.cpp
index bf774a0791..54d062b63c 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -66,10 +66,6 @@
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
/** Maximum kilobytes for transactions to store for processing during reorg */
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
-/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
-static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
-/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
-static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
@@ -135,14 +131,9 @@ Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
bool g_parallel_script_checks{false};
-std::atomic_bool fImporting(false);
-std::atomic_bool fReindex(false);
-bool fHavePruned = false;
-bool fPruneMode = false;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
-uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
uint256 hashAssumeValid;
@@ -153,22 +144,17 @@ CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
// Internal stuff
namespace {
CBlockIndex* pindexBestInvalid = nullptr;
-
- RecursiveMutex cs_LastBlockFile;
- std::vector<CBlockFileInfo> vinfoBlockFile;
- int nLastBlockFile = 0;
- /** Global flag to indicate we should check to see if there are
- * block/undo files that should be deleted. Set on startup
- * or if we allocate more file space when we're in prune mode
- */
- bool fCheckForPruning = false;
-
- /** Dirty block index entries. */
- std::set<CBlockIndex*> setDirtyBlockIndex;
-
- /** Dirty block file entries. */
- std::set<int> setDirtyFileInfo;
-} // anon namespace
+} // namespace
+
+// Internal stuff from blockstorage ...
+extern RecursiveMutex cs_LastBlockFile;
+extern std::vector<CBlockFileInfo> vinfoBlockFile;
+extern int nLastBlockFile;
+extern bool fCheckForPruning;
+extern std::set<CBlockIndex*> setDirtyBlockIndex;
+extern std::set<int> setDirtyFileInfo;
+void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
+// ... TODO move fully to blockstorage
CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
{
@@ -205,9 +191,6 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
std::vector<CScriptCheck>* pvChecks = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
-static FlatFileSeq BlockFileSeq();
-static FlatFileSeq UndoFileSeq();
bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags)
{
@@ -1462,65 +1445,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
return true;
}
-static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
-{
- // Open history file to append
- CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
- if (fileout.IsNull())
- return error("%s: OpenUndoFile failed", __func__);
-
- // Write index header
- unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
- fileout << messageStart << nSize;
-
- // Write undo data
- long fileOutPos = ftell(fileout.Get());
- if (fileOutPos < 0)
- return error("%s: ftell failed", __func__);
- pos.nPos = (unsigned int)fileOutPos;
- fileout << blockundo;
-
- // calculate & write checksum
- CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
- hasher << hashBlock;
- hasher << blockundo;
- fileout << hasher.GetHash();
-
- return true;
-}
-
-bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
-{
- FlatFilePos pos = pindex->GetUndoPos();
- if (pos.IsNull()) {
- return error("%s: no undo data available", __func__);
- }
-
- // Open history file to read
- CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
- if (filein.IsNull())
- return error("%s: OpenUndoFile failed", __func__);
-
- // Read block
- uint256 hashChecksum;
- CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
- try {
- verifier << pindex->pprev->GetBlockHash();
- verifier >> blockundo;
- filein >> hashChecksum;
- }
- catch (const std::exception& e) {
- return error("%s: Deserialize or I/O error - %s", __func__, e.what());
- }
-
- // Verify checksum
- if (hashChecksum != verifier.GetHash())
- return error("%s: Checksum mismatch", __func__);
-
- return true;
-}
-
-static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str())
+bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
{
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
@@ -1620,55 +1545,6 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
-static void FlushUndoFile(int block_file, bool finalize = false)
-{
- FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
- if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
- AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
- }
-}
-
-static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
-{
- LOCK(cs_LastBlockFile);
- FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
- if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
- AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
- }
- // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
- // e.g. during IBD or a sync after a node going offline
- if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
-}
-
-static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
-
-static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
-{
- // Write undo information to disk
- if (pindex->GetUndoPos().IsNull()) {
- FlatFilePos _pos;
- if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
- return error("ConnectBlock(): FindUndoPos failed");
- if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
- return AbortNode(state, "Failed to write undo data");
- // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
- // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
- // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
- // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
- // the FindBlockPos function
- if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
- FlushUndoFile(_pos.nFile, true);
- }
-
- // update nUndoPos in block index
- pindex->nUndoPos = _pos.nPos;
- pindex->nStatus |= BLOCK_HAVE_UNDO;
- setDirtyBlockIndex.insert(pindex);
- }
-
- return true;
-}
-
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void StartScriptCheckWorkerThreads(int threads_num)
@@ -3102,84 +2978,6 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
}
}
-// TODO move to blockstorage
-bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false)
-{
- LOCK(cs_LastBlockFile);
-
- unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
- if (vinfoBlockFile.size() <= nFile) {
- vinfoBlockFile.resize(nFile + 1);
- }
-
- bool finalize_undo = false;
- if (!fKnown) {
- while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
- // when the undo file is keeping up with the block file, we want to flush it explicitly
- // when it is lagging behind (more blocks arrive than are being connected), we let the
- // undo block write case handle it
- assert(std::addressof(::ChainActive()) == std::addressof(active_chain));
- finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
- nFile++;
- if (vinfoBlockFile.size() <= nFile) {
- vinfoBlockFile.resize(nFile + 1);
- }
- }
- pos.nFile = nFile;
- pos.nPos = vinfoBlockFile[nFile].nSize;
- }
-
- if ((int)nFile != nLastBlockFile) {
- if (!fKnown) {
- LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
- }
- FlushBlockFile(!fKnown, finalize_undo);
- nLastBlockFile = nFile;
- }
-
- vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
- if (fKnown)
- vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
- else
- vinfoBlockFile[nFile].nSize += nAddSize;
-
- if (!fKnown) {
- bool out_of_space;
- size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
- if (out_of_space) {
- return AbortNode("Disk space is too low!", _("Disk space is too low!"));
- }
- if (bytes_allocated != 0 && fPruneMode) {
- fCheckForPruning = true;
- }
- }
-
- setDirtyFileInfo.insert(nFile);
- return true;
-}
-
-static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
-{
- pos.nFile = nFile;
-
- LOCK(cs_LastBlockFile);
-
- pos.nPos = vinfoBlockFile[nFile].nUndoSize;
- vinfoBlockFile[nFile].nUndoSize += nAddSize;
- setDirtyFileInfo.insert(nFile);
-
- bool out_of_space;
- size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
- if (out_of_space) {
- return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
- }
- if (bytes_allocated != 0 && fPruneMode) {
- fCheckForPruning = true;
- }
-
- return true;
-}
-
static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
{
// Check proof of work matches claimed amount
@@ -3737,18 +3535,6 @@ bool TestBlockValidity(BlockValidationState& state,
* BLOCK PRUNING CODE
*/
-/* Calculate the amount of disk space the block & undo files currently use */
-uint64_t CalculateCurrentUsage()
-{
- LOCK(cs_LastBlockFile);
-
- uint64_t retval = 0;
- for (const CBlockFileInfo &file : vinfoBlockFile) {
- retval += file.nSize + file.nUndoSize;
- }
- return retval;
-}
-
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
AssertLockHeld(cs_main);
@@ -3783,17 +3569,6 @@ void BlockManager::PruneOneBlockFile(const int fileNumber)
setDirtyFileInfo.insert(fileNumber);
}
-
-void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
-{
- for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
- FlatFilePos pos(*it, 0);
- fs::remove(BlockFileSeq().FileName(pos));
- fs::remove(UndoFileSeq().FileName(pos));
- LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
- }
-}
-
void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
{
assert(fPruneMode && nManualPruneHeight > 0);
@@ -3888,30 +3663,6 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr
nLastBlockWeCanPrune, count);
}
-static FlatFileSeq BlockFileSeq()
-{
- return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
-}
-
-static FlatFileSeq UndoFileSeq()
-{
- return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE);
-}
-
-FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
- return BlockFileSeq().Open(pos, fReadOnly);
-}
-
-/** Open an undo file (rev?????.dat) */
-static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
- return UndoFileSeq().Open(pos, fReadOnly);
-}
-
-fs::path GetBlockPosFilename(const FlatFilePos &pos)
-{
- return BlockFileSeq().FileName(pos);
-}
-
CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
{
AssertLockHeld(cs_main);
@@ -4744,18 +4495,6 @@ bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
return ret;
}
-std::string CBlockFileInfo::ToString() const
-{
- return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
-}
-
-CBlockFileInfo* GetBlockFileInfo(size_t n)
-{
- LOCK(cs_LastBlockFile);
-
- return &vinfoBlockFile.at(n);
-}
-
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
diff --git a/src/validation.h b/src/validation.h
index 63a8bdc096..8fa45c3f77 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -27,6 +27,7 @@
#include <serialize.h>
#include <util/check.h>
#include <util/hasher.h>
+#include <util/translation.h>
#include <atomic>
#include <map>
@@ -70,8 +71,6 @@ static const unsigned int DEFAULT_DESCENDANT_LIMIT = 25;
static const unsigned int DEFAULT_DESCENDANT_SIZE_LIMIT = 101;
/** Default for -mempoolexpiry, expiration time for mempool transactions in hours */
static const unsigned int DEFAULT_MEMPOOL_EXPIRY = 336;
-/** The maximum size of a blk?????.dat file (since 0.8) */
-static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
/** Maximum number of dedicated script-checking threads allowed */
static const int MAX_SCRIPTCHECK_THREADS = 15;
/** -par default (number of script-checking threads, 0 = auto) */
@@ -113,8 +112,6 @@ typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
extern Mutex g_best_block_mutex;
extern std::condition_variable g_best_block_cv;
extern uint256 g_best_block;
-extern std::atomic_bool fImporting;
-extern std::atomic_bool fReindex;
/** Whether there are dedicated script-checking threads running.
* False indicates all script checking is done on the main threadMessageHandler thread.
*/
@@ -136,20 +133,9 @@ extern arith_uint256 nMinimumChainWork;
/** Best header we've seen so far (used for getheaders queries' starting points). */
extern CBlockIndex *pindexBestHeader;
-/** Pruning-related variables and constants */
-/** True if any block files have ever been pruned. */
-extern bool fHavePruned;
-/** True if we're running in -prune mode. */
-extern bool fPruneMode;
-/** Number of MiB of block files that we're trying to stay below. */
-extern uint64_t nPruneTarget;
/** Documentation for argument 'checklevel'. */
extern const std::vector<std::string> CHECKLEVEL_DOC;
-/** Open a block file (blk?????.dat) */
-FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false);
-/** Translation to a filesystem path */
-fs::path GetBlockPosFilename(const FlatFilePos &pos);
/** Unload database information */
void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman);
/** Run instances of script checking worker threads */
@@ -171,17 +157,11 @@ void StopScriptCheckWorkerThreads();
CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock);
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
+bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str{});
+
/** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). */
double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex* pindex);
-/** Calculate the amount of disk space the block & undo files currently use */
-uint64_t CalculateCurrentUsage();
-
-/**
- * Actually unlink the specified files
- */
-void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
-
/** Prune block files up to a given height */
void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight);
@@ -1009,9 +989,6 @@ extern VersionBitsCache versionbitscache;
*/
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params);
-/** Get block file info entry for one block file */
-CBlockFileInfo* GetBlockFileInfo(size_t n);
-
using FopenFn = std::function<FILE*(const fs::path&, const char*)>;
/** Dump the mempool to disk. */
@@ -1020,12 +997,6 @@ bool DumpMempool(const CTxMemPool& pool, FopenFn mockable_fopen_function = fsbri
/** Load the mempool from disk. */
bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function = fsbridge::fopen);
-//! Check whether the block associated with this index entry is pruned or not.
-inline bool IsBlockPruned(const CBlockIndex* pblockindex)
-{
- return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
-}
-
/**
* Return the expected assumeutxo value for a given height, if one exists.
*
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 011d59ecaf..c8e3c8f819 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -11,6 +11,7 @@
#include <vector>
#include <interfaces/chain.h>
+#include <node/blockstorage.h>
#include <node/context.h>
#include <policy/policy.h>
#include <rpc/server.h>