author    Martin Zumsande <mzumsande@gmail.com>    2023-02-28 18:26:23 -0500
committer Martin Zumsande <mzumsande@gmail.com>    2023-04-19 11:25:07 -0400
commit    271c23e87f61276d7acab74e115b25a35144c8b4 (patch)
tree      06f000d50ec4d37a44efdd2f245c5a9fc06a3133 /src/node/blockstorage.cpp
parent    d908877c4774c2456eed09167a5f382758e4a8a6 (diff)
download  bitcoin-271c23e87f61276d7acab74e115b25a35144c8b4.tar.xz
blockstorage: Adjust fastprune limit if block exceeds blockfile size
If the added block exceeds the blockfile size in test-only -fastprune mode, the node would get stuck in an infinite loop and run out of memory. Avoid this by raising the blockfile size to the size of the added block in this situation.

Co-authored-by: TheCharlatan <seb.kung@gmail.com>
Diffstat (limited to 'src/node/blockstorage.cpp')
-rw-r--r--    src/node/blockstorage.cpp    13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index af84e6d7e7..27b11af886 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -619,7 +619,18 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
     bool finalize_undo = false;
     if (!fKnown) {
-        while (m_blockfile_info[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
+        unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
+        // Use smaller blockfiles in test-only -fastprune mode - but avoid
+        // the possibility of having a block not fit into the block file.
+        if (gArgs.GetBoolArg("-fastprune", false)) {
+            max_blockfile_size = 0x10000; // 64kiB
+            if (nAddSize >= max_blockfile_size) {
+                // dynamically adjust the blockfile size to be larger than the added size
+                max_blockfile_size = nAddSize + 1;
+            }
+        }
+        assert(nAddSize < max_blockfile_size);
+        while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
             // when the undo file is keeping up with the block file, we want to flush it explicitly
             // when it is lagging behind (more blocks arrive than are being connected), we let the
             // undo block write case handle it
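
For context, the sketch below is a minimal standalone illustration (not Bitcoin Core code) of why the pre-fix loop could not terminate once a block exceeded the fixed 64 KiB -fastprune limit: even a freshly opened, empty block file already satisfies the "file is full" condition, so the loop keeps advancing to new files forever. All names and sizes here are illustrative assumptions, not values from the patch.

    // Standalone sketch of the fastprune infinite-loop condition and the fix.
    #include <cstdio>
    #include <vector>

    int main()
    {
        const unsigned int fastprune_limit = 0x10000; // 64 KiB, the old fixed limit
        const unsigned int add_size = 0x12000;        // hypothetical block larger than 64 KiB
        std::vector<unsigned int> file_sizes{0};      // stand-in for m_blockfile_info[nFile].nSize
        unsigned int file = 0;

        // Old behaviour: the condition is true even when the current file is empty,
        // so without the fix this loop would run until memory is exhausted.
        // The iteration cap is only here so the sketch terminates.
        while (file_sizes[file] + add_size >= fastprune_limit) {
            if (file > 5) {
                std::printf("condition still true after %u new files - would loop forever\n", file);
                break;
            }
            file_sizes.push_back(0); // "open" the next, empty block file
            ++file;
        }

        // Fixed behaviour: raise the limit above the block size before the loop,
        // mirroring the max_blockfile_size adjustment in the patch.
        unsigned int max_blockfile_size = fastprune_limit;
        if (add_size >= max_blockfile_size) max_blockfile_size = add_size + 1;
        std::printf("adjusted limit %#x lets the block fit in file %u\n", max_blockfile_size, 0u);
        return 0;
    }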