Diffstat (limited to 'test/functional/feature_pruning.py')
-rwxr-xr-x | test/functional/feature_pruning.py | 107 |
1 file changed, 60 insertions, 47 deletions
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index c69d385580..49951e24f3 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -8,11 +8,13 @@ WARNING: This test uses 4GB of disk space.

 This test takes 30 mins or more (up to 2 hours)
 """
+import os
+from test_framework.blocktools import create_coinbase
+from test_framework.messages import CBlock, ToHex
+from test_framework.script import CScript, OP_RETURN, OP_NOP
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, mine_large_block, sync_blocks, wait_until
-
-import os
+from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, sync_blocks, wait_until

 MIN_BLOCKS_TO_KEEP = 288

@@ -21,6 +23,47 @@ MIN_BLOCKS_TO_KEEP = 288
 # compatible with pruning based on key creation time.
 TIMESTAMP_WINDOW = 2 * 60 * 60

+def mine_large_blocks(node, n):
+    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
+    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
+    # transaction but is consensus valid.
+
+    # Get the block parameters for the first block
+    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
+    best_block = node.getblock(node.getbestblockhash())
+    height = int(best_block["height"]) + 1
+    try:
+        # Static variable ensures that time is monotonically increasing and is therefore
+        # different for each block created => blockhash is unique.
+        mine_large_blocks.nTime = min(mine_large_blocks.nTime, int(best_block["time"])) + 1
+    except AttributeError:
+        mine_large_blocks.nTime = int(best_block["time"]) + 1
+    previousblockhash = int(best_block["hash"], 16)
+
+    for _ in range(n):
+        # Build the coinbase transaction (with large scriptPubKey)
+        coinbase_tx = create_coinbase(height)
+        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
+        coinbase_tx.vout[0].scriptPubKey = big_script
+        coinbase_tx.rehash()
+
+        # Build the block
+        block = CBlock()
+        block.nVersion = best_block["version"]
+        block.hashPrevBlock = previousblockhash
+        block.nTime = mine_large_blocks.nTime
+        block.nBits = int('207fffff', 16)
+        block.nNonce = 0
+        block.vtx = [coinbase_tx]
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.solve()
+
+        # Submit to the node
+        node.submitblock(ToHex(block))
+
+        previousblockhash = block.sha256
+        height += 1
+        mine_large_blocks.nTime += 1

 def calc_usage(blockdir):
     return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
@@ -29,11 +72,10 @@ class PruneTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 6
-        self.rpc_timeout = 900

         # Create nodes 0 and 1 to mine.
         # Create node 2 to test pruning.
-        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
+        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
         # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
         # Create nodes 5 to test wallet in prune mode, but do not connect
         self.extra_args = [
@@ -73,8 +115,7 @@ class PruneTest(BitcoinTestFramework):
         self.nodes[0].generate(150)

         # Then mine enough full blocks to create more than 550MiB of data
-        for i in range(645):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 645)

         sync_blocks(self.nodes[0:5])
@@ -84,8 +125,7 @@ class PruneTest(BitcoinTestFramework):
         self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
         self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
-        for i in range(25):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 25)

         # Wait for blk00000.dat to be pruned
         wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
@@ -102,22 +142,13 @@ class PruneTest(BitcoinTestFramework):
         for j in range(12):
             # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
             # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
-            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
-            self.stop_node(0)
-            self.start_node(0, extra_args=self.full_node_default_args)
+            disconnect_nodes(self.nodes[0], 1)
+            disconnect_nodes(self.nodes[0], 2)

             # Mine 24 blocks in node 1
-            for i in range(24):
-                if j == 0:
-                    mine_large_block(self.nodes[1], self.utxo_cache_1)
-                else:
-                    # Add node1's wallet transactions back to the mempool, to
-                    # avoid the mined blocks from being too small.
-                    self.nodes[1].resendwallettransactions()
-                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
+            mine_large_blocks(self.nodes[1], 24)

             # Reorg back with 25 block chain from node 0
-            for i in range(25):
-                mine_large_block(self.nodes[0], self.utxo_cache_0)
+            mine_large_blocks(self.nodes[0], 25)

             # Create connections in the order so both nodes can see the reorg at the same time
             connect_nodes(self.nodes[0], 1)
@@ -129,10 +160,6 @@ class PruneTest(BitcoinTestFramework):
     def reorg_test(self):
         # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
         # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
-        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
-        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])

         height = self.nodes[1].getblockcount()
         self.log.info("Current block height: %d" % height)
@@ -153,9 +180,9 @@ class PruneTest(BitcoinTestFramework):
         assert self.nodes[1].getblockcount() == self.forkheight - 1
         self.log.info("New best height: %d" % self.nodes[1].getblockcount())

-        # Reboot node1 to clear those giant tx's from mempool
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])
+        # Disconnect node1 and generate the new chain
+        disconnect_nodes(self.nodes[0], 1)
+        disconnect_nodes(self.nodes[1], 2)

         self.log.info("Generating new longer chain of 300 more blocks")
         self.nodes[1].generate(300)
@@ -167,17 +194,10 @@ class PruneTest(BitcoinTestFramework):
         self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
         self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))

-        self.log.info("Mine 220 more large blocks so we have requisite history")
-        # Get node0's wallet transactions back in its mempool, to avoid the
-        # mined blocks from being too small.
-        self.nodes[0].resendwallettransactions()
+        self.log.info("Mine 220 more large blocks so we have requisite history")

-        for i in range(22):
-            # This can be slow, so do this in multiple RPC calls to avoid
-            # RPC timeouts.
-            self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
-            sync_blocks(self.nodes[0:3], timeout=300)
+        mine_large_blocks(self.nodes[0], 220)

         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: %d" % usage)
@@ -331,16 +351,9 @@ class PruneTest(BitcoinTestFramework):
         self.log.info("Success")

     def run_test(self):
-        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
-        self.log.info("Mining a big blockchain of 995 blocks")
-
-        # Determine default relay fee
-        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
-        # Cache for utxos, as the listunspent may take a long time later in the test
-        self.utxo_cache_0 = []
-        self.utxo_cache_1 = []
+        self.log.info("Warning! This test requires 4GB of disk space")
+        self.log.info("Mining a big blockchain of 995 blocks")
         self.create_big_chain()
         # Chain diagram key:
         # * blocks on main chain
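Note: the new mine_large_blocks() helper keeps its block timestamp in a function attribute so that times keep increasing across calls without any global state. A minimal standalone sketch of that pattern (illustrative names, not part of the test) looks like this:

# Sketch of the function-attribute "static variable" pattern used by
# mine_large_blocks(): the attribute survives between calls, so the helper
# can hand out a fresh timestamp each time it is invoked.
def next_block_time(tip_time):
    try:
        next_block_time.counter += 1              # later calls: attribute already exists
    except AttributeError:
        next_block_time.counter = tip_time + 1    # first call: seed from the tip's time
    return next_block_time.counter

print(next_block_time(1000))  # 1001
print(next_block_time(1000))  # 1002

mine_large_blocks() applies the same idea to mine_large_blocks.nTime and assigns the value to block.nTime before solving each block, so every block it submits gets a unique hash.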