Diffstat (limited to 'test/functional')
-rwxr-xr-x  test/functional/assumevalid.py                 |  46
-rwxr-xr-x  test/functional/bip65-cltv-p2p.py              |  39
-rwxr-xr-x  test/functional/bipdersig-p2p.py               |  40
-rwxr-xr-x  test/functional/example_test.py                |  23
-rwxr-xr-x  test/functional/listsinceblock.py              |  35
-rwxr-xr-x  test/functional/maxuploadtarget.py             |  71
-rwxr-xr-x  test/functional/minchainwork.py                |   8
-rwxr-xr-x  test/functional/nulldummy.py                   |   4
-rwxr-xr-x  test/functional/p2p-acceptblock.py             | 313
-rwxr-xr-x  test/functional/p2p-compactblocks.py           |  21
-rwxr-xr-x  test/functional/p2p-feefilter.py               |  26
-rwxr-xr-x  test/functional/p2p-fingerprint.py             |   7
-rwxr-xr-x  test/functional/p2p-leaktests.py               |  36
-rwxr-xr-x  test/functional/p2p-mempool.py                 |  12
-rwxr-xr-x  test/functional/p2p-segwit.py                  |  24
-rwxr-xr-x  test/functional/p2p-timeouts.py                |  43
-rwxr-xr-x  test/functional/p2p-versionbits-warning.py     |  12
-rwxr-xr-x  test/functional/segwit.py                      |   7
-rwxr-xr-x  test/functional/sendheaders.py                 |  50
-rw-r--r--  test/functional/test_framework/blockstore.py   |   4
-rwxr-xr-x  test/functional/test_framework/comptool.py     |  14
-rwxr-xr-x  test/functional/test_framework/mininode.py     | 228
-rwxr-xr-x  test/functional/test_framework/test_node.py    |  49
-rwxr-xr-x  test/functional/test_runner.py                 |   2
-rwxr-xr-x  test/functional/wallet-accounts.py             | 137
-rwxr-xr-x  test/functional/walletbackup.py                |  10
-rwxr-xr-x  test/functional/zmq_test.py                    |   7
27 files changed, 650 insertions(+), 618 deletions(-)
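Every file in this diff follows the same migration: instead of constructing a NodeConn by hand (host, p2p_port, node, callback object) and registering it with add_connection(), tests now ask the TestNode itself for a connection via add_p2p_connection() and reach the most recent one afterwards through the node's p2p attribute. A minimal sketch of the new pattern, using only calls that appear in the hunks below (not a complete test module from this tree):

    from test_framework.mininode import NetworkThread, NodeConnCB
    from test_framework.test_framework import BitcoinTestFramework

    class MigrationSketch(BitcoinTestFramework):
        def set_test_params(self):
            self.num_nodes = 1

        def run_test(self):
            # New style: the TestNode creates and tracks the connection.
            p2p0 = self.nodes[0].add_p2p_connection(NodeConnCB())
            NetworkThread().start()              # network handling runs in another thread
            p2p0.wait_for_verack()               # connection is fully up once verack arrives
            # self.nodes[0].p2p refers to the same (most recent) connection
            self.nodes[0].p2p.sync_with_ping()
            self.nodes[0].disconnect_p2p()       # tear the connection down again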
diff --git a/test/functional/assumevalid.py b/test/functional/assumevalid.py
index 65685c48b7..36761d359e 100755
--- a/test/functional/assumevalid.py
+++ b/test/functional/assumevalid.py
@@ -39,13 +39,12 @@ from test_framework.mininode import (CBlockHeader,
CTxIn,
CTxOut,
NetworkThread,
- NodeConn,
NodeConnCB,
msg_block,
msg_headers)
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (p2p_port, assert_equal)
+from test_framework.util import assert_equal
class BaseNode(NodeConnCB):
def send_header_for_blocks(self, new_blocks):
@@ -65,13 +64,13 @@ class AssumeValidTest(BitcoinTestFramework):
# signature so we can pass in the block hash as assumevalid.
self.start_node(0)
- def send_blocks_until_disconnected(self, node):
+ def send_blocks_until_disconnected(self, p2p_conn):
"""Keep sending blocks to the node until we're disconnected."""
for i in range(len(self.blocks)):
- if not node.connection:
+ if not p2p_conn.connection:
break
try:
- node.send_message(msg_block(self.blocks[i]))
+ p2p_conn.send_message(msg_block(self.blocks[i]))
except IOError as e:
assert str(e) == 'Not connected, no pushbuf'
break
@@ -97,13 +96,10 @@ class AssumeValidTest(BitcoinTestFramework):
def run_test(self):
# Connect to node0
- node0 = BaseNode()
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
- node0.add_connection(connections[0])
+ p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
NetworkThread().start() # Start up network handling in another thread
- node0.wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
@@ -165,37 +161,33 @@ class AssumeValidTest(BitcoinTestFramework):
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
- node1 = BaseNode() # connects to node1
- connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
- node1.add_connection(connections[1])
- node1.wait_for_verack()
+ p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
+ p2p1.wait_for_verack()
self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])
- node2 = BaseNode() # connects to node2
- connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
- node2.add_connection(connections[2])
- node2.wait_for_verack()
+ p2p2 = self.nodes[2].add_p2p_connection(BaseNode())
+ p2p2.wait_for_verack()
# send header lists to all three nodes
- node0.send_header_for_blocks(self.blocks[0:2000])
- node0.send_header_for_blocks(self.blocks[2000:])
- node1.send_header_for_blocks(self.blocks[0:2000])
- node1.send_header_for_blocks(self.blocks[2000:])
- node2.send_header_for_blocks(self.blocks[0:200])
+ p2p0.send_header_for_blocks(self.blocks[0:2000])
+ p2p0.send_header_for_blocks(self.blocks[2000:])
+ p2p1.send_header_for_blocks(self.blocks[0:2000])
+ p2p1.send_header_for_blocks(self.blocks[2000:])
+ p2p2.send_header_for_blocks(self.blocks[0:200])
# Send blocks to node0. Block 102 will be rejected.
- self.send_blocks_until_disconnected(node0)
+ self.send_blocks_until_disconnected(p2p0)
self.assert_blockchain_height(self.nodes[0], 101)
# Send all blocks to node1. All blocks will be accepted.
for i in range(2202):
- node1.send_message(msg_block(self.blocks[i]))
+ p2p1.send_message(msg_block(self.blocks[i]))
# Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
- node1.sync_with_ping(120)
+ p2p1.sync_with_ping(120)
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send blocks to node2. Block 102 will be rejected.
- self.send_blocks_until_disconnected(node2)
+ self.send_blocks_until_disconnected(p2p2)
self.assert_blockchain_height(self.nodes[2], 101)
if __name__ == '__main__':
diff --git a/test/functional/bip65-cltv-p2p.py b/test/functional/bip65-cltv-p2p.py
index 2cd6df6e37..3073324798 100755
--- a/test/functional/bip65-cltv-p2p.py
+++ b/test/functional/bip65-cltv-p2p.py
@@ -66,15 +66,12 @@ class BIP65Test(BitcoinTestFramework):
self.setup_clean_chain = True
def run_test(self):
- node0 = NodeConnCB()
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
- node0.add_connection(connections[0])
+ self.nodes[0].add_p2p_connection(NodeConnCB())
NetworkThread().start() # Start up network handling in another thread
# wait_for_verack ensures that the P2P connection is fully up.
- node0.wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
@@ -95,7 +92,7 @@ class BIP65Test(BitcoinTestFramework):
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 4")
@@ -104,15 +101,15 @@ class BIP65Test(BitcoinTestFramework):
block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
block.nVersion = 3
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
- wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
+ wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
- assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
- assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000003)')
- assert_equal(node0.last_message["reject"].data, block.sha256)
- del node0.last_message["reject"]
+ assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
+ assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)')
+ assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
+ del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block")
block.nVersion = 4
@@ -125,7 +122,7 @@ class BIP65Test(BitcoinTestFramework):
# First we show that this tx is valid except for CLTV by getting it
# accepted to the mempool (which we can achieve with
# -promiscuousmempoolflags).
- node0.send_and_ping(msg_tx(spendtx))
+ self.nodes[0].p2p.send_and_ping(msg_tx(spendtx))
assert spendtx.hash in self.nodes[0].getrawmempool()
# Now we verify that a block with this transaction is invalid.
@@ -133,18 +130,18 @@ class BIP65Test(BitcoinTestFramework):
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
- wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
+ wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
- assert node0.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
- assert_equal(node0.last_message["reject"].data, block.sha256)
- if node0.last_message["reject"].code == REJECT_INVALID:
+ assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
+ assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
+ if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
- assert_equal(node0.last_message["reject"].reason, b'block-validation-failed')
+ assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
- assert b'Negative locktime' in node0.last_message["reject"].reason
+ assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
@@ -155,7 +152,7 @@ class BIP65Test(BitcoinTestFramework):
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
diff --git a/test/functional/bipdersig-p2p.py b/test/functional/bipdersig-p2p.py
index c620d3e155..e5febde42d 100755
--- a/test/functional/bipdersig-p2p.py
+++ b/test/functional/bipdersig-p2p.py
@@ -54,14 +54,12 @@ class BIP66Test(BitcoinTestFramework):
self.setup_clean_chain = True
def run_test(self):
- node0 = NodeConnCB()
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
- node0.add_connection(connections[0])
+ self.nodes[0].add_p2p_connection(NodeConnCB())
+
NetworkThread().start() # Start up network handling in another thread
# wait_for_verack ensures that the P2P connection is fully up.
- node0.wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
@@ -83,7 +81,7 @@ class BIP66Test(BitcoinTestFramework):
block.rehash()
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
@@ -93,15 +91,15 @@ class BIP66Test(BitcoinTestFramework):
block.nVersion = 2
block.rehash()
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
- wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
+ wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
- assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
- assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000002)')
- assert_equal(node0.last_message["reject"].data, block.sha256)
- del node0.last_message["reject"]
+ assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
+ assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
+ assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
+ del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
@@ -114,7 +112,7 @@ class BIP66Test(BitcoinTestFramework):
# First we show that this tx is valid except for DERSIG by getting it
# accepted to the mempool (which we can achieve with
# -promiscuousmempoolflags).
- node0.send_and_ping(msg_tx(spendtx))
+ self.nodes[0].p2p.send_and_ping(msg_tx(spendtx))
assert spendtx.hash in self.nodes[0].getrawmempool()
# Now we verify that a block with this transaction is invalid.
@@ -123,23 +121,23 @@ class BIP66Test(BitcoinTestFramework):
block.rehash()
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
- wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
+ wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
# We can receive different reject messages depending on whether
# bitcoind is running with multiple script check threads. If script
# check threads are not in use, then transaction script validation
# happens sequentially, and bitcoind produces more specific reject
# reasons.
- assert node0.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
- assert_equal(node0.last_message["reject"].data, block.sha256)
- if node0.last_message["reject"].code == REJECT_INVALID:
+ assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
+ assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
+ if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
- assert_equal(node0.last_message["reject"].reason, b'block-validation-failed')
+ assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
- assert b'Non-canonical DER signature' in node0.last_message["reject"].reason
+ assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0],
@@ -148,7 +146,7 @@ class BIP66Test(BitcoinTestFramework):
block.rehash()
block.solve()
- node0.send_and_ping(msg_block(block))
+ self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
index 87d73ad14a..ba40f33016 100755
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -18,7 +18,6 @@ from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
NetworkThread,
- NodeConn,
NodeConnCB,
mininode_lock,
msg_block,
@@ -28,7 +27,6 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
- p2p_port,
wait_until,
)
@@ -134,16 +132,13 @@ class ExampleTest(BitcoinTestFramework):
"""Main test logic"""
# Create a P2P connection to one of the nodes
- node0 = BaseNode()
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
- node0.add_connection(connections[0])
+ self.nodes[0].add_p2p_connection(BaseNode())
# Start up network handling in another thread. This needs to be called
# after the P2P connections have been created.
NetworkThread().start()
# wait_for_verack ensures that the P2P connection is fully up.
- node0.wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
@@ -180,7 +175,7 @@ class ExampleTest(BitcoinTestFramework):
block.solve()
block_message = msg_block(block)
# Send message is used to send a P2P message to the node over our NodeConn connection
- node0.send_message(block_message)
+ self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
@@ -193,28 +188,26 @@ class ExampleTest(BitcoinTestFramework):
connect_nodes(self.nodes[1], 2)
self.log.info("Add P2P connection to node2")
- node2 = BaseNode()
- connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
- node2.add_connection(connections[1])
- node2.wait_for_verack()
+ self.nodes[2].add_p2p_connection(BaseNode())
+ self.nodes[2].p2p.wait_for_verack()
self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us")
getdata_request = msg_getdata()
for block in blocks:
getdata_request.inv.append(CInv(2, block))
- node2.send_message(getdata_request)
+ self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# NodeConnCB objects.
- wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5, lock=mininode_lock)
+ wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
- for block in node2.block_receive_map.values():
+ for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
diff --git a/test/functional/listsinceblock.py b/test/functional/listsinceblock.py
index 6f428388ec..67e7744bf8 100755
--- a/test/functional/listsinceblock.py
+++ b/test/functional/listsinceblock.py
@@ -5,7 +5,7 @@
"""Test the listsincelast RPC."""
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal
+from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error
class ListSinceBlockTest (BitcoinTestFramework):
def set_test_params(self):
@@ -16,10 +16,43 @@ class ListSinceBlockTest (BitcoinTestFramework):
self.nodes[2].generate(101)
self.sync_all()
+ self.test_no_blockhash()
+ self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
+ def test_no_blockhash(self):
+ txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+ blockhash, = self.nodes[2].generate(1)
+ self.sync_all()
+
+ txs = self.nodes[0].listtransactions()
+ assert_array_result(txs, {"txid": txid}, {
+ "category": "receive",
+ "amount": 1,
+ "blockhash": blockhash,
+ "confirmations": 1,
+ })
+ assert_equal(
+ self.nodes[0].listsinceblock(),
+ {"lastblock": blockhash,
+ "removed": [],
+ "transactions": txs})
+ assert_equal(
+ self.nodes[0].listsinceblock(""),
+ {"lastblock": blockhash,
+ "removed": [],
+ "transactions": txs})
+
+ def test_invalid_blockhash(self):
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
+ "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
+ "0000000000000000000000000000000000000000000000000000000000000000")
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
+ "invalid-hex")
+
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
diff --git a/test/functional/maxuploadtarget.py b/test/functional/maxuploadtarget.py
index 1f402798e7..9c92aa1dc0 100755
--- a/test/functional/maxuploadtarget.py
+++ b/test/functional/maxuploadtarget.py
@@ -49,19 +49,17 @@ class MaxUploadTest(BitcoinTestFramework):
# Generate some old blocks
self.nodes[0].generate(130)
- # test_nodes[0] will only request old blocks
- # test_nodes[1] will only request new blocks
- # test_nodes[2] will test resetting the counters
- test_nodes = []
- connections = []
+ # p2p_conns[0] will only request old blocks
+ # p2p_conns[1] will only request new blocks
+ # p2p_conns[2] will test resetting the counters
+ p2p_conns = []
for i in range(3):
- test_nodes.append(TestNode())
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
- test_nodes[i].add_connection(connections[i])
+ p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode()))
NetworkThread().start() # Start up network handling in another thread
- [x.wait_for_verack() for x in test_nodes]
+ for p2pc in p2p_conns:
+ p2pc.wait_for_verack()
# Test logic begins here
@@ -83,7 +81,7 @@ class MaxUploadTest(BitcoinTestFramework):
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
- # test_nodes[0] will test what happens if we just keep requesting the
+ # p2p_conns[0] will test what happens if we just keep requesting the
    # same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
@@ -97,34 +95,34 @@ class MaxUploadTest(BitcoinTestFramework):
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
- test_nodes[0].send_message(getdata_request)
- test_nodes[0].sync_with_ping()
- assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
+ p2p_conns[0].send_message(getdata_request)
+ p2p_conns[0].sync_with_ping()
+ assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in range(3):
- test_nodes[0].send_message(getdata_request)
- test_nodes[0].wait_for_disconnect()
+ p2p_conns[0].send_message(getdata_request)
+ p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
- # Requesting the current block on test_nodes[1] should succeed indefinitely,
+ # Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(800):
- test_nodes[1].send_message(getdata_request)
- test_nodes[1].sync_with_ping()
- assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
+ p2p_conns[1].send_message(getdata_request)
+ p2p_conns[1].sync_with_ping()
+ assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
- # But if test_nodes[1] tries for an old block, it gets disconnected too.
+ # But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
- test_nodes[1].send_message(getdata_request)
- test_nodes[1].wait_for_disconnect()
+ p2p_conns[1].send_message(getdata_request)
+ p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
@@ -132,39 +130,38 @@ class MaxUploadTest(BitcoinTestFramework):
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
- # and test_nodes[2] should be able to retrieve the old block.
+ # and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
- test_nodes[2].sync_with_ping()
- test_nodes[2].send_message(getdata_request)
- test_nodes[2].sync_with_ping()
- assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
+ p2p_conns[2].sync_with_ping()
+ p2p_conns[2].send_message(getdata_request)
+ p2p_conns[2].sync_with_ping()
+ assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
- [c.disconnect_node() for c in connections]
+ for i in range(3):
+ self.nodes[0].disconnect_p2p()
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
self.log.info("Restarting nodes with -whitelist=127.0.0.1")
self.stop_node(0)
self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
- #recreate/reconnect a test node
- test_nodes = [TestNode()]
- connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[0])]
- test_nodes[0].add_connection(connections[0])
+ # Reconnect to self.nodes[0]
+ self.nodes[0].add_p2p_connection(TestNode())
NetworkThread().start() # Start up network handling in another thread
- test_nodes[0].wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(20):
- test_nodes[0].send_message(getdata_request)
- test_nodes[0].sync_with_ping()
- assert_equal(test_nodes[0].block_receive_map[big_new_block], i+1)
+ self.nodes[0].p2p.send_message(getdata_request)
+ self.nodes[0].p2p.sync_with_ping()
+ assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
- test_nodes[0].send_and_ping(getdata_request)
+ self.nodes[0].p2p.send_and_ping(getdata_request)
assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist
self.log.info("Peer still connected after trying to download old block (whitelisted)")
diff --git a/test/functional/minchainwork.py b/test/functional/minchainwork.py
index c7579d2548..35cd7ad141 100755
--- a/test/functional/minchainwork.py
+++ b/test/functional/minchainwork.py
@@ -27,6 +27,7 @@ class MinimumChainWorkTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
+
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
@@ -74,6 +75,13 @@ class MinimumChainWorkTest(BitcoinTestFramework):
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
+
+ # Because nodes in regtest are all manual connections (eg using
+ # addnode), node1 should not have disconnected node0. If not for that,
+ # we'd expect node1 to have disconnected node0 for serving an
+ # insufficient work chain, in which case we'd need to reconnect them to
+ # continue the test.
+
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
diff --git a/test/functional/nulldummy.py b/test/functional/nulldummy.py
index 91c4550653..7bc7c168f4 100755
--- a/test/functional/nulldummy.py
+++ b/test/functional/nulldummy.py
@@ -40,7 +40,9 @@ class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
+ # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
+ # normal segwit activation here (and don't use the default always-on behaviour).
+ self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
diff --git a/test/functional/p2p-acceptblock.py b/test/functional/p2p-acceptblock.py
index 27ae0c27e1..fbe5a78029 100755
--- a/test/functional/p2p-acceptblock.py
+++ b/test/functional/p2p-acceptblock.py
@@ -4,42 +4,32 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
-Since behavior differs when receiving unrequested blocks from whitelisted peers
-versus non-whitelisted peers, this tests the behavior of both (effectively two
-separate tests running in parallel).
+Setup: two nodes, node0+node1, not connected to each other. Node1 will have
+nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
-Setup: three nodes, node0+node1+node2, not connected to each other. Node0 does not
-whitelist localhost, but node1 does. They will each be on their own chain for
-this test. Node2 will have nMinimumChainWork set to 0x10, so it won't process
-low-work unrequested blocks.
-
-We have one NodeConn connection to each, test_node, white_node, and min_work_node,
-respectively.
+We have one NodeConn connection to node0 called test_node, and one to node1
+called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
- The tip should advance for node0 and node1, but node2 should skip processing
- due to nMinimumChainWork.
+ The tip should advance for node0, but node1 should skip processing due to
+ nMinimumChainWork.
-Node2 is unused in tests 3-7:
+Node1 is unused in tests 3-7:
-3. Mine a block that forks the previous block, and deliver to each node from
- corresponding peer.
- Node0 should not process this block (just accept the header), because it is
- unrequested and doesn't have more work than the tip.
- Node1 should process because this is coming from a whitelisted peer.
+3. Mine a block that forks from the genesis block, and deliver to test_node.
+ Node0 should not process this block (just accept the header), because it
+ is unrequested and doesn't have more or equal work to the tip.
-4. Send another block that builds on the forking block.
- Node0 should process this block but be stuck on the shorter chain, because
- it's missing an intermediate block.
- Node1 should reorg to this longer chain.
+4a,b. Send another two blocks that build on the forking block.
+ Node0 should process the second block but be stuck on the shorter chain,
+ because it's missing an intermediate block.
-4b.Send 288 more blocks on the longer chain.
+4c. Send 288 more blocks on the longer chain (the number of blocks ahead
+ we currently store).
Node0 should process all but the last block (too far ahead in height).
- Send all headers to Node1, and then send the last block in that chain.
- Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
@@ -52,16 +42,20 @@ Node2 is unused in tests 3-7:
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
-8. Test Node2 is able to sync when connected to node0 (which should have sufficient
-work on its chain).
+8. Create a fork which is invalid at a height longer than the current chain
+ (ie to which the node will try to reorg) but which has headers built on top
+ of the invalid block. Check that we get disconnected if we send more headers
+ on the chain the node now knows to be invalid.
+9. Test Node1 is able to sync when connected to node0 (which should have sufficient
+ work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
-from test_framework.blocktools import create_block, create_coinbase
+from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
@@ -71,8 +65,8 @@ class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 3
- self.extra_args = [[], ["-whitelist=127.0.0.1"], ["-minimumchainwork=0x10"]]
+ self.num_nodes = 2
+ self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
@@ -84,132 +78,140 @@ class AcceptBlockTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connections and start up the network thread.
- test_node = NodeConnCB() # connects to node0 (not whitelisted)
- white_node = NodeConnCB() # connects to node1 (whitelisted)
- min_work_node = NodeConnCB() # connects to node2 (not whitelisted)
-
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
- connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
- connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node))
- test_node.add_connection(connections[0])
- white_node.add_connection(connections[1])
- min_work_node.add_connection(connections[2])
+ # test_node connects to node0 (not whitelisted)
+ test_node = self.nodes[0].add_p2p_connection(NodeConnCB())
+ # min_work_node connects to node1
+ min_work_node = self.nodes[1].add_p2p_connection(NodeConnCB())
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
- white_node.wait_for_verack()
min_work_node.wait_for_verack()
- # 1. Have nodes mine a block (nodes1/2 leave IBD)
+ # 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
- # This should be accepted by nodes 1/2
+ # This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
- for i in range(3):
+ for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
- white_node.send_message(msg_block(blocks_h2[1]))
- min_work_node.send_message(msg_block(blocks_h2[2]))
+ min_work_node.send_message(msg_block(blocks_h2[1]))
- for x in [test_node, white_node, min_work_node]:
+ for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
- assert_equal(self.nodes[1].getblockcount(), 2)
- assert_equal(self.nodes[2].getblockcount(), 1)
- self.log.info("First height 2 block accepted by node0/node1; correctly rejected by node2")
+ assert_equal(self.nodes[1].getblockcount(), 1)
+ self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
- # 3. Send another block that builds on the original tip.
- blocks_h2f = [] # Blocks at height 2 that fork off the main chain
- for i in range(2):
- blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
- blocks_h2f[i].solve()
- test_node.send_message(msg_block(blocks_h2f[0]))
- white_node.send_message(msg_block(blocks_h2f[1]))
+ # 3. Send another block that builds on genesis.
+ block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
+ block_time += 1
+ block_h1f.solve()
+ test_node.send_message(msg_block(block_h1f))
- for x in [test_node, white_node]:
- x.sync_with_ping()
+ test_node.sync_with_ping()
+ tip_entry_found = False
for x in self.nodes[0].getchaintips():
- if x['hash'] == blocks_h2f[0].hash:
+ if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
+ assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
- for x in self.nodes[1].getchaintips():
- if x['hash'] == blocks_h2f[1].hash:
- assert_equal(x['status'], "valid-headers")
+        # 4. Send another two blocks that build on the fork.
+ block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
+ block_time += 1
+ block_h2f.solve()
+ test_node.send_message(msg_block(block_h2f))
- self.log.info("Second height 2 block accepted only from whitelisted peer")
+ test_node.sync_with_ping()
+ # Since the earlier block was not processed by node, the new block
+ # can't be fully validated.
+ tip_entry_found = False
+ for x in self.nodes[0].getchaintips():
+ if x['hash'] == block_h2f.hash:
+ assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
- # 4. Now send another block that builds on the forking chain.
- blocks_h3 = []
- for i in range(2):
- blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
- blocks_h3[i].solve()
- test_node.send_message(msg_block(blocks_h3[0]))
- white_node.send_message(msg_block(blocks_h3[1]))
+ # But this block should be accepted by node since it has equal work.
+ self.nodes[0].getblock(block_h2f.hash)
+ self.log.info("Second height 2 block accepted, but not reorg'ed to")
- for x in [test_node, white_node]:
- x.sync_with_ping()
- # Since the earlier block was not processed by node0, the new block
+ # 4b. Now send another block that builds on the forking chain.
+ block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
+ block_h3.solve()
+ test_node.send_message(msg_block(block_h3))
+
+ test_node.sync_with_ping()
+ # Since the earlier block was not processed by node, the new block
# can't be fully validated.
+ tip_entry_found = False
for x in self.nodes[0].getchaintips():
- if x['hash'] == blocks_h3[0].hash:
+ if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
+ self.nodes[0].getblock(block_h3.hash)
+
+ # But this block should be accepted by node since it has more work.
+ self.nodes[0].getblock(block_h3.hash)
+ self.log.info("Unrequested more-work block accepted")
+
+ # 4c. Now mine 288 more blocks and deliver; all should be processed but
+        # the last (height-too-high) on node (as long as it's not missing any headers)
+ tip = block_h3
+ all_blocks = []
+ for i in range(288):
+ next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
+ next_block.solve()
+ all_blocks.append(next_block)
+ tip = next_block
+
+ # Now send the block at height 5 and check that it wasn't accepted (missing header)
+ test_node.send_message(msg_block(all_blocks[1]))
+ test_node.sync_with_ping()
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
+ assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
- # But this block should be accepted by node0 since it has more work.
- self.nodes[0].getblock(blocks_h3[0].hash)
- self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
+ # The block at height 5 should be accepted if we provide the missing header, though
+ headers_message = msg_headers()
+ headers_message.headers.append(CBlockHeader(all_blocks[0]))
+ test_node.send_message(headers_message)
+ test_node.send_message(msg_block(all_blocks[1]))
+ test_node.sync_with_ping()
+ self.nodes[0].getblock(all_blocks[1].hash)
- # Node1 should have accepted and reorged.
- assert_equal(self.nodes[1].getblockcount(), 3)
- self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
+ # Now send the blocks in all_blocks
+ for i in range(288):
+ test_node.send_message(msg_block(all_blocks[i]))
+ test_node.sync_with_ping()
- # 4b. Now mine 288 more blocks and deliver; all should be processed but
- # the last (height-too-high) on node0. Node1 should process the tip if
- # we give it the headers chain leading to the tip.
- tips = blocks_h3
- headers_message = msg_headers()
- all_blocks = [] # node0's blocks
- for j in range(2):
- for i in range(288):
- next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
- next_block.solve()
- if j==0:
- test_node.send_message(msg_block(next_block))
- all_blocks.append(next_block)
- else:
- headers_message.headers.append(CBlockHeader(next_block))
- tips[j] = next_block
-
- time.sleep(2)
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
- headers_message.headers.pop() # Ensure the last block is unrequested
- white_node.send_message(headers_message) # Send headers leading to tip
- white_node.send_message(msg_block(tips[1])) # Now deliver the tip
- white_node.sync_with_ping()
- self.nodes[1].getblock(tips[1].hash)
- self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
-
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
- test_node.send_message(msg_block(blocks_h2f[0]))
- # Here, if the sleep is too short, the test could falsely succeed (if the
- # node hasn't processed the block by the time the sleep returns, and then
- # the node processes it and incorrectly advances the tip).
- # But this would be caught later on, when we verify that an inv triggers
- # a getdata request for this block.
+ # The node should have requested the blocks at some point, so
+ # disconnect/reconnect first
+
+ self.nodes[0].disconnect_p2p()
+ test_node = self.nodes[0].add_p2p_connection(NodeConnCB())
+
+ test_node.wait_for_verack()
+ test_node.send_message(msg_block(block_h1f))
+
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
@@ -220,29 +222,98 @@ class AcceptBlockTest(BitcoinTestFramework):
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
- test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
+ test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
- assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
+ assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
- test_node.send_message(msg_block(blocks_h2f[0]))
+ test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
+ self.nodes[0].getblock(all_blocks[286].hash)
+ assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
+ assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
- # 8. Connect node2 to node0 and ensure it is able to sync
- connect_nodes(self.nodes[0], 2)
- sync_blocks([self.nodes[0], self.nodes[2]])
- self.log.info("Successfully synced nodes 2 and 0")
+ # 8. Create a chain which is invalid at a height longer than the
+ # current chain, but which has more blocks on top of that
+ block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
+ block_289f.solve()
+ block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
+ block_290f.solve()
+ block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
+ # block_291 spends a coinbase below maturity!
+ block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
+ block_291.hashMerkleRoot = block_291.calc_merkle_root()
+ block_291.solve()
+ block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
+ block_292.solve()
+
+ # Now send all the headers on the chain and enough blocks to trigger reorg
+ headers_message = msg_headers()
+ headers_message.headers.append(CBlockHeader(block_289f))
+ headers_message.headers.append(CBlockHeader(block_290f))
+ headers_message.headers.append(CBlockHeader(block_291))
+ headers_message.headers.append(CBlockHeader(block_292))
+ test_node.send_message(headers_message)
+
+ test_node.sync_with_ping()
+ tip_entry_found = False
+ for x in self.nodes[0].getchaintips():
+ if x['hash'] == block_292.hash:
+ assert_equal(x['status'], "headers-only")
+ tip_entry_found = True
+ assert(tip_entry_found)
+ assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
+
+ test_node.send_message(msg_block(block_289f))
+ test_node.send_message(msg_block(block_290f))
- [ c.disconnect_node() for c in connections ]
+ test_node.sync_with_ping()
+ self.nodes[0].getblock(block_289f.hash)
+ self.nodes[0].getblock(block_290f.hash)
+
+ test_node.send_message(msg_block(block_291))
+
+ # At this point we've sent an obviously-bogus block, wait for full processing
+ # without assuming whether we will be disconnected or not
+ try:
+ # Only wait a short while so the test doesn't take forever if we do get
+ # disconnected
+ test_node.sync_with_ping(timeout=1)
+ except AssertionError:
+ test_node.wait_for_disconnect()
+
+ self.nodes[0].disconnect_p2p()
+ test_node = self.nodes[0].add_p2p_connection(NodeConnCB())
+
+ NetworkThread().start() # Start up network handling in another thread
+ test_node.wait_for_verack()
+
+ # We should have failed reorg and switched back to 290 (but have block 291)
+ assert_equal(self.nodes[0].getblockcount(), 290)
+ assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
+ assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
+
+ # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
+ block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
+ block_293.solve()
+ headers_message = msg_headers()
+ headers_message.headers.append(CBlockHeader(block_293))
+ test_node.send_message(headers_message)
+ test_node.wait_for_disconnect()
+
+ # 9. Connect node1 to node0 and ensure it is able to sync
+ connect_nodes(self.nodes[0], 1)
+ sync_blocks([self.nodes[0], self.nodes[1]])
+ self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
diff --git a/test/functional/p2p-compactblocks.py b/test/functional/p2p-compactblocks.py
index 94513d3f43..d2c4d39305 100755
--- a/test/functional/p2p-compactblocks.py
+++ b/test/functional/p2p-compactblocks.py
@@ -93,7 +93,9 @@ class CompactBlocksTest(BitcoinTestFramework):
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
- self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
+ # This test was written assuming SegWit is activated using BIP9 at height 432 (3x confirmation window).
+ # TODO: Rewrite this test to support SegWit being always active.
+ self.extra_args = [["-vbparams=segwit:0:0"], ["-vbparams=segwit:0:999999999999", "-txindex"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
@@ -786,23 +788,12 @@ class CompactBlocksTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connections and start up the network thread.
- self.test_node = TestNode()
- self.segwit_node = TestNode()
- self.old_node = TestNode() # version 1 peer <--> segwit node
-
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
- connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
- self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
- connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
- self.old_node, services=NODE_NETWORK))
- self.test_node.add_connection(connections[0])
- self.segwit_node.add_connection(connections[1])
- self.old_node.add_connection(connections[2])
+ self.test_node = self.nodes[0].add_p2p_connection(TestNode())
+ self.segwit_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
+ self.old_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK)
NetworkThread().start() # Start up network handling in another thread
- # Test logic begins here
self.test_node.wait_for_verack()
# We will need UTXOs to construct transactions in later tests.
diff --git a/test/functional/p2p-feefilter.py b/test/functional/p2p-feefilter.py
index 8c92365ced..624278df40 100755
--- a/test/functional/p2p-feefilter.py
+++ b/test/functional/p2p-feefilter.py
@@ -48,25 +48,23 @@ class FeeFilterTest(BitcoinTestFramework):
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
- test_node = TestNode()
- connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
- test_node.add_connection(connection)
+ self.nodes[0].add_p2p_connection(TestNode())
NetworkThread().start()
- test_node.wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
- assert(allInvsMatch(txids, test_node))
- test_node.clear_invs()
+ assert(allInvsMatch(txids, self.nodes[0].p2p))
+ self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
- test_node.send_and_ping(msg_feefilter(15000))
+ self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
- assert(allInvsMatch(txids, test_node))
- test_node.clear_invs()
+ assert(allInvsMatch(txids, self.nodes[0].p2p))
+ self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
@@ -82,14 +80,14 @@ class FeeFilterTest(BitcoinTestFramework):
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
- assert(allInvsMatch(txids, test_node))
- test_node.clear_invs()
+ assert(allInvsMatch(txids, self.nodes[0].p2p))
+ self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
- test_node.send_and_ping(msg_feefilter(0))
+ self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
- assert(allInvsMatch(txids, test_node))
- test_node.clear_invs()
+ assert(allInvsMatch(txids, self.nodes[0].p2p))
+ self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
diff --git a/test/functional/p2p-fingerprint.py b/test/functional/p2p-fingerprint.py
index fe60c6cd46..4b6446fc5b 100755
--- a/test/functional/p2p-fingerprint.py
+++ b/test/functional/p2p-fingerprint.py
@@ -14,7 +14,6 @@ from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
NetworkThread,
- NodeConn,
NodeConnCB,
msg_headers,
msg_block,
@@ -77,11 +76,7 @@ class P2PFingerprintTest(BitcoinTestFramework):
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
- node0 = NodeConnCB()
-
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
- node0.add_connection(connections[0])
+ node0 = self.nodes[0].add_p2p_connection(NodeConnCB())
NetworkThread().start()
node0.wait_for_verack()
diff --git a/test/functional/p2p-leaktests.py b/test/functional/p2p-leaktests.py
index 1dc8f72cd6..a6e47b5df6 100755
--- a/test/functional/p2p-leaktests.py
+++ b/test/functional/p2p-leaktests.py
@@ -39,7 +39,6 @@ class CLazyNode(NodeConnCB):
def on_reject(self, conn, message): self.bad_message(message)
def on_inv(self, conn, message): self.bad_message(message)
def on_addr(self, conn, message): self.bad_message(message)
- def on_alert(self, conn, message): self.bad_message(message)
def on_getdata(self, conn, message): self.bad_message(message)
def on_getblocks(self, conn, message): self.bad_message(message)
def on_tx(self, conn, message): self.bad_message(message)
@@ -97,24 +96,13 @@ class P2PLeakTest(BitcoinTestFramework):
self.extra_args = [['-banscore='+str(banscore)]]
def run_test(self):
- no_version_bannode = CNodeNoVersionBan()
- no_version_idlenode = CNodeNoVersionIdle()
- no_verack_idlenode = CNodeNoVerackIdle()
- unsupported_service_bit5_node = CLazyNode()
- unsupported_service_bit7_node = CLazyNode()
-
self.nodes[0].setmocktime(1501545600) # August 1st 2017
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], unsupported_service_bit5_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5))
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], unsupported_service_bit7_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7))
- no_version_bannode.add_connection(connections[0])
- no_version_idlenode.add_connection(connections[1])
- no_verack_idlenode.add_connection(connections[2])
- unsupported_service_bit5_node.add_connection(connections[3])
- unsupported_service_bit7_node.add_connection(connections[4])
+
+ no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False)
+ no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False)
+ no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())
+ unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
+ unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
NetworkThread().start() # Start up network handling in another thread
@@ -137,7 +125,8 @@ class P2PLeakTest(BitcoinTestFramework):
assert not unsupported_service_bit5_node.connected
assert not unsupported_service_bit7_node.connected
- [conn.disconnect_node() for conn in connections]
+ for _ in range(5):
+ self.nodes[0].disconnect_p2p()
# Wait until all connections are closed
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
@@ -152,13 +141,8 @@ class P2PLeakTest(BitcoinTestFramework):
self.log.info("Service bits 5 and 7 are allowed after August 1st 2018")
self.nodes[0].setmocktime(1533168000) # August 2nd 2018
- allowed_service_bit5_node = NodeConnCB()
- allowed_service_bit7_node = NodeConnCB()
-
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], allowed_service_bit5_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5))
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], allowed_service_bit7_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7))
- allowed_service_bit5_node.add_connection(connections[5])
- allowed_service_bit7_node.add_connection(connections[6])
+ allowed_service_bit5_node = self.nodes[0].add_p2p_connection(NodeConnCB(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
+ allowed_service_bit7_node = self.nodes[0].add_p2p_connection(NodeConnCB(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
NetworkThread().start() # Network thread stopped when all previous NodeConnCBs disconnected. Restart it
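Keyword arguments that previously went to the NodeConn constructor (send_version, services) are forwarded through add_p2p_connection(), as the p2p-leaktests.py hunk above and the p2p-compactblocks.py and p2p-segwit.py hunks show. A sketch of that usage, assuming the NODE_* service flags exported by test_framework.mininode in this tree:

    from test_framework.mininode import (NetworkThread, NodeConnCB,
                                         NODE_NETWORK, NODE_WITNESS)
    from test_framework.test_framework import BitcoinTestFramework

    class ServiceFlagSketch(BitcoinTestFramework):
        def set_test_params(self):
            self.num_nodes = 1

        def run_test(self):
            # A peer that never sends its version message (cf. p2p-leaktests.py).
            silent_peer = self.nodes[0].add_p2p_connection(NodeConnCB(), send_version=False)
            # A peer advertising witness support (cf. p2p-segwit.py, p2p-compactblocks.py).
            witness_peer = self.nodes[0].add_p2p_connection(NodeConnCB(),
                                                            services=NODE_NETWORK | NODE_WITNESS)
            NetworkThread().start()
            witness_peer.wait_for_verack()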
diff --git a/test/functional/p2p-mempool.py b/test/functional/p2p-mempool.py
index 40fcde2605..be467c4223 100755
--- a/test/functional/p2p-mempool.py
+++ b/test/functional/p2p-mempool.py
@@ -19,16 +19,14 @@ class P2PMempoolTests(BitcoinTestFramework):
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
- #connect a mininode
- aTestNode = NodeConnCB()
- node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
- aTestNode.add_connection(node)
+ # Add a p2p connection
+ self.nodes[0].add_p2p_connection(NodeConnCB())
NetworkThread().start()
- aTestNode.wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
#request mempool
- aTestNode.send_message(msg_mempool())
- aTestNode.wait_for_disconnect()
+ self.nodes[0].p2p.send_message(msg_mempool())
+ self.nodes[0].p2p.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
diff --git a/test/functional/p2p-segwit.py b/test/functional/p2p-segwit.py
index f803367668..22da7f2db1 100755
--- a/test/functional/p2p-segwit.py
+++ b/test/functional/p2p-segwit.py
@@ -111,7 +111,8 @@ class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
- self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
+ # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
+ self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
@@ -1493,7 +1494,7 @@ class SegWitTest(BitcoinTestFramework):
# Restart with the new binary
self.stop_node(node_id)
- self.start_node(node_id, extra_args=[])
+ self.start_node(node_id, extra_args=["-vbparams=segwit:0:999999999999"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
@@ -1867,19 +1868,12 @@ class SegWitTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connections and start up the network thread.
- self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
- self.old_node = TestNode() # only NODE_NETWORK
- self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
-
- self.p2p_connections = [self.test_node, self.old_node]
-
- self.connections = []
- self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
- self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
- self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
- self.test_node.add_connection(self.connections[0])
- self.old_node.add_connection(self.connections[1])
- self.std_node.add_connection(self.connections[2])
+ # self.test_node sets NODE_WITNESS|NODE_NETWORK
+ self.test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
+ # self.old_node sets only NODE_NETWORK
+ self.old_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK)
+ # self.std_node is for testing node1 (fRequireStandard=true)
+ self.std_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
NetworkThread().start() # Start up network handling in another thread
diff --git a/test/functional/p2p-timeouts.py b/test/functional/p2p-timeouts.py
index 51d4769efc..14a3bf48fb 100755
--- a/test/functional/p2p-timeouts.py
+++ b/test/functional/p2p-timeouts.py
@@ -39,46 +39,37 @@ class TimeoutsTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connections and start up the network thread.
- self.no_verack_node = TestNode() # never send verack
- self.no_version_node = TestNode() # never send version (just ping)
- self.no_send_node = TestNode() # never send anything
-
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node))
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False))
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False))
- self.no_verack_node.add_connection(connections[0])
- self.no_version_node.add_connection(connections[1])
- self.no_send_node.add_connection(connections[2])
+ no_verack_node = self.nodes[0].add_p2p_connection(TestNode())
+ no_version_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False)
+ no_send_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False)
NetworkThread().start() # Start up network handling in another thread
sleep(1)
- assert(self.no_verack_node.connected)
- assert(self.no_version_node.connected)
- assert(self.no_send_node.connected)
+ assert no_verack_node.connected
+ assert no_version_node.connected
+ assert no_send_node.connected
- ping_msg = msg_ping()
- connections[0].send_message(ping_msg)
- connections[1].send_message(ping_msg)
+ no_verack_node.send_message(msg_ping())
+ no_version_node.send_message(msg_ping())
sleep(30)
- assert "version" in self.no_verack_node.last_message
+ assert "version" in no_verack_node.last_message
- assert(self.no_verack_node.connected)
- assert(self.no_version_node.connected)
- assert(self.no_send_node.connected)
+ assert no_verack_node.connected
+ assert no_version_node.connected
+ assert no_send_node.connected
- connections[0].send_message(ping_msg)
- connections[1].send_message(ping_msg)
+ no_verack_node.send_message(msg_ping())
+ no_version_node.send_message(msg_ping())
sleep(31)
- assert(not self.no_verack_node.connected)
- assert(not self.no_version_node.connected)
- assert(not self.no_send_node.connected)
+ assert not no_verack_node.connected
+ assert not no_version_node.connected
+ assert not no_send_node.connected
if __name__ == '__main__':
TimeoutsTest().main()
diff --git a/test/functional/p2p-versionbits-warning.py b/test/functional/p2p-versionbits-warning.py
index f9bef2580a..464ca5a312 100755
--- a/test/functional/p2p-versionbits-warning.py
+++ b/test/functional/p2p-versionbits-warning.py
@@ -64,16 +64,12 @@ class VersionBitsWarningTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connection and start up the network thread.
- test_node = TestNode()
-
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
- test_node.add_connection(connections[0])
+ self.nodes[0].add_p2p_connection(TestNode())
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
- test_node.wait_for_verack()
+ self.nodes[0].p2p.wait_for_verack()
# 1. Have the node mine one period worth of blocks
self.nodes[0].generate(VB_PERIOD)
@@ -81,7 +77,7 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
# blocks signaling some unknown bit.
nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
- self.send_blocks_with_version(test_node, VB_THRESHOLD-1, nVersion)
+ self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD-1, nVersion)
# Fill rest of period with regular version blocks
self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
@@ -92,7 +88,7 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
# some unknown bit
- self.send_blocks_with_version(test_node, VB_THRESHOLD, nVersion)
+ self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD, nVersion)
self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
# Might not get a versionbits-related alert yet, as we should
# have gotten a different alert due to more than 51/100 blocks
diff --git a/test/functional/segwit.py b/test/functional/segwit.py
index 6ecade7cb6..338fa1bc52 100755
--- a/test/functional/segwit.py
+++ b/test/functional/segwit.py
@@ -77,9 +77,10 @@ class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
- self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0"],
- ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"],
- ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]]
+ # This test covers SegWit both pre- and post-activation, so use the normal BIP9 activation.
+ self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999"],
+ ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999"],
+ ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999"]]
def setup_network(self):
super().setup_network()
diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py
index 60d107b248..55bb80ea00 100755
--- a/test/functional/sendheaders.py
+++ b/test/functional/sendheaders.py
@@ -10,6 +10,17 @@ Setup:
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
+test_null_locators
+==================
+
+Sends two getheaders requests with null locator values. The first request's
+hashstop value refers to a validated block, while the second request's hashstop
+value refers to a block which hasn't been validated. Verifies that only the
+first request returns headers.
+
+test_nonnull_locators
+=====================
+
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
@@ -181,7 +192,7 @@ class SendHeadersTest(BitcoinTestFramework):
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
- [ x.clear_last_announcement() for x in self.p2p_connections ]
+ [x.clear_last_announcement() for x in self.nodes[0].p2ps]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
@@ -193,7 +204,7 @@ class SendHeadersTest(BitcoinTestFramework):
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
- for x in self.p2p_connections:
+ for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
@@ -206,18 +217,10 @@ class SendHeadersTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connections and start up the network thread.
- inv_node = TestNode()
- test_node = TestNode()
-
- self.p2p_connections = [inv_node, test_node]
-
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
+ inv_node = self.nodes[0].add_p2p_connection(TestNode())
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
- inv_node.add_connection(connections[0])
- test_node.add_connection(connections[1])
+ test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_WITNESS)
NetworkThread().start() # Start up network handling in another thread
@@ -229,6 +232,29 @@ class SendHeadersTest(BitcoinTestFramework):
inv_node.sync_with_ping()
test_node.sync_with_ping()
+ self.test_null_locators(test_node)
+ self.test_nonnull_locators(test_node, inv_node)
+
+ def test_null_locators(self, test_node):
+ tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
+ tip_hash = int(tip["hash"], 16)
+
+ self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
+ test_node.clear_last_announcement()
+ test_node.get_headers(locator=[], hashstop=tip_hash)
+ assert_equal(test_node.check_last_announcement(headers=[tip_hash]), True)
+
+ self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
+ block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
+ block.solve()
+ test_node.send_header_for_blocks([block])
+ test_node.clear_last_announcement()
+ test_node.get_headers(locator=[], hashstop=int(block.hash, 16))
+ test_node.sync_with_ping()
+ assert_equal(test_node.block_announced, False)
+ test_node.send_message(msg_block(block))
+
+ def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
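As a wire-level footnote to the new test_null_locators case above: a "null locator" is a getheaders message whose block locator is empty, so the node answers purely from the hashstop field, and only for a block it has already validated. A minimal sketch of building such a request directly with the framework's message classes (msg_getheaders/CBlockLocator field names as in mininode.py; the helper name is mine, and the test above uses its TestNode.get_headers wrapper instead):

    from test_framework.mininode import msg_getheaders

    def send_null_locator_getheaders(p2p_conn, hashstop):
        # An empty vHave is the null locator; the node keys entirely off hashstop
        # and returns that single header only if the block has been validated.
        msg = msg_getheaders()
        msg.locator.vHave = []
        msg.hashstop = hashstop
        p2p_conn.send_message(msg)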
diff --git a/test/functional/test_framework/blockstore.py b/test/functional/test_framework/blockstore.py
index ad04722488..051c57a6c7 100644
--- a/test/functional/test_framework/blockstore.py
+++ b/test/functional/test_framework/blockstore.py
@@ -100,7 +100,7 @@ class BlockStore():
def get_blocks(self, inv):
responses = []
for i in inv:
- if (i.type == 2): # MSG_BLOCK
+ if (i.type == 2 or i.type == (2 | (1 << 30))): # MSG_BLOCK or MSG_WITNESS_BLOCK
data = self.get(i.hash)
if data is not None:
# Use msg_generic to avoid re-serialization
@@ -153,7 +153,7 @@ class TxStore():
def get_transactions(self, inv):
responses = []
for i in inv:
- if (i.type == 1): # MSG_TX
+ if (i.type == 1 or i.type == (1 | (1 << 30))): # MSG_TX or MSG_WITNESS_TX
tx = self.get(i.hash)
if tx is not None:
responses.append(msg_generic(b"tx", tx))
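The magic numbers in the two hunks above follow BIP144's inv encoding: the witness variant of an inv type is the base type with bit 30 set, so MSG_WITNESS_BLOCK is 2 | (1 << 30) and MSG_WITNESS_TX is 1 | (1 << 30). A small illustrative helper (names are mine, not part of blockstore.py):

    MSG_WITNESS_FLAG = 1 << 30  # BIP144: bit 30 marks the witness variant of an inv type

    def is_block_inv(inv_type):
        # MSG_BLOCK (2) and MSG_WITNESS_BLOCK (2 | MSG_WITNESS_FLAG) are treated alike.
        return inv_type & ~MSG_WITNESS_FLAG == 2

    def is_tx_inv(inv_type):
        # MSG_TX (1) and MSG_WITNESS_TX (1 | MSG_WITNESS_FLAG) are treated alike.
        return inv_type & ~MSG_WITNESS_FLAG == 1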
diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py
index b0417e02d8..03f967ba71 100755
--- a/test/functional/test_framework/comptool.py
+++ b/test/functional/test_framework/comptool.py
@@ -80,9 +80,9 @@ class TestNode(NodeConnCB):
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
- if i.type == 1:
+ if i.type == 1 or i.type == 1 | (1 << 30): # MSG_TX or MSG_WITNESS_TX
self.tx_request_map[i.hash] = True
- elif i.type == 2:
+ elif i.type == 2 or i.type == 2 | (1 << 30): # MSG_BLOCK or MSG_WITNESS_BLOCK
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
@@ -295,8 +295,11 @@ class TestManager():
# Wait until verack is received
self.wait_for_verack()
- test_number = 1
- for test_instance in self.test_generator.get_tests():
+ test_number = 0
+ tests = self.test_generator.get_tests()
+ for test_instance in tests:
+ test_number += 1
+ logger.info("Running test %d: %s line %s" % (test_number, tests.gi_code.co_filename, tests.gi_frame.f_lineno))
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
@@ -397,9 +400,6 @@ class TestManager():
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
- logger.info("Test %d: PASS" % test_number)
- test_number += 1
-
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
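The new per-test log line in TestManager relies on CPython generator introspection: tests.gi_code.co_filename is the file defining the get_tests() generator, and tests.gi_frame.f_lineno is the line the generator is currently suspended at, i.e. roughly where the current TestInstance was yielded. A standalone illustration of those attributes (plain Python, nothing framework-specific):

    def numbers():
        yield 1
        yield 2

    gen = numbers()
    next(gen)                        # generator is now paused at 'yield 1'
    print(gen.gi_code.co_filename)   # file that defines numbers()
    print(gen.gi_frame.f_lineno)     # line number of the 'yield 1' statement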
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 345ecfe76d..8fbc63fba4 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -37,7 +37,7 @@ from threading import RLock, Thread
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
-BIP0031_VERSION = 60000
+MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
@@ -666,81 +666,6 @@ class CBlock(CBlockHeader):
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
-class CUnsignedAlert():
- def __init__(self):
- self.nVersion = 1
- self.nRelayUntil = 0
- self.nExpiration = 0
- self.nID = 0
- self.nCancel = 0
- self.setCancel = []
- self.nMinVer = 0
- self.nMaxVer = 0
- self.setSubVer = []
- self.nPriority = 0
- self.strComment = b""
- self.strStatusBar = b""
- self.strReserved = b""
-
- def deserialize(self, f):
- self.nVersion = struct.unpack("<i", f.read(4))[0]
- self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
- self.nExpiration = struct.unpack("<q", f.read(8))[0]
- self.nID = struct.unpack("<i", f.read(4))[0]
- self.nCancel = struct.unpack("<i", f.read(4))[0]
- self.setCancel = deser_int_vector(f)
- self.nMinVer = struct.unpack("<i", f.read(4))[0]
- self.nMaxVer = struct.unpack("<i", f.read(4))[0]
- self.setSubVer = deser_string_vector(f)
- self.nPriority = struct.unpack("<i", f.read(4))[0]
- self.strComment = deser_string(f)
- self.strStatusBar = deser_string(f)
- self.strReserved = deser_string(f)
-
- def serialize(self):
- r = b""
- r += struct.pack("<i", self.nVersion)
- r += struct.pack("<q", self.nRelayUntil)
- r += struct.pack("<q", self.nExpiration)
- r += struct.pack("<i", self.nID)
- r += struct.pack("<i", self.nCancel)
- r += ser_int_vector(self.setCancel)
- r += struct.pack("<i", self.nMinVer)
- r += struct.pack("<i", self.nMaxVer)
- r += ser_string_vector(self.setSubVer)
- r += struct.pack("<i", self.nPriority)
- r += ser_string(self.strComment)
- r += ser_string(self.strStatusBar)
- r += ser_string(self.strReserved)
- return r
-
- def __repr__(self):
- return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
- % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
- self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
- self.strComment, self.strStatusBar, self.strReserved)
-
-
-class CAlert():
- def __init__(self):
- self.vchMsg = b""
- self.vchSig = b""
-
- def deserialize(self, f):
- self.vchMsg = deser_string(f)
- self.vchSig = deser_string(f)
-
- def serialize(self):
- r = b""
- r += ser_string(self.vchMsg)
- r += ser_string(self.vchSig)
- return r
-
- def __repr__(self):
- return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
- % (len(self.vchMsg), len(self.vchSig))
-
-
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
@@ -949,7 +874,7 @@ class msg_version():
def __init__(self):
self.nVersion = MY_VERSION
- self.nServices = 1
+ self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
@@ -1044,25 +969,6 @@ class msg_addr():
return "msg_addr(addrs=%s)" % (repr(self.addrs))
-class msg_alert():
- command = b"alert"
-
- def __init__(self):
- self.alert = CAlert()
-
- def deserialize(self, f):
- self.alert = CAlert()
- self.alert.deserialize(f)
-
- def serialize(self):
- r = b""
- r += self.alert.serialize()
- return r
-
- def __repr__(self):
- return "msg_alert(alert=%s)" % (repr(self.alert), )
-
-
class msg_inv():
command = b"inv"
@@ -1195,22 +1101,6 @@ class msg_getaddr():
return "msg_getaddr()"
-class msg_ping_prebip31():
- command = b"ping"
-
- def __init__(self):
- pass
-
- def deserialize(self, f):
- pass
-
- def serialize(self):
- return b""
-
- def __repr__(self):
- return "msg_ping() (pre-bip31)"
-
-
class msg_ping():
command = b"ping"
@@ -1458,9 +1348,7 @@ class NodeConnCB():
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
- if they want to alter message handling behaviour.
- """
-
+ if they want to alter message handling behaviour."""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
@@ -1474,25 +1362,13 @@ class NodeConnCB():
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
- # deliver_sleep_time is helpful for debugging race conditions in p2p
- # tests; it causes message delivery to sleep for the specified time
- # before acquiring the global lock and delivering the next message.
- self.deliver_sleep_time = None
-
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
- and the most recent message of each type.
-
- Optionally waits for deliver_sleep_time before dispatching message.
- """
-
- deliver_sleep = self.get_deliver_sleep_time()
- if deliver_sleep is not None:
- time.sleep(deliver_sleep)
+ and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
@@ -1504,10 +1380,6 @@ class NodeConnCB():
sys.exc_info()[0]))
raise
- def get_deliver_sleep_time(self):
- with mininode_lock:
- return self.deliver_sleep_time
-
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
@@ -1519,7 +1391,6 @@ class NodeConnCB():
self.connection = None
def on_addr(self, conn, message): pass
- def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
@@ -1546,19 +1417,15 @@ class NodeConnCB():
conn.send_message(want)
def on_ping(self, conn, message):
- if conn.ver_send > BIP0031_VERSION:
- conn.send_message(msg_pong(message.nonce))
+ conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
- if message.nVersion >= 209:
- conn.send_message(msg_verack())
- conn.ver_send = min(MY_VERSION, message.nVersion)
- if message.nVersion < 209:
- conn.ver_recv = conn.ver_send
+ assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
+ conn.send_message(msg_verack())
conn.nServices = message.nServices
# Connection helper methods
@@ -1616,14 +1483,14 @@ class NodeConnCB():
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
-# The actual NodeConn class
-# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
+ """The actual NodeConn class
+
+ This class provides an interface for a p2p connection to a specified node."""
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
- b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
@@ -1649,7 +1516,7 @@ class NodeConn(asyncore.dispatcher):
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
- def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
+ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK|NODE_WITNESS, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
@@ -1740,40 +1607,27 @@ class NodeConn(asyncore.dispatcher):
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
- if self.ver_recv < 209:
- if len(self.recvbuf) < 4 + 12 + 4:
- return
- command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
- msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
- checksum = None
- if len(self.recvbuf) < 4 + 12 + 4 + msglen:
- return
- msg = self.recvbuf[4+12+4:4+12+4+msglen]
- self.recvbuf = self.recvbuf[4+12+4+msglen:]
- else:
- if len(self.recvbuf) < 4 + 12 + 4 + 4:
- return
- command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
- msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
- checksum = self.recvbuf[4+12+4:4+12+4+4]
- if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
- return
- msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
- th = sha256(msg)
- h = sha256(th)
- if checksum != h[:4]:
- raise ValueError("got bad checksum " + repr(self.recvbuf))
- self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
- if command in self.messagemap:
- f = BytesIO(msg)
- t = self.messagemap[command]()
- t.deserialize(f)
- self.got_message(t)
- else:
- logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
- raise ValueError("Unknown command: '%s'" % (command))
+ if len(self.recvbuf) < 4 + 12 + 4 + 4:
+ return
+ command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
+ msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+ checksum = self.recvbuf[4+12+4:4+12+4+4]
+ if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
+ return
+ msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
+ th = sha256(msg)
+ h = sha256(th)
+ if checksum != h[:4]:
+ raise ValueError("got bad checksum " + repr(self.recvbuf))
+ self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
+ if command not in self.messagemap:
+ raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
+ f = BytesIO(msg)
+ t = self.messagemap[command]()
+ t.deserialize(f)
+ self.got_message(t)
except Exception as e:
- logger.exception('got_data:', repr(e))
+ logger.exception('Error reading message: %s', repr(e))
raise
def send_message(self, message, pushbuf=False):
@@ -1786,10 +1640,9 @@ class NodeConn(asyncore.dispatcher):
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
- if self.ver_send >= 209:
- th = sha256(data)
- h = sha256(th)
- tmsg += h[:4]
+ th = sha256(data)
+ h = sha256(th)
+ tmsg += h[:4]
tmsg += data
with mininode_lock:
if (len(self.sendbuf) == 0 and not pushbuf):
@@ -1803,9 +1656,6 @@ class NodeConn(asyncore.dispatcher):
self.last_sent = time.time()
def got_message(self, message):
- if message.command == b"version":
- if message.nVersion <= BIP0031_VERSION:
- self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
@@ -1838,13 +1688,3 @@ class NetworkThread(Thread):
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
-
-
-# An exception we can raise if we detect a potential disconnect
-# (p2p or rpc) before the test is complete
-class EarlyDisconnectError(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return repr(self.value)
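With the pre-209/pre-BIP31 branches removed, got_data() and send_message() above assume a single wire format: 4-byte network magic, 12-byte NUL-padded command, 4-byte little-endian payload length, 4-byte double-SHA256 checksum, then the payload. A self-contained sketch of that framing outside the asyncore machinery (regtest magic as in MAGIC_BYTES; the helper names are mine):

    import struct
    from hashlib import sha256

    REGTEST_MAGIC = b"\xfa\xbf\xb5\xda"

    def frame_message(command, payload):
        # Header layout: magic | command (12 bytes, NUL padded) | length | checksum.
        checksum = sha256(sha256(payload).digest()).digest()[:4]
        return (REGTEST_MAGIC
                + command.ljust(12, b"\x00")
                + struct.pack("<I", len(payload))
                + checksum
                + payload)

    def parse_header(data):
        # Returns (command, payload length, checksum) from a framed message.
        command = data[4:16].split(b"\x00", 1)[0]
        length, = struct.unpack("<I", data[16:20])
        checksum = data[20:24]
        return command, length, checksum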
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 12dab57a02..8b28064c46 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -13,13 +13,15 @@ import os
import subprocess
import time
+from .authproxy import JSONRPCException
+from .mininode import NodeConn
from .util import (
assert_equal,
get_rpc_proxy,
rpc_url,
wait_until,
+ p2p_port,
)
-from .authproxy import JSONRPCException
BITCOIND_PROC_WAIT_TIMEOUT = 60
@@ -31,9 +33,11 @@ class TestNode():
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
+ - one or more P2P connections to the node
+
- To make things easier for the test writer, a bit of magic is happening under the covers.
- Any unrecognised messages will be dispatched to the RPC connection."""
+ To make things easier for the test writer, any unrecognised messages will
+ be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir):
self.index = i
@@ -63,10 +67,12 @@ class TestNode():
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
- def __getattr__(self, *args, **kwargs):
+ self.p2ps = []
+
+ def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection."""
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
- return self.rpc.__getattr__(*args, **kwargs)
+ return getattr(self.rpc, name)
def start(self, extra_args=None, stderr=None):
"""Start the node."""
@@ -119,6 +125,7 @@ class TestNode():
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
+ del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
@@ -151,6 +158,38 @@ class TestNode():
self.encryptwallet(passphrase)
self.wait_until_stopped()
+ def add_p2p_connection(self, p2p_conn, **kwargs):
+ """Add a p2p connection to the node.
+
+ This method adds the p2p connection to the self.p2ps list and also
+ returns the connection to the caller."""
+ if 'dstport' not in kwargs:
+ kwargs['dstport'] = p2p_port(self.index)
+ if 'dstaddr' not in kwargs:
+ kwargs['dstaddr'] = '127.0.0.1'
+ self.p2ps.append(p2p_conn)
+ kwargs.update({'rpc': self.rpc, 'callback': p2p_conn})
+ p2p_conn.add_connection(NodeConn(**kwargs))
+
+ return p2p_conn
+
+ @property
+ def p2p(self):
+ """Return the first p2p connection
+
+ Convenience property - most tests only use a single p2p connection to each
+ node, so this saves having to write node.p2ps[0] many times."""
+ assert self.p2ps, "No p2p connection"
+ return self.p2ps[0]
+
+ def disconnect_p2p(self, index=0):
+ """Close the p2p connection to the node."""
+ # The connection could have already been closed by the other end. Calling
+ # disconnect_p2p() on an already disconnected p2p connection is not an error.
+ if self.p2ps[index].connection is not None:
+ self.p2ps[index].connection.disconnect_node()
+ del self.p2ps[index]
+
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 80a5ffefb4..ca36426a0a 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -125,6 +125,7 @@ BASE_SCRIPTS= [
'minchainwork.py',
'p2p-fingerprint.py',
'uacomment.py',
+ 'p2p-acceptblock.py',
]
EXTENDED_SCRIPTS = [
@@ -152,7 +153,6 @@ EXTENDED_SCRIPTS = [
'txn_clone.py --mineblock',
'notifications.py',
'invalidateblock.py',
- 'p2p-acceptblock.py',
'replace-by-fee.py',
]
diff --git a/test/functional/wallet-accounts.py b/test/functional/wallet-accounts.py
index 40726d2a76..bc1efaee15 100755
--- a/test/functional/wallet-accounts.py
+++ b/test/functional/wallet-accounts.py
@@ -72,62 +72,135 @@ class WalletAccountsTest(BitcoinTestFramework):
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
- accounts = ["a", "b", "c", "d", "e"]
amount_to_send = 1.0
- account_addresses = dict()
+
+ # Create accounts and make sure subsequent account API calls
+ # recognize the account/address associations.
+ accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
for account in accounts:
- address = node.getaccountaddress(account)
- account_addresses[account] = address
-
- node.getnewaddress(account)
- assert_equal(node.getaccount(address), account)
- assert(address in node.getaddressesbyaccount(account))
-
- node.sendfrom("", address, amount_to_send)
-
+ account.add_receive_address(node.getaccountaddress(account.name))
+ account.verify(node)
+
+ # Send a transaction to each account, and make sure this forces
+ # getaccountaddress to generate a new receiving address.
+ for account in accounts:
+ node.sendtoaddress(account.receive_address, amount_to_send)
+ account.add_receive_address(node.getaccountaddress(account.name))
+ account.verify(node)
+
+ # Check the amounts received.
node.generate(1)
+ for account in accounts:
+ assert_equal(
+ node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
+ assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)
- for i in range(len(accounts)):
- from_account = accounts[i]
+ # Check that sendfrom account reduces listaccounts balances.
+ for i, account in enumerate(accounts):
to_account = accounts[(i+1) % len(accounts)]
- to_address = account_addresses[to_account]
- node.sendfrom(from_account, to_address, amount_to_send)
-
+ node.sendfrom(account.name, to_account.receive_address, amount_to_send)
node.generate(1)
-
for account in accounts:
- address = node.getaccountaddress(account)
- assert(address != account_addresses[account])
- assert_equal(node.getreceivedbyaccount(account), 2)
- node.move(account, "", node.getbalance(account))
-
+ account.add_receive_address(node.getaccountaddress(account.name))
+ account.verify(node)
+ assert_equal(node.getreceivedbyaccount(account.name), 2)
+ node.move(account.name, "", node.getbalance(account.name))
+ account.verify(node)
node.generate(101)
-
expected_account_balances = {"": 5200}
for account in accounts:
- expected_account_balances[account] = 0
-
+ expected_account_balances[account.name] = 0
assert_equal(node.listaccounts(), expected_account_balances)
-
assert_equal(node.getbalance(""), 5200)
+ # Check that setaccount can assign an account to a new unused address.
for account in accounts:
address = node.getaccountaddress("")
- node.setaccount(address, account)
- assert(address in node.getaddressesbyaccount(account))
+ node.setaccount(address, account.name)
+ account.add_address(address)
+ account.verify(node)
assert(address not in node.getaddressesbyaccount(""))
+ # Check that addmultisigaddress can assign accounts.
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
- multisig_address = node.addmultisigaddress(5, addresses, account)
+ multisig_address = node.addmultisigaddress(5, addresses, account.name)
+ account.add_address(multisig_address)
+ account.verify(node)
node.sendfrom("", multisig_address, 50)
-
node.generate(101)
-
for account in accounts:
- assert_equal(node.getbalance(account), 50)
+ assert_equal(node.getbalance(account.name), 50)
+
+ # Check that setaccount can change the account of an address from a
+ # different account.
+ change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
+
+ # Check that setaccount can change the account of an address which
+ # is the receiving address of a different account.
+ change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
+
+ # Check that setaccount can set the account of an address already
+ # in the account. This is a no-op.
+ change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
+
+ # Check that setaccount can set the account of an address which is
+ # already the receiving address of the account. It would probably make
+ # sense for this to be a no-op, but right now it resets the receiving
+ # address, causing getaccountaddress to return a brand new address.
+ change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
+
+class Account:
+ def __init__(self, name):
+ # Account name
+ self.name = name
+ # Current receiving address associated with this account.
+ self.receive_address = None
+ # List of all addresses assigned with this account
+ self.addresses = []
+
+ def add_address(self, address):
+ assert_equal(address not in self.addresses, True)
+ self.addresses.append(address)
+
+ def add_receive_address(self, address):
+ self.add_address(address)
+ self.receive_address = address
+
+ def verify(self, node):
+ if self.receive_address is not None:
+ assert self.receive_address in self.addresses
+ assert_equal(node.getaccountaddress(self.name), self.receive_address)
+
+ for address in self.addresses:
+ assert_equal(node.getaccount(address), self.name)
+
+ assert_equal(
+ set(node.getaddressesbyaccount(self.name)), set(self.addresses))
+
+
+def change_account(node, address, old_account, new_account):
+ assert_equal(address in old_account.addresses, True)
+ node.setaccount(address, new_account.name)
+
+ old_account.addresses.remove(address)
+ new_account.add_address(address)
+
+ # Calling setaccount on an address which was previously the receiving
+ # address of a different account should reset the receiving address of
+ # the old account, causing getaccountaddress to return a brand new
+ # address.
+ if address == old_account.receive_address:
+ new_address = node.getaccountaddress(old_account.name)
+ assert_equal(new_address not in old_account.addresses, True)
+ assert_equal(new_address not in new_account.addresses, True)
+ old_account.add_receive_address(new_address)
+
+ old_account.verify(node)
+ new_account.verify(node)
+
if __name__ == '__main__':
WalletAccountsTest().main()
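The comment inside change_account() captures the accounts-API quirk that the last two checks exercise: reassigning an account's current receiving address forces getaccountaddress to hand out a fresh one. A minimal RPC-level restatement of that sequence (node is any wallet-enabled test node; this mirrors the helper above rather than adding behaviour):

    addr = node.getaccountaddress("a")    # current receiving address of account "a"
    node.setaccount(addr, "b")            # reassign it to account "b"
    new_addr = node.getaccountaddress("a")
    assert new_addr != addr               # "a" now reports a brand new receiving address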
diff --git a/test/functional/walletbackup.py b/test/functional/walletbackup.py
index 15ea26afa1..85a149793e 100755
--- a/test/functional/walletbackup.py
+++ b/test/functional/walletbackup.py
@@ -190,6 +190,16 @@ class WalletBackupTest(BitcoinTestFramework):
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
+ # Backup to source wallet file must fail
+ sourcePaths = [
+ tmpdir + "/node0/regtest/wallet.dat",
+ tmpdir + "/node0/./regtest/wallet.dat",
+ tmpdir + "/node0/regtest/",
+ tmpdir + "/node0/regtest"]
+
+ for sourcePath in sourcePaths:
+ assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
+
if __name__ == '__main__':
WalletBackupTest().main()
diff --git a/test/functional/zmq_test.py b/test/functional/zmq_test.py
index 165f9192dd..fa30318416 100755
--- a/test/functional/zmq_test.py
+++ b/test/functional/zmq_test.py
@@ -8,10 +8,12 @@ import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
+from test_framework.mininode import CTransaction
from test_framework.util import (assert_equal,
bytes_to_hex_str,
hash256,
)
+from io import BytesIO
class ZMQSubscriber:
def __init__(self, socket, topic):
@@ -93,7 +95,10 @@ class ZMQTest (BitcoinTestFramework):
# Should receive the coinbase raw transaction.
hex = self.rawtx.receive()
- assert_equal(hash256(hex), txid)
+ tx = CTransaction()
+ tx.deserialize(BytesIO(hex))
+ tx.calc_sha256()
+ assert_equal(tx.hash, bytes_to_hex_str(txid))
# Should receive the generated block hash.
hash = bytes_to_hex_str(self.hashblock.receive())
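The zmq_test.py change reflects that the raw transaction published on the rawtx topic is witness-serialized, so for a segwit coinbase hash256() of those bytes yields the wtxid rather than the txid advertised on hashtx; deserializing and calling calc_sha256() recomputes the hash over the non-witness serialization. A small sketch of that conversion, assuming the CTransaction API exactly as used above:

    from io import BytesIO
    from test_framework.mininode import CTransaction

    def txid_from_raw(raw_tx_bytes):
        # hash256(raw_tx_bytes) would give the wtxid for a witness-serialized tx;
        # CTransaction re-hashes the non-witness serialization to produce the txid.
        tx = CTransaction()
        tx.deserialize(BytesIO(raw_tx_bytes))
        tx.calc_sha256()
        return tx.hash  # hex string matching bytes_to_hex_str(txid)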