Diffstat (limited to 'test/functional/sendheaders.py')
-rwxr-xr-x  test/functional/sendheaders.py  313
1 file changed, 182 insertions(+), 131 deletions(-)
diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py
index fe577dc20a..256227f721 100755
--- a/test/functional/sendheaders.py
+++ b/test/functional/sendheaders.py
@@ -4,11 +4,24 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
-Setup:
+Setup:
-- Two nodes, two p2p connections to node0. One p2p connection should only ever
- receive inv's (omitted from testing description below, this is our control).
- Second node is used for creating reorgs.
+- Two nodes:
+ - node0 is the node-under-test. We create two p2p connections to it. The
+ first p2p connection is a control and should only ever receive inv's. The
+ second p2p connection tests the headers sending logic.
+ - node1 is used to create reorgs.
+
+test_null_locators
+==================
+
+Sends two getheaders requests with null locator values. The first request's
+hashstop value refers to a validated block, while the second request's hashstop
+value refers to a block which hasn't been validated. Verifies that only the
+first request returns headers.
+
+test_nonnull_locators
+=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
@@ -72,159 +85,197 @@ d. Announce 49 headers that don't connect.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
-
-from test_framework.mininode import *
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
+from test_framework.mininode import (
+ CBlockHeader,
+ CInv,
+ NODE_WITNESS,
+ network_thread_start,
+ P2PInterface,
+ mininode_lock,
+ msg_block,
+ msg_getblocks,
+ msg_getdata,
+ msg_getheaders,
+ msg_headers,
+ msg_inv,
+ msg_sendheaders,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ sync_blocks,
+ wait_until,
+)
-direct_fetch_response_time = 0.05
+DIRECT_FETCH_RESPONSE_TIME = 0.05
-
-class TestNode(NodeConnCB):
+class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
+
self.block_announced = False
self.last_blockhash_announced = None
- def clear_last_announcement(self):
- with mininode_lock:
- self.block_announced = False
- self.last_message.pop("inv", None)
- self.last_message.pop("headers", None)
-
- # Request data for a list of block hashes
- def get_data(self, block_hashes):
+ def send_get_data(self, block_hashes):
+ """Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
- self.connection.send_message(msg)
+ self.send_message(msg)
- def get_headers(self, locator, hashstop):
+ def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
- self.connection.send_message(msg)
+ self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
- self.connection.send_message(msg)
+ self.send_message(msg)
+
+ def send_header_for_blocks(self, new_blocks):
+ headers_message = msg_headers()
+ headers_message.headers = [CBlockHeader(b) for b in new_blocks]
+ self.send_message(headers_message)
- def on_inv(self, conn, message):
+ def send_getblocks(self, locator):
+ getblocks_message = msg_getblocks()
+ getblocks_message.locator.vHave = locator
+ self.send_message(getblocks_message)
+
+ def wait_for_getdata(self, hash_list, timeout=60):
+ if hash_list == []:
+ return
+
+ test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_block_announcement(self, block_hash, timeout=60):
+ test_function = lambda: self.last_blockhash_announced == block_hash
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
- def on_headers(self, conn, message):
+ def on_headers(self, message):
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256
- # Test whether the last announcement we received had the
- # right header or the right inv
- # inv and headers should be lists of block hashes
+ def clear_last_announcement(self):
+ with mininode_lock:
+ self.block_announced = False
+ self.last_message.pop("inv", None)
+ self.last_message.pop("headers", None)
+
def check_last_announcement(self, headers=None, inv=None):
- expect_headers = headers if headers != None else []
- expect_inv = inv if inv != None else []
+ """Test whether the last announcement received had the right header or the right inv.
+
+ inv and headers should be lists of block hashes."""
+
test_function = lambda: self.block_announced
wait_until(test_function, timeout=60, lock=mininode_lock)
+
with mininode_lock:
self.block_announced = False
- success = True
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
- if compare_inv != expect_inv:
- success = False
+ if inv is not None:
+ assert_equal(compare_inv, inv)
- hash_headers = []
+ compare_headers = []
if "headers" in self.last_message:
- # treat headers as a list of block hashes
- hash_headers = [ x.sha256 for x in self.last_message["headers"].headers ]
- if hash_headers != expect_headers:
- success = False
+ compare_headers = [x.sha256 for x in self.last_message["headers"].headers]
+ if headers is not None:
+ assert_equal(compare_headers, headers)
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
- return success
-
- def wait_for_getdata(self, hash_list, timeout=60):
- if hash_list == []:
- return
-
- test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
- return
-
- def wait_for_block_announcement(self, block_hash, timeout=60):
- test_function = lambda: self.last_blockhash_announced == block_hash
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
- return
-
- def send_header_for_blocks(self, new_blocks):
- headers_message = msg_headers()
- headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
- self.send_message(headers_message)
-
- def send_getblocks(self, locator):
- getblocks_message = msg_getblocks()
- getblocks_message.locator.vHave = locator
- self.send_message(getblocks_message)
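The net effect of the BaseNode changes above is that check_last_announcement now asserts internally instead of returning a bool, which is why every call site further down drops the assert_equal(..., True) wrapper and passes headers=[] explicitly when only an inv is expected. A sketch of the before/after usage, for orientation only:

    # Old pattern (helper returned True/False, caller asserted on it):
    #     assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
    # New pattern (helper asserts internally; require "no headers" explicitly):
    #     inv_node.check_last_announcement(inv=[tip], headers=[])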
class SendHeadersTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
- # mine count blocks and return the new tip
def mine_blocks(self, count):
+ """Mine count blocks and return the new tip."""
+
# Clear out last block announcement from each p2p listener
- [ x.clear_last_announcement() for x in self.p2p_connections ]
+ [x.clear_last_announcement() for x in self.nodes[0].p2ps]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
- # mine a reorg that invalidates length blocks (replacing them with
- # length+1 blocks).
- # Note: we clear the state of our p2p connections after the
- # to-be-reorged-out blocks are mined, so that we don't break later tests.
- # return the list of block hashes newly mined
def mine_reorg(self, length):
- self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
+ """Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
+
+ Note: we clear the state of our p2p connections after the
+ to-be-reorged-out blocks are mined, so that we don't break later tests.
+ Return the list of block hashes newly mined."""
+
+ self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
- for x in self.p2p_connections:
+ for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
tip_height = self.nodes[1].getblockcount()
- hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
+ hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
- all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
+ all_hashes = self.nodes[1].generate(length + 1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
- inv_node = TestNode()
- test_node = TestNode()
-
- self.p2p_connections = [inv_node, test_node]
-
- connections = []
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
- # Set nServices to 0 for test_node, so no block download will occur outside of
- # direct fetching
- connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
- inv_node.add_connection(connections[0])
- test_node.add_connection(connections[1])
+ inv_node = self.nodes[0].add_p2p_connection(BaseNode())
+ # Make sure NODE_NETWORK is not set for test_node, so no block download
+ # will occur outside of direct fetching
+ test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
- NetworkThread().start() # Start up network handling in another thread
+ network_thread_start()
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
+ # Ensure veracks have been processed by our peer
+ inv_node.sync_with_ping()
+ test_node.sync_with_ping()
+
+ self.test_null_locators(test_node, inv_node)
+ self.test_nonnull_locators(test_node, inv_node)
+
+ def test_null_locators(self, test_node, inv_node):
+ tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
+ tip_hash = int(tip["hash"], 16)
+
+ inv_node.check_last_announcement(inv=[tip_hash], headers=[])
+ test_node.check_last_announcement(inv=[tip_hash], headers=[])
+
+ self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
+ test_node.clear_last_announcement()
+ test_node.send_get_headers(locator=[], hashstop=tip_hash)
+ test_node.check_last_announcement(headers=[tip_hash])
+
+ self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
+ block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
+ block.solve()
+ test_node.send_header_for_blocks([block])
+ test_node.clear_last_announcement()
+ test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
+ test_node.sync_with_ping()
+ assert_equal(test_node.block_announced, False)
+ inv_node.clear_last_announcement()
+ test_node.send_message(msg_block(block))
+ inv_node.check_last_announcement(inv=[int(block.hash, 16)], headers=[])
+
+ def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
@@ -233,30 +284,30 @@ class SendHeadersTest(BitcoinTestFramework):
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
- assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
- assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+ inv_node.check_last_announcement(inv=[tip], headers=[])
+ test_node.check_last_announcement(inv=[tip], headers=[])
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
- test_node.get_data([tip])
+ test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
- test_node.get_headers(locator=[old_tip], hashstop=tip)
- test_node.get_data([tip])
+ test_node.send_get_headers(locator=[old_tip], hashstop=tip)
+ test_node.send_get_data([tip])
test_node.wait_for_block(tip)
- test_node.clear_last_announcement() # since we requested headers...
+ test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
- new_block = create_block(tip, create_coinbase(height+1), block_time)
+ new_block = create_block(tip, create_coinbase(height + 1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_message(msg_block(new_block))
- test_node.sync_with_ping() # make sure this block is processed
+ test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
@@ -267,15 +318,15 @@ class SendHeadersTest(BitcoinTestFramework):
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
- test_node.get_headers(locator=[prev_tip], hashstop=0)
+ test_node.send_get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
- assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
- assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+ inv_node.check_last_announcement(inv=[tip], headers=[])
+ test_node.check_last_announcement(headers=[tip])
- height = self.nodes[0].getblockcount()+1
+ height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
@@ -284,7 +335,7 @@ class SendHeadersTest(BitcoinTestFramework):
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
- for b in range(i+1):
+ for b in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -298,7 +349,7 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
- [ inv_node.send_block_inv(x.sha256) for x in blocks ]
+ [inv_node.send_block_inv(x.sha256) for x in blocks]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
@@ -309,7 +360,7 @@ class SendHeadersTest(BitcoinTestFramework):
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
- [ test_node.send_message(msg_block(x)) for x in blocks ]
+ [test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
@@ -317,8 +368,8 @@ class SendHeadersTest(BitcoinTestFramework):
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
- assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
- assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+ inv_node.check_last_announcement(inv=[tip], headers=[])
+ test_node.check_last_announcement(headers=[tip])
height += 1
block_time += 1
@@ -332,16 +383,16 @@ class SendHeadersTest(BitcoinTestFramework):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
- assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
- assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
+ inv_node.check_last_announcement(inv=[tip], headers=[])
+ test_node.check_last_announcement(headers=new_block_hashes)
- block_time += 8
+ block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
- assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
- assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+ inv_node.check_last_announcement(inv=[tip], headers=[])
+ test_node.check_last_announcement(inv=[tip], headers=[])
block_time += 9
@@ -349,42 +400,42 @@ class SendHeadersTest(BitcoinTestFramework):
fork_point = int(fork_point, 16)
# Use getblocks/getdata
- test_node.send_getblocks(locator = [fork_point])
- assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
- test_node.get_data(new_block_hashes)
+ test_node.send_getblocks(locator=[fork_point])
+ test_node.check_last_announcement(inv=new_block_hashes, headers=[])
+ test_node.send_get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
- assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
- assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+ inv_node.check_last_announcement(inv=[tip], headers=[])
+ test_node.check_last_announcement(inv=[tip], headers=[])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
- test_node.get_data([tip])
+ test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
- test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
- test_node.get_data([tip])
+ test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
+ test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
- test_node.get_data([tip])
+ test_node.send_get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
- # of headers announcements, or mine a new block and inv it, also
+ # of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
- test_node.get_headers(locator=[tip], hashstop=0)
+ test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
- assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
- assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+ inv_node.check_last_announcement(inv=[tip], headers=[])
+ test_node.check_last_announcement(headers=[tip])
self.log.info("Part 3: success!")
@@ -404,7 +455,7 @@ class SendHeadersTest(BitcoinTestFramework):
height += 1
inv_node.send_message(msg_block(blocks[-1]))
- inv_node.sync_with_ping() # Make sure blocks are processed
+ inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
@@ -423,9 +474,9 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
- test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
+ test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)
- [ test_node.send_message(msg_block(x)) for x in blocks ]
+ [test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
@@ -454,13 +505,13 @@ class SendHeadersTest(BitcoinTestFramework):
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
- test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
+ test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
- test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
+ test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)
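The 16-headers/14-blocks arithmetic above rests on an assumption about the node side: that per-peer in-flight block downloads are capped at 16 (MAX_BLOCKS_IN_TRANSIT_PER_PEER in net_processing at the time of this patch), so with the two blocks just requested only 14 slots remain. A quick sanity sketch of that assumed accounting:

    MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16  # assumed node-side constant
    already_in_flight = 2                # the two blocks fetched just above
    announced = 16
    expected = min(announced, MAX_BLOCKS_IN_TRANSIT_PER_PEER - already_in_flight)
    assert expected == 14                # matches blocks[2:16] in wait_for_getdata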
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
@@ -472,7 +523,7 @@ class SendHeadersTest(BitcoinTestFramework):
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
- [ test_node.send_message(msg_block(x)) for x in blocks ]
+ [test_node.send_message(msg_block(x)) for x in blocks]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
@@ -494,7 +545,7 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
- [ test_node.send_message(msg_block(x)) for x in blocks ]
+ [test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
@@ -502,7 +553,7 @@ class SendHeadersTest(BitcoinTestFramework):
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
- for j in range(MAX_UNCONNECTING_HEADERS+1):
+ for j in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -524,11 +575,11 @@ class SendHeadersTest(BitcoinTestFramework):
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
- for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
+ for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
- test_node.send_header_for_blocks([blocks[i%len(blocks)]])
+ test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
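The 5 * MAX_UNCONNECTING_HEADERS expectation depends on node behaviour outside this file: assuming the node adds 20 misbehaviour points for every MAX_UNCONNECTING_HEADERS unconnecting headers messages from a peer and disconnects at the default 100-point threshold, the peer survives roughly five such batches before being dropped. A sketch of that assumed accounting:

    MAX_UNCONNECTING_HEADERS = 10
    PENALTY_PER_BATCH = 20   # assumed misbehaviour score per batch of unconnecting headers
    BAN_THRESHOLD = 100      # assumed default -banscore
    batches_tolerated = BAN_THRESHOLD // PENALTY_PER_BATCH
    assert batches_tolerated * MAX_UNCONNECTING_HEADERS == 5 * MAX_UNCONNECTING_HEADERS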