path: root/test
author     Wladimir J. van der Laan <laanwj@protonmail.com>  2020-10-14 17:42:30 +0200
committer  Wladimir J. van der Laan <laanwj@protonmail.com>  2020-10-14 18:36:59 +0200
commit     c2c4dbaebd955ad2829364f7fa5b8169ca1ba6b9 (patch)
tree       2932ddeda0c1c8721df357632c7310b6b68073bf /test
parent     99a1d572eabca89790216b3919a237e07063a376 (diff)
parent     fd9a0060f028a4c01bd88f58777dea34bdcbafd1 (diff)
Merge #19988: Overhaul transaction request logic
fd9a0060f028a4c01bd88f58777dea34bdcbafd1 Report and verify expirations (Pieter Wuille)
86f50ed10f66b5535f0162cf0026456a9e3f8963 Delete limitedmap as it is unused now (Pieter Wuille)
cc16fff3e476a9378d2176b3c1b83ad12b1b052a Make txid delay penalty also apply to fetches of orphan's parents (Pieter Wuille)
173a1d2d3f824b83777ac713e89bee69fd87692d Expedite removal of tx requests that are no longer needed (Pieter Wuille)
de11b0a4eff20da3e3ca52dc90948b5253d329c5 Reduce MAX_PEER_TX_ANNOUNCEMENTS for non-PF_RELAY peers (Pieter Wuille)
242d16477df1a024c7126bad23dde39cad217eca Change transaction request logic to use txrequest (Pieter Wuille)
5b03121d60527a193a84c339151481f9c9c1962b Add txrequest fuzz tests (Pieter Wuille)
3c7fe0e5a0ee1abf4dc263ae5310e68253c866e1 Add txrequest unit tests (Pieter Wuille)
da3b8fde03f2e8060bb7ff3bff17175dab85f0cd Add txrequest module (Pieter Wuille)

Pull request description:

  This replaces the transaction request logic with an encapsulated class that maintains all the state surrounding it. By keeping it stand-alone, it can be easily tested (using the included unit tests and fuzz tests).

  The major changes are:

  * Announcements from outbound (and whitelisted) peers are now always preferred over those from inbound peers. This used to be the case for the first request (by delaying the first request from inbound peers), and a bias afterwards. The 2s delay for requests from inbound peers still exists, but after that, if viable outbound peers remain for any given transaction, they will always be tried first.
  * No more hard cap of 100 in-flight transactions per peer, as there is less need for it (memory usage is linear in the number of announcements, but independent of the number in flight, and CPU usage isn't affected by it). Furthermore, if only one peer announces a transaction, and it has over 100 in flight already, we still want to request it from them. The cap is replaced with a rule that announcements from such overloaded peers get an additional 2s delay (possibly combined with the existing 2s delays for inbound connections, and for txid peers when wtxid peers are available).
  * The limit of 100000 tracked announcements is reduced to 5000; this was excessive. This can be bypassed using the PF_RELAY permission (to accommodate locally dumping a batch of many transactions).

  This replaces #19184, rebased on #18044 and with many small changes.

ACKs for top commit:
  ariard:
    Code Review ACK fd9a006. I've reviewed the new TxRequestTracker, its integration in net_processing, and the unit/functional/fuzzing test coverage. I looked more for soundness of the new specification than for functional consistency with the old transaction request logic.
  MarcoFalke:
    Approach ACK fd9a0060f028a4c01bd88f58777dea34bdcbafd1 🏹
  naumenkogs:
    Code Review ACK fd9a006. I've reviewed everything, mostly to see how this stuff works at the lower level (less documentation-wise, more implementation-wise), and to try breaking it with unexpected sequences of events.
  jnewbery:
    utACK fd9a0060f028a4c01bd88f58777dea34bdcbafd1
  jonatack:
    WIP light ACK fd9a0060f028a4c01bd88f58777dea34bdcbafd1: have read the code, verified that each commit is hygienic (e.g. debug build clean and tests green), and have been running a node on and off with this branch and grepping the net debug log. Am still unpacking the discussion hidden by GitHub by fetching it via the API and connecting the dots, storing notes and suggestions in a local branch; at this point none are blockers.
  ryanofsky:
    Light code review ACK fd9a0060f028a4c01bd88f58777dea34bdcbafd1, looking at the txrequest implementation, unit test implementation, and net_processing integration, just trying to understand how it works and looking for anything potentially confusing in the implementation. Didn't look at functional tests or catch up on review discussion. Just a sanity check review focused on:

Tree-SHA512: ea7b52710371498b59d9c9cfb5230dd544fe9c6cb699e69178dea641646104f38a0b5ec7f5f0dbf1eb579b7ec25a31ea420593eff3b7556433daf92d4b0f0dd7
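To make the interaction of the delays described above concrete, the following is a small illustrative Python sketch of how they stack for a single announcement. The announcement_delay() helper is hypothetical, and the constants simply mirror the ones used by the functional test below; the real logic lives in the C++ txrequest module and net_processing, not here.

    # Illustration only: how the announcement delays described above add up.
    # Constant names mirror test/functional/p2p_tx_download.py below.
    INBOUND_PEER_TX_DELAY = 2   # seconds, for announcements from non-preferred (inbound) peers
    TXID_RELAY_DELAY = 2        # seconds, for txid announcements when wtxid peers may exist
    OVERLOADED_PEER_DELAY = 2   # seconds, once a peer already has MAX_GETDATA_IN_FLIGHT requests pending
    MAX_GETDATA_IN_FLIGHT = 100

    def announcement_delay(inbound, by_txid, in_flight):
        """Rough request delay for one announcement, per the rules above (hypothetical helper)."""
        delay = 0
        if inbound:
            delay += INBOUND_PEER_TX_DELAY
        if by_txid:
            delay += TXID_RELAY_DELAY
        if in_flight >= MAX_GETDATA_IN_FLIGHT:
            delay += OVERLOADED_PEER_DELAY
        return delay

    # An overloaded inbound peer announcing by txid waits 2 + 2 + 2 = 6 seconds;
    # an idle outbound peer announcing by wtxid is requested without delay.
    assert announcement_delay(inbound=True, by_txid=True, in_flight=100) == 6
    assert announcement_delay(inbound=False, by_txid=False, in_flight=0) == 0

Independently of these delays, among candidates that are currently allowed to be requested, preferred (outbound or whitelisted) announcements are still tried before inbound ones, as described above.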
Diffstat (limited to 'test')
-rwxr-xr-x  test/functional/p2p_tx_download.py  123
1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 5c3f021b3f..16d9302db8 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -42,15 +42,15 @@ class TestP2PConn(P2PInterface):
# Constants from net_processing
GETDATA_TX_INTERVAL = 60 # seconds
-MAX_GETDATA_RANDOM_DELAY = 2 # seconds
INBOUND_PEER_TX_DELAY = 2 # seconds
TXID_RELAY_DELAY = 2 # seconds
+OVERLOADED_PEER_DELAY = 2 # seconds
MAX_GETDATA_IN_FLIGHT = 100
-TX_EXPIRY_INTERVAL = GETDATA_TX_INTERVAL * 10
+MAX_PEER_TX_ANNOUNCEMENTS = 5000
# Python test constants
NUM_INBOUND = 10
-MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY
+MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY
class TxDownloadTest(BitcoinTestFramework):
@@ -121,14 +121,12 @@ class TxDownloadTest(BitcoinTestFramework):
# * the first time it is re-requested from the outbound peer, plus
# * 2 seconds to avoid races
assert self.nodes[1].getpeerinfo()[0]['inbound'] == False
- timeout = 2 + (MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY) + (
- GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY)
+ timeout = 2 + INBOUND_PEER_TX_DELAY + GETDATA_TX_INTERVAL
self.log.info("Tx should be received at node 1 after {} seconds".format(timeout))
self.sync_mempools(timeout=timeout)
def test_in_flight_max(self):
- self.log.info("Test that we don't request more than {} transactions from any peer, every {} minutes".format(
- MAX_GETDATA_IN_FLIGHT, TX_EXPIRY_INTERVAL / 60))
+ self.log.info("Test that we don't load peers with more than {} transaction requests immediately".format(MAX_GETDATA_IN_FLIGHT))
txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)]
p = self.nodes[0].p2ps[0]
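With MAX_GETDATA_RANDOM_DELAY gone, the timeout computed in the hunk above works out to 64 seconds instead of the previous 68. As a worked check, using the constants from the top of the file:

    GETDATA_TX_INTERVAL = 60   # seconds
    INBOUND_PEER_TX_DELAY = 2  # seconds
    # 2 s race margin + inbound announcement delay + one re-request interval
    timeout = 2 + INBOUND_PEER_TX_DELAY + GETDATA_TX_INTERVAL
    assert timeout == 64
    # previously: 2 + (MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY)
    #               + (GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY)
    #            = 2 + (2 + 2) + (60 + 2) = 68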
@@ -136,31 +134,120 @@ class TxDownloadTest(BitcoinTestFramework):
with p2p_lock:
p.tx_getdata_count = 0
- p.send_message(msg_inv([CInv(t=MSG_WTX, h=i) for i in txids]))
+ mock_time = int(time.time() + 1)
+ self.nodes[0].setmocktime(mock_time)
+ for i in range(MAX_GETDATA_IN_FLIGHT):
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
+ p.sync_with_ping()
+ mock_time += INBOUND_PEER_TX_DELAY
+ self.nodes[0].setmocktime(mock_time)
p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT)
+ for i in range(MAX_GETDATA_IN_FLIGHT, len(txids)):
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
+ p.sync_with_ping()
+ self.log.info("No more than {} requests should be seen within {} seconds after announcement".format(MAX_GETDATA_IN_FLIGHT, INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1))
+ self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1)
+ p.sync_with_ping()
with p2p_lock:
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)
+ self.log.info("If we wait {} seconds after announcement, we should eventually get more requests".format(INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY))
+ self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY)
+ p.wait_until(lambda: p.tx_getdata_count == len(txids))
- self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request")
- p.send_message(msg_notfound(vec=[CInv(t=MSG_WTX, h=txids[0])]))
- p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10)
+ def test_expiry_fallback(self):
+ self.log.info('Check that expiry will select another peer for download')
+ WTXID = 0xffaa
+ peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ for p in [peer1, peer2]:
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
+ # One of the peers is asked for the tx
+ peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
with p2p_lock:
- assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1)
+ peer_expiry, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
+ assert_equal(peer_fallback.tx_getdata_count, 0)
+ self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1) # Wait for request to peer_expiry to expire
+ peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer_fallback.tx_getdata_count, 1)
+ self.restart_node(0) # reset mocktime
- WAIT_TIME = TX_EXPIRY_INTERVAL // 2 + TX_EXPIRY_INTERVAL
- self.log.info("if we wait about {} minutes, we should eventually get more requests".format(WAIT_TIME / 60))
- self.nodes[0].setmocktime(int(time.time() + WAIT_TIME))
- p.wait_until(lambda: p.tx_getdata_count == MAX_GETDATA_IN_FLIGHT + 2)
- self.nodes[0].setmocktime(0)
+ def test_disconnect_fallback(self):
+ self.log.info('Check that disconnect will select another peer for download')
+ WTXID = 0xffbb
+ peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ for p in [peer1, peer2]:
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
+ # One of the peers is asked for the tx
+ peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
+ with p2p_lock:
+ peer_disconnect, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
+ assert_equal(peer_fallback.tx_getdata_count, 0)
+ peer_disconnect.peer_disconnect()
+ peer_disconnect.wait_for_disconnect()
+ peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer_fallback.tx_getdata_count, 1)
+
+ def test_notfound_fallback(self):
+ self.log.info('Check that notfounds will select another peer for download immediately')
+ WTXID = 0xffdd
+ peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ for p in [peer1, peer2]:
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
+ # One of the peers is asked for the tx
+ peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
+ with p2p_lock:
+ peer_notfound, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
+ assert_equal(peer_fallback.tx_getdata_count, 0)
+ peer_notfound.send_and_ping(msg_notfound(vec=[CInv(MSG_WTX, WTXID)])) # Send notfound, so that fallback peer is selected
+ peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer_fallback.tx_getdata_count, 1)
+
+ def test_preferred_inv(self):
+ self.log.info('Check that invs from preferred peers are downloaded immediately')
+ self.restart_node(0, extra_args=['-whitelist=noban@127.0.0.1'])
+ peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=0xff00ff00)]))
+ peer.wait_until(lambda: peer.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer.tx_getdata_count, 1)
+
+ def test_large_inv_batch(self):
+ self.log.info('Test how large inv batches are handled with relay permission')
+ self.restart_node(0, extra_args=['-whitelist=relay@127.0.0.1'])
+ peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
+ peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS + 1)
+
+ self.log.info('Test how large inv batches are handled without relay permission')
+ self.restart_node(0)
+ peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
+ peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS)
+ peer.sync_with_ping()
+ with p2p_lock:
+ assert_equal(peer.tx_getdata_count, MAX_PEER_TX_ANNOUNCEMENTS)
def test_spurious_notfound(self):
self.log.info('Check that spurious notfound is ignored')
self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
def run_test(self):
+ # Run tests without mocktime that only need one peer-connection first, to avoid restarting the nodes
+ self.test_expiry_fallback()
+ self.test_disconnect_fallback()
+ self.test_notfound_fallback()
+ self.test_preferred_inv()
+ self.test_large_inv_batch()
+ self.test_spurious_notfound()
+
# Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when
# the next trickle relay event happens.
- for test in [self.test_spurious_notfound, self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]:
+ for test in [self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]:
self.stop_nodes()
self.start_nodes()
self.connect_nodes(1, 0)
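The assertions throughout this diff read a tx_getdata_count attribute maintained by TestP2PConn, which is defined earlier in p2p_tx_download.py and therefore not visible in these hunks. A minimal sketch of such a getdata-counting connection, assuming the test framework modules this file already uses (test_framework.p2p, test_framework.messages), could look like the following; the class name GetdataCountingConn and its details are illustrative rather than a copy of TestP2PConn.

    from test_framework.messages import MSG_TX, MSG_WTX, MSG_TYPE_MASK
    from test_framework.p2p import P2PInterface

    class GetdataCountingConn(P2PInterface):
        """Sketch of a P2P connection that counts tx/wtx getdata requests,
        in the spirit of the TestP2PConn used by the hunks above."""
        def __init__(self):
            super().__init__()
            self.tx_getdata_count = 0

        def on_getdata(self, message):
            # Handlers run serially on the framework's network thread; the
            # tests read this counter under p2p_lock, as the hunks above do.
            for inv in message.inv:
                if (inv.type & MSG_TYPE_MASK) in (MSG_TX, MSG_WTX):
                    self.tx_getdata_count += 1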