-rw-r--r--   configure.ac                                 |  4
-rw-r--r--   src/net.cpp                                  |  5
-rwxr-xr-x   test/functional/feature_init.py              | 46
-rwxr-xr-x   test/functional/p2p_getaddr_caching.py       | 66
-rwxr-xr-x   test/functional/test_framework/test_node.py  |  7
5 files changed, 87 insertions(+), 41 deletions(-)
diff --git a/configure.ac b/configure.ac
index e09ef14156..f6d00d0283 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1454,6 +1454,10 @@ if test "$use_boost" = "yes"; then
dnl we don't use multi_index serialization
BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION"
+ if test "$enable_debug" = "yes" || test "$enable_fuzz" = "yes"; then
+ BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE"
+ fi
+
if test "$suppress_external_warnings" != "no"; then
BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS)
fi
diff --git a/src/net.cpp b/src/net.cpp
index 00f2136f4a..82b5a69eb5 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -2814,8 +2814,11 @@ std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addres
{
auto local_socket_bytes = requestor.addrBind.GetAddrBytes();
uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE)
- .Write(requestor.addr.GetNetwork())
+ .Write(requestor.ConnectedThroughNetwork())
.Write(local_socket_bytes.data(), local_socket_bytes.size())
+ // For outbound connections, the port of the bound address is randomly
+ // assigned by the OS and would therefore not be useful for seeding.
+ .Write(requestor.IsInboundConn() ? requestor.addrBind.GetPort() : 0)
.Finalize();
const auto current_time = GetTime<std::chrono::microseconds>();
auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{});
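The net.cpp hunk above keys the cached GETADDR response on the network the peer actually connected through, the local address it connected to, and, for inbound peers only, the local bind port, so each -bind endpoint gets its own cache entry. Below is a minimal Python sketch of that keying idea; the helper name addr_cache_id is hypothetical, and hashlib's SHA-256 merely stands in for the deterministic SipHash randomizer used in the real code.

    import hashlib

    def addr_cache_id(network: str, local_addr_bytes: bytes, local_port: int, is_inbound: bool) -> bytes:
        # Mix the same ingredients the real code feeds into its hasher.
        h = hashlib.sha256()
        h.update(network.encode())
        h.update(local_addr_bytes)
        # For outbound connections the bound port is an ephemeral one chosen by
        # the OS, so it carries no useful information; write 0 instead.
        h.update((local_port if is_inbound else 0).to_bytes(2, "little"))
        return h.digest()

    # Two inbound peers reaching different onion binds on the same host
    # therefore map to different cache entries.
    assert addr_cache_id("onion", b"\x7f\x00\x00\x01", 10000, True) != \
           addr_cache_id("onion", b"\x7f\x00\x00\x01", 10001, True)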
diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py
index d0cb1e10e2..13c7326519 100755
--- a/test/functional/feature_init.py
+++ b/test/functional/feature_init.py
@@ -49,33 +49,33 @@ class InitStressTest(BitcoinTestFramework):
assert_equal(200, node.getblockcount())
lines_to_terminate_after = [
- 'Validating signatures for all blocks',
- 'scheduler thread start',
- 'Starting HTTP server',
- 'Loading P2P addresses',
- 'Loading banlist',
- 'Loading block index',
- 'Switching active chainstate',
- 'Checking all blk files are present',
- 'Loaded best chain:',
- 'init message: Verifying blocks',
- 'init message: Starting network threads',
- 'net thread start',
- 'addcon thread start',
- 'loadblk thread start',
- 'txindex thread start',
- 'block filter index thread start',
- 'coinstatsindex thread start',
- 'msghand thread start',
- 'net thread start',
- 'addcon thread start',
+ b'Validating signatures for all blocks',
+ b'scheduler thread start',
+ b'Starting HTTP server',
+ b'Loading P2P addresses',
+ b'Loading banlist',
+ b'Loading block index',
+ b'Switching active chainstate',
+ b'Checking all blk files are present',
+ b'Loaded best chain:',
+ b'init message: Verifying blocks',
+ b'init message: Starting network threads',
+ b'net thread start',
+ b'addcon thread start',
+ b'loadblk thread start',
+ b'txindex thread start',
+ b'block filter index thread start',
+ b'coinstatsindex thread start',
+ b'msghand thread start',
+ b'net thread start',
+ b'addcon thread start',
]
if self.is_wallet_compiled():
- lines_to_terminate_after.append('Verifying wallet')
+ lines_to_terminate_after.append(b'Verifying wallet')
for terminate_line in lines_to_terminate_after:
- self.log.info(f"Starting node and will exit after line '{terminate_line}'")
- with node.wait_for_debug_log([terminate_line], ignore_case=True):
+ self.log.info(f"Starting node and will exit after line {terminate_line}")
+ with node.wait_for_debug_log([terminate_line]):
node.start(extra_args=['-txindex=1', '-blockfilterindex=1', '-coinstatsindex=1'])
self.log.debug("Terminating node after terminate line was found")
sigterm_node()
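feature_init.py now passes its expected fragments as bytes because wait_for_debug_log (see the test_node.py hunk below) reads debug.log in binary mode and does a plain substring check; with the ignore_case option gone, fragments must also match the log's capitalization exactly. The short sketch below, using a made-up log tail, illustrates why byte-level matching is more robust when the node is killed mid-write:

    # Hypothetical tail of a debug.log cut off mid-write: the last UTF-8
    # sequence is incomplete, so decoding it as text fails.
    log_tail = b"init message: Verifying blocks\xe2\x80"
    try:
        log_tail.decode("utf-8")
    except UnicodeDecodeError:
        pass  # a text-mode read of the file would fail right here
    # A byte-level substring check still finds the expected fragment.
    assert b"Verifying blocks" in log_tail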
diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py
index d375af6fe1..c934a97729 100755
--- a/test/functional/p2p_getaddr_caching.py
+++ b/test/functional/p2p_getaddr_caching.py
@@ -14,6 +14,8 @@ from test_framework.p2p import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
+ PORT_MIN,
+ PORT_RANGE,
)
# As defined in net_processing.
@@ -42,6 +44,13 @@ class AddrReceiver(P2PInterface):
class AddrTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ # Start onion ports after p2p and rpc ports.
+ port = PORT_MIN + 2 * PORT_RANGE
+ self.onion_port1 = port
+ self.onion_port2 = port + 1
+ self.extra_args = [
+ [f"-bind=127.0.0.1:{self.onion_port1}=onion", f"-bind=127.0.0.1:{self.onion_port2}=onion"],
+ ]
def run_test(self):
self.log.info('Fill peer AddrMan with a lot of records')
@@ -55,35 +64,66 @@ class AddrTest(BitcoinTestFramework):
# only a fraction of all known addresses can be cached and returned.
assert(len(self.nodes[0].getnodeaddresses(0)) > int(MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100)))
- responses = []
+ last_response_on_local_bind = None
+ last_response_on_onion_bind1 = None
+ last_response_on_onion_bind2 = None
self.log.info('Send many addr requests within short time to receive same response')
N = 5
cur_mock_time = int(time.time())
for i in range(N):
- addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
- addr_receiver.send_and_ping(msg_getaddr())
+ addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())
+ addr_receiver_local.send_and_ping(msg_getaddr())
+ addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)
+ addr_receiver_onion1.send_and_ping(msg_getaddr())
+ addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)
+ addr_receiver_onion2.send_and_ping(msg_getaddr())
+
# Trigger response
cur_mock_time += 5 * 60
self.nodes[0].setmocktime(cur_mock_time)
- addr_receiver.wait_until(addr_receiver.addr_received)
- responses.append(addr_receiver.get_received_addrs())
- for response in responses[1:]:
- assert_equal(response, responses[0])
- assert(len(response) == MAX_ADDR_TO_SEND)
+ addr_receiver_local.wait_until(addr_receiver_local.addr_received)
+ addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)
+ addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)
+
+ if i > 0:
+ # Responses from different binds should be unique
+ assert(last_response_on_local_bind != addr_receiver_onion1.get_received_addrs())
+ assert(last_response_on_local_bind != addr_receiver_onion2.get_received_addrs())
+ assert(last_response_on_onion_bind1 != addr_receiver_onion2.get_received_addrs())
+ # Responses from the same bind should be the same
+ assert_equal(last_response_on_local_bind, addr_receiver_local.get_received_addrs())
+ assert_equal(last_response_on_onion_bind1, addr_receiver_onion1.get_received_addrs())
+ assert_equal(last_response_on_onion_bind2, addr_receiver_onion2.get_received_addrs())
+
+ last_response_on_local_bind = addr_receiver_local.get_received_addrs()
+ last_response_on_onion_bind1 = addr_receiver_onion1.get_received_addrs()
+ last_response_on_onion_bind2 = addr_receiver_onion2.get_received_addrs()
+
+ for response in [last_response_on_local_bind, last_response_on_onion_bind1, last_response_on_onion_bind2]:
+ assert_equal(len(response), MAX_ADDR_TO_SEND)
cur_mock_time += 3 * 24 * 60 * 60
self.nodes[0].setmocktime(cur_mock_time)
self.log.info('After time passed, see a new response to addr request')
- last_addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
- last_addr_receiver.send_and_ping(msg_getaddr())
+ addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())
+ addr_receiver_local.send_and_ping(msg_getaddr())
+ addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)
+ addr_receiver_onion1.send_and_ping(msg_getaddr())
+ addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)
+ addr_receiver_onion2.send_and_ping(msg_getaddr())
+
# Trigger response
cur_mock_time += 5 * 60
self.nodes[0].setmocktime(cur_mock_time)
- last_addr_receiver.wait_until(last_addr_receiver.addr_received)
- # new response is different
- assert(set(responses[0]) != set(last_addr_receiver.get_received_addrs()))
+ addr_receiver_local.wait_until(addr_receiver_local.addr_received)
+ addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)
+ addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)
+ # new response is different
+ assert(set(last_response_on_local_bind) != set(addr_receiver_local.get_received_addrs()))
+ assert(set(last_response_on_onion_bind1) != set(addr_receiver_onion1.get_received_addrs()))
+ assert(set(last_response_on_onion_bind2) != set(addr_receiver_onion2.get_received_addrs()))
if __name__ == '__main__':
AddrTest().main()
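The two extra onion binds are placed two PORT_RANGE blocks above PORT_MIN because the test framework hands out p2p ports from the first block and rpc ports from the second. A small sketch of that layout follows; the concrete constants are illustrative assumptions, the real values come from test_framework.util.

    # Assumed port layout: p2p ports fill the first block, rpc ports the
    # second, so extra -bind ports start in the third.
    PORT_MIN = 11000      # illustrative value
    PORT_RANGE = 5000     # illustrative value
    p2p_block = range(PORT_MIN, PORT_MIN + PORT_RANGE)
    rpc_block = range(PORT_MIN + PORT_RANGE, PORT_MIN + 2 * PORT_RANGE)

    onion_port1 = PORT_MIN + 2 * PORT_RANGE  # first port clear of both blocks
    onion_port2 = onion_port1 + 1
    assert onion_port1 not in p2p_block and onion_port1 not in rpc_block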
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 7d2db391b6..03f6c8adea 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -423,7 +423,7 @@ class TestNode():
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
@contextlib.contextmanager
- def wait_for_debug_log(self, expected_msgs, timeout=60, ignore_case=False):
+ def wait_for_debug_log(self, expected_msgs, timeout=60):
"""
Block until we see a particular debug log message fragment or until we exceed the timeout.
Return:
@@ -431,18 +431,17 @@ class TestNode():
"""
time_end = time.time() + timeout * self.timeout_factor
prev_size = self.debug_log_bytes()
- re_flags = re.MULTILINE | (re.IGNORECASE if ignore_case else 0)
yield
while True:
found = True
- with open(self.debug_log_path, encoding='utf-8') as dl:
+ with open(self.debug_log_path, "rb") as dl:
dl.seek(prev_size)
log = dl.read()
for expected_msg in expected_msgs:
- if re.search(re.escape(expected_msg), log, flags=re_flags) is None:
+ if expected_msg not in log:
found = False
if found: