-rw-r--r--  src/net_processing.cpp                             98
-rw-r--r--  test/README.md                                      4
-rwxr-xr-x  test/functional/test_framework/mininode.py          6
-rwxr-xr-x  test/functional/test_framework/test_framework.py   10
-rwxr-xr-x  test/functional/test_framework/test_node.py        14
-rw-r--r--  test/functional/test_framework/util.py              4
6 files changed, 73 insertions(+), 63 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 7e9bb2f27c..6d85b46831 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1608,6 +1608,37 @@ void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, c
}
}
+//! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed).
+CTransactionRef static FindTxForGetData(CNode* peer, const uint256& txid, const std::chrono::seconds mempool_req, const std::chrono::seconds longlived_mempool_time) LOCKS_EXCLUDED(cs_main)
+{
+ // Check if the requested transaction is so recent that we're just
+ // about to announce it to the peer; if so, they certainly shouldn't
+ // know we already have it.
+ {
+ LOCK(peer->m_tx_relay->cs_tx_inventory);
+ if (peer->m_tx_relay->setInventoryTxToSend.count(txid)) return {};
+ }
+
+ {
+ LOCK(cs_main);
+ // Look up transaction in relay pool
+ auto mi = mapRelay.find(txid);
+ if (mi != mapRelay.end()) return mi->second;
+ }
+
+ auto txinfo = mempool.info(txid);
+ if (txinfo.tx) {
+ // To protect privacy, do not answer getdata using the mempool when
+ // that TX couldn't have been INVed in reply to a MEMPOOL request,
+ // or when it's too recent to have expired from mapRelay.
+ if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= longlived_mempool_time) {
+ return txinfo.tx;
+ }
+ }
+
+ return {};
+}
+
void static ProcessGetData(CNode* pfrom, const CChainParams& chainparams, CConnman* connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main)
{
AssertLockNotHeld(cs_main);
@@ -1622,58 +1653,31 @@ void static ProcessGetData(CNode* pfrom, const CChainParams& chainparams, CConnm
const std::chrono::seconds mempool_req = pfrom->m_tx_relay != nullptr ? pfrom->m_tx_relay->m_last_mempool_req.load()
: std::chrono::seconds::min();
- {
- LOCK(cs_main);
+ // Process as many TX items from the front of the getdata queue as
+ // possible, since they're common and it's efficient to batch process
+ // them.
+ while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
+ if (interruptMsgProc) return;
+ // The send buffer provides backpressure. If there's no space in
+ // the buffer, pause processing until the next call.
+ if (pfrom->fPauseSend) break;
- // Process as many TX items from the front of the getdata queue as
- // possible, since they're common and it's efficient to batch process
- // them.
- while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
- if (interruptMsgProc)
- return;
- // The send buffer provides backpressure. If there's no space in
- // the buffer, pause processing until the next call.
- if (pfrom->fPauseSend)
- break;
-
- const CInv &inv = *it++;
+ const CInv &inv = *it++;
- if (pfrom->m_tx_relay == nullptr) {
- // Ignore GETDATA requests for transactions from blocks-only peers.
- continue;
- }
+ if (pfrom->m_tx_relay == nullptr) {
+ // Ignore GETDATA requests for transactions from blocks-only peers.
+ continue;
+ }
- // Send stream from relay memory
- bool push = false;
- auto mi = mapRelay.find(inv.hash);
+ CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, mempool_req, longlived_mempool_time);
+ if (tx) {
int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
- if (mi != mapRelay.end()) {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
- push = true;
- } else {
- auto txinfo = mempool.info(inv.hash);
- // To protect privacy, do not answer getdata using the mempool when
- // that TX couldn't have been INVed in reply to a MEMPOOL request,
- // or when it's too recent to have expired from mapRelay.
- if (txinfo.tx && (
- (mempool_req.count() && txinfo.m_time <= mempool_req)
- || (txinfo.m_time <= longlived_mempool_time)))
- {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
- push = true;
- }
- }
-
- if (push) {
- // We interpret fulfilling a GETDATA for a transaction as a
- // successful initial broadcast and remove it from our
- // unbroadcast set.
- mempool.RemoveUnbroadcastTx(inv.hash);
- } else {
- vNotFound.push_back(inv);
- }
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
+ mempool.RemoveUnbroadcastTx(inv.hash);
+ } else {
+ vNotFound.push_back(inv);
}
- } // release cs_main
+ }
// Only process one BLOCK item per call, since they're uncommon and can be
// expensive to process.
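Viewed from the peer's side, the new FindTxForGetData check means a GETDATA for a transaction that is still sitting in setInventoryTxToSend (i.e. not yet announced to that peer) is answered with NOTFOUND instead of the transaction. A minimal functional-test sketch of that behaviour, written against the framework files touched below, might look like the following; the test class, the wallet calls, and the immediate-request timing are illustrative assumptions rather than part of this change:

```python
#!/usr/bin/env python3
# Hypothetical sketch (not part of this patch): a peer that requests a
# transaction before the node has announced it should receive NOTFOUND.
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework

MSG_TX = 1  # inv/getdata type for transactions


class GetdataUnannouncedTxSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PInterface())

        # Create a transaction the node has, but has (very likely) not yet
        # announced to our freshly connected peer.
        txid = node.sendtoaddress(node.getnewaddress(), 1)

        # Ask for it immediately; FindTxForGetData refuses to serve it while
        # it is still queued in setInventoryTxToSend, so we expect NOTFOUND.
        peer.send_message(msg_getdata([CInv(MSG_TX, int(txid, 16))]))
        peer.sync_with_ping()
        with mininode_lock:
            assert "notfound" in peer.last_message


if __name__ == '__main__':
    GetdataUnannouncedTxSketch().main()
```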
diff --git a/test/README.md b/test/README.md
index e1dab92a06..0210907878 100644
--- a/test/README.md
+++ b/test/README.md
@@ -225,6 +225,10 @@ gdb /home/example/bitcoind <pid>
Note: gdb attach step may require ptrace_scope to be modified, or `sudo` preceding the `gdb`.
See this link for considerations: https://www.kernel.org/doc/Documentation/security/Yama.txt
+Often while debugging RPC calls from functional tests, the test might reach its timeout before
+the process can return a response. Use `--timeout-factor 0` to disable all RPC timeouts for that particular
+functional test. Ex: `test/functional/wallet_hd.py --timeout-factor 0`.
+
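For reference, the scaling this option applies (see the framework changes below) is a plain multiplier on every timeout the test framework uses, with 0 mapped to a very large factor. A condensed, standalone sketch of that arithmetic, with assumed base values:

```python
# Condensed sketch of how --timeout-factor is applied (base timeout assumed).
def effective_timeout(base_timeout, timeout_factor):
    if timeout_factor == 0:
        timeout_factor = 99999  # "0" means: effectively disable the timeout
    return base_timeout * timeout_factor

assert effective_timeout(60, 1.0) == 60         # default behaviour
assert effective_timeout(60, 3.0) == 180        # e.g. slow machines or debug builds
assert effective_timeout(60, 0) == 60 * 99999   # --timeout-factor 0
```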
##### Profiling
An easy way to profile node performance during functional tests is provided
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index ba0391625e..bbd7350bf1 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -122,9 +122,9 @@ class P2PConnection(asyncio.Protocol):
def is_connected(self):
return self._transport is not None
- def peer_connect(self, dstaddr, dstport, *, net, factor):
+ def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
assert not self.is_connected
- self.factor = factor
+ self.timeout_factor = timeout_factor
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
@@ -372,7 +372,7 @@ class P2PInterface(P2PConnection):
# Connection helper methods
def wait_until(self, test_function, timeout):
- wait_until(test_function, timeout=timeout, lock=mininode_lock, factor=self.factor)
+ wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor)
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index c84a7e7c12..6126efd842 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -102,7 +102,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.bind_to_localhost_only = True
self.set_test_params()
self.parse_args()
- self.rpc_timeout = int(self.rpc_timeout * self.options.factor) # optionally, increase timeout by a factor
+ if self.options.timeout_factor == 0:
+ self.options.timeout_factor = 99999
+ self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
@@ -169,7 +171,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
help="set a random seed for deterministically reproducing a previous test run")
parser.add_argument("--descriptors", default=False, action="store_true",
help="Run test using a descriptor wallet")
- parser.add_argument('--factor', type=float, default=1.0, help='adjust test timeouts by a factor')
+ parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
self.add_options(parser)
self.options = parser.parse_args()
@@ -445,7 +447,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
chain=self.chain,
rpchost=rpchost,
timewait=self.rpc_timeout,
- factor=self.options.factor,
+ timeout_factor=self.options.timeout_factor,
bitcoind=binary[i],
bitcoin_cli=binary_cli[i],
version=versions[i],
@@ -592,7 +594,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
extra_args=['-disablewallet'],
rpchost=None,
timewait=self.rpc_timeout,
- factor=self.options.factor,
+ timeout_factor=self.options.timeout_factor,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 826faece68..d52aff6f7e 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -62,7 +62,7 @@ class TestNode():
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
- def __init__(self, i, datadir, *, chain, rpchost, timewait, factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False):
+ def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
@@ -128,7 +128,7 @@ class TestNode():
self.perf_subprocesses = {}
self.p2ps = []
- self.factor = factor
+ self.timeout_factor = timeout_factor
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
@@ -241,7 +241,7 @@ class TestNode():
# The wait is done here to make tests as robust as possible
# and prevent racy tests and intermittent failures as much
# as possible. Some tests might not need this, but the
- # overhead is trivial, and the added gurantees are worth
+ # overhead is trivial, and the added guarantees are worth
# the minimal performance cost.
self.log.debug("RPC successfully started")
if self.use_cli:
@@ -349,13 +349,13 @@ class TestNode():
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
- wait_until(self.is_node_stopped, timeout=timeout, factor=self.factor)
+ wait_until(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
if unexpected_msgs is None:
unexpected_msgs = []
- time_end = time.time() + timeout * self.factor
+ time_end = time.time() + timeout * self.timeout_factor
debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
@@ -512,7 +512,7 @@ class TestNode():
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
- p2p_conn.peer_connect(**kwargs, net=self.chain, factor=self.factor)()
+ p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
# Wait for the node to send us the version and verack
@@ -526,7 +526,7 @@ class TestNode():
# transaction that will be added to the mempool as soon as we return here.
#
# So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
- # in comparision to the upside of making tests less fragile and unexpected intermittent errors less likely.
+ # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
p2p_conn.sync_with_ping()
return p2p_conn
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 7466a3cab3..6dfea7efd2 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -208,10 +208,10 @@ def str_to_b64str(string):
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
-def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, factor=1.0):
+def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
- timeout = timeout * factor
+ timeout = timeout * timeout_factor
attempt = 0
time_end = time.time() + timeout
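
Call sites that previously passed `factor=` now pass `timeout_factor=`; a small usage sketch of the renamed keyword (the predicate and numbers are illustrative):

```python
# Illustrative use of the renamed keyword; the deadline becomes 5 * 2.0 = 10 seconds.
import time

from test_framework.util import wait_until

start = time.time()
# Polls the predicate until it returns True, or raises after the scaled timeout.
wait_until(lambda: time.time() - start > 1, timeout=5, timeout_factor=2.0)
```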