Diffstat (limited to 'test/functional/test_framework')
-rw-r--r--   test/functional/test_framework/address.py        |   2
-rw-r--r--   test/functional/test_framework/blockstore.py     |   2
-rw-r--r--   test/functional/test_framework/blocktools.py     |  66
-rwxr-xr-x   test/functional/test_framework/comptool.py       | 129
-rw-r--r--   test/functional/test_framework/coverage.py       |   2
-rw-r--r--   test/functional/test_framework/messages.py       |  45
-rwxr-xr-x   test/functional/test_framework/mininode.py       | 402
-rw-r--r--   test/functional/test_framework/netutil.py        |   2
-rw-r--r--   test/functional/test_framework/script.py         |   4
-rw-r--r--   test/functional/test_framework/siphash.py        |   2
-rw-r--r--   test/functional/test_framework/socks5.py         |   2
-rwxr-xr-x   test/functional/test_framework/test_framework.py |  50
-rwxr-xr-x   test/functional/test_framework/test_node.py      | 101
-rw-r--r--   test/functional/test_framework/util.py           |   7
14 files changed, 467 insertions, 349 deletions
diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py
index 2e2db5ffb2..b076c9dd43 100644
--- a/test/functional/test_framework/address.py
+++ b/test/functional/test_framework/address.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2016 The Bitcoin Core developers
+# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
diff --git a/test/functional/test_framework/blockstore.py b/test/functional/test_framework/blockstore.py
index 051c57a6c7..6067a407cc 100644
--- a/test/functional/test_framework/blockstore.py
+++ b/test/functional/test_framework/blockstore.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""BlockStore and TxStore helper classes."""
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index 5dcf516dc6..642ef98a27 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -1,11 +1,27 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
+from .address import (
+ key_to_p2sh_p2wpkh,
+ key_to_p2wpkh,
+ script_to_p2sh_p2wsh,
+ script_to_p2wsh,
+)
from .mininode import *
-from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN
+from .script import (
+ CScript,
+ OP_0,
+ OP_1,
+ OP_CHECKMULTISIG,
+ OP_CHECKSIG,
+ OP_RETURN,
+ OP_TRUE,
+ hash160,
+)
+from .util import assert_equal
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
@@ -108,3 +124,49 @@ def get_legacy_sigopcount_tx(tx, fAccurate=True):
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
+
+# Create a scriptPubKey corresponding to either a P2WPKH output for the
+# given pubkey, or a P2WSH output of a 1-of-1 multisig for the given
+# pubkey. Returns the hex encoding of the scriptPubKey.
+def witness_script(use_p2wsh, pubkey):
+ if (use_p2wsh == False):
+ # P2WPKH instead
+ pubkeyhash = hash160(hex_str_to_bytes(pubkey))
+ pkscript = CScript([OP_0, pubkeyhash])
+ else:
+ # 1-of-1 multisig
+ witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
+ scripthash = sha256(witness_program)
+ pkscript = CScript([OP_0, scripthash])
+ return bytes_to_hex_str(pkscript)
+
+# Return a transaction (in hex) that spends the given utxo to a segwit output,
+# optionally wrapping the segwit output using P2SH.
+def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
+ if use_p2wsh:
+ program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
+ addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
+ else:
+ addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
+ if not encode_p2sh:
+ assert_equal(node.validateaddress(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
+ return node.createrawtransaction([utxo], {addr: amount})
+
+# Create a transaction spending a given utxo to a segwit output corresponding
+# to the given pubkey: use_p2wsh determines whether to use P2WPKH or P2WSH;
+# encode_p2sh determines whether to wrap in P2SH.
+# sign=True will have the given node sign the transaction.
+# insert_redeem_script will be added to the scriptSig, if given.
+def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
+ tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
+ if (sign):
+ signed = node.signrawtransaction(tx_to_witness)
+ assert("errors" not in signed or len(["errors"]) == 0)
+ return node.sendrawtransaction(signed["hex"])
+ else:
+ if (insert_redeem_script):
+ tx = FromHex(CTransaction(), tx_to_witness)
+ tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
+ tx_to_witness = ToHex(tx)
+
+ return node.sendrawtransaction(tx_to_witness)
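
The helpers added above can be exercised directly from a test's run_test(). Below is a minimal sketch, not part of this diff, assuming a running regtest TestNode `node`, a confirmed `utxo` entry as returned by listunspent, and a hex-encoded `pubkey` belonging to that node's wallet:

    from decimal import Decimal

    from test_framework.blocktools import send_to_witness, witness_script

    def spend_utxo_to_p2wpkh(node, utxo, pubkey):
        # Hex scriptPubKey expected for a native P2WPKH output paying `pubkey`.
        expected_spk = witness_script(False, pubkey)
        # Spend `utxo` to that output (no P2SH wrapping), signed by the node,
        # leaving 0.001 BTC for the fee. Returns the txid of the broadcast tx.
        txid = send_to_witness(False, node, utxo, pubkey, False,
                               utxo["amount"] - Decimal("0.001"))
        return txid, expected_spk

witness_script(False, pubkey) returns the hex scriptPubKey the node is expected to report for the corresponding P2WPKH address, which is what create_witness_tx asserts when encode_p2sh is False.
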
diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py
index 723826bae4..61ea2280e2 100755
--- a/test/functional/test_framework/comptool.py
+++ b/test/functional/test_framework/comptool.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Compare two or more bitcoinds to each other.
@@ -39,11 +39,10 @@ class RejectResult():
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
-class TestNode(NodeConnCB):
+class TestNode(P2PInterface):
def __init__(self, block_store, tx_store):
super().__init__()
- self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
@@ -58,26 +57,23 @@ class TestNode(NodeConnCB):
self.lastInv = []
self.closed = False
- def on_close(self, conn):
+ def on_close(self):
self.closed = True
- def add_connection(self, conn):
- self.conn = conn
-
- def on_headers(self, conn, message):
+ def on_headers(self, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
- def on_getheaders(self, conn, message):
+ def on_getheaders(self, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
- conn.send_message(response)
+ self.send_message(response)
- def on_getdata(self, conn, message):
- [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
- [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
+ def on_getdata(self, message):
+ [self.send_message(r) for r in self.block_store.get_blocks(message.inv)]
+ [self.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1 or i.type == 1 | (1 << 30): # MSG_TX or MSG_WITNESS_TX
@@ -85,16 +81,16 @@ class TestNode(NodeConnCB):
elif i.type == 2 or i.type == 2 | (1 << 30): # MSG_BLOCK or MSG_WITNESS_BLOCK
self.block_request_map[i.hash] = True
- def on_inv(self, conn, message):
+ def on_inv(self, message):
self.lastInv = [x.hash for x in message.inv]
- def on_pong(self, conn, message):
+ def on_pong(self, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
- def on_reject(self, conn, message):
+ def on_reject(self, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
@@ -102,30 +98,30 @@ class TestNode(NodeConnCB):
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
- self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
+ self.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
- self.conn.send_message(m)
+ self.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
- self.conn.send_message(m)
+ self.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
- self.conn.send_message(msg_ping(nonce))
+ self.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
- self.conn.send_message(msg_mempool())
+ self.send_message(msg_mempool())
# TestInstance:
#
@@ -166,8 +162,7 @@ class TestManager():
def __init__(self, testgen, datadir):
self.test_generator = testgen
- self.connections = []
- self.test_nodes = []
+ self.p2p_connections = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
@@ -175,28 +170,24 @@ class TestManager():
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
- test_node = TestNode(self.block_store, self.tx_store)
- self.test_nodes.append(test_node)
- self.connections.append(NodeConn('127.0.0.1', p2p_port(i), test_node))
- # Make sure the TestNode (callback class) has a reference to its
- # associated NodeConn
- test_node.add_connection(self.connections[-1])
+ node = TestNode(self.block_store, self.tx_store)
+ node.peer_connect('127.0.0.1', p2p_port(i))
+ self.p2p_connections.append(node)
def clear_all_connections(self):
- self.connections = []
- self.test_nodes = []
+ self.p2p_connections = []
def wait_for_disconnections(self):
def disconnected():
- return all(node.closed for node in self.test_nodes)
+ return all(node.closed for node in self.p2p_connections)
wait_until(disconnected, timeout=10, lock=mininode_lock)
def wait_for_verack(self):
- return all(node.wait_for_verack() for node in self.test_nodes)
+ return all(node.wait_for_verack() for node in self.p2p_connections)
def wait_for_pings(self, counter):
def received_pongs():
- return all(node.received_ping_response(counter) for node in self.test_nodes)
+ return all(node.received_ping_response(counter) for node in self.p2p_connections)
wait_until(received_pongs, lock=mininode_lock)
# sync_blocks: Wait for all connections to request the blockhash given
@@ -206,17 +197,17 @@ class TestManager():
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
- for node in self.test_nodes
+ for node in self.p2p_connections
)
# --> error if not requested
wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock)
# Send getheaders message
- [ c.cb.send_getheaders() for c in self.connections ]
+ [ c.send_getheaders() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
- [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ [ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
@@ -226,42 +217,42 @@ class TestManager():
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
- for node in self.test_nodes
+ for node in self.p2p_connections
)
# --> error if not requested
wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock)
# Get the mempool
- [ c.cb.send_mempool() for c in self.connections ]
+ [ c.send_mempool() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
- [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ [ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
- [ c.cb.lastInv.sort() for c in self.connections ]
+ [ c.lastInv.sort() for c in self.p2p_connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
- for c in self.connections:
+ for c in self.p2p_connections:
if outcome is None:
- if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
+ if c.bestblockhash != self.p2p_connections[0].bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
- if c.cb.bestblockhash == blockhash:
+ if c.bestblockhash == blockhash:
return False
- if blockhash not in c.cb.block_reject_map:
+ if blockhash not in c.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
- if not outcome.match(c.cb.block_reject_map[blockhash]):
- logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
+ if not outcome.match(c.block_reject_map[blockhash]):
+ logger.error('Block rejected with %s instead of expected %s: %064x' % (c.block_reject_map[blockhash], outcome, blockhash))
return False
- elif ((c.cb.bestblockhash == blockhash) != outcome):
+ elif ((c.bestblockhash == blockhash) != outcome):
return False
return True
@@ -273,21 +264,21 @@ class TestManager():
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
- for c in self.connections:
+ for c in self.p2p_connections:
if outcome is None:
# Make sure the mempools agree with each other
- if c.cb.lastInv != self.connections[0].cb.lastInv:
+ if c.lastInv != self.p2p_connections[0].lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
- if txhash in c.cb.lastInv:
+ if txhash in c.lastInv:
return False
- if txhash not in c.cb.tx_reject_map:
+ if txhash not in c.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
- if not outcome.match(c.cb.tx_reject_map[txhash]):
- logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
+ if not outcome.match(c.tx_reject_map[txhash]):
+ logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.tx_reject_map[txhash], outcome, txhash))
return False
- elif ((txhash in c.cb.lastInv) != outcome):
+ elif ((txhash in c.lastInv) != outcome):
return False
return True
@@ -332,25 +323,25 @@ class TestManager():
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
- for c in self.connections:
- if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
+ for c in self.p2p_connections:
+ if first_block_with_hash and block.sha256 in c.block_request_map and c.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
- c.cb.block_request_map[block.sha256] = False
+ c.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
- [ c.cb.send_inv(block) for c in self.connections ]
+ [ c.send_inv(block) for c in self.p2p_connections ]
self.sync_blocks(block.sha256, 1)
else:
- [ c.send_message(msg_block(block)) for c in self.connections ]
- [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ [ c.send_message(msg_block(block)) for c in self.p2p_connections ]
+ [ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
@@ -360,7 +351,7 @@ class TestManager():
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
- [ c.cb.send_header(block_header) for c in self.connections ]
+ [ c.send_header(block_header) for c in self.p2p_connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
@@ -369,11 +360,11 @@ class TestManager():
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
- for c in self.connections:
- c.cb.tx_request_map[tx.sha256] = False
+ for c in self.p2p_connections:
+ c.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
- [ c.cb.send_inv(tx) for c in self.connections ]
+ [ c.send_inv(tx) for c in self.p2p_connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
@@ -381,26 +372,26 @@ class TestManager():
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
- [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ [ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
- [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ [ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
- [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ [ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
- [ c.disconnect_node() for c in self.connections ]
+ [ c.disconnect_node() for c in self.p2p_connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
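
In practice the comptool change means a comparison test now drives the P2PInterface objects in self.p2p_connections directly instead of reaching through a .cb callback. A hedged sketch of the new calling convention follows; `nodes`, `test_generator` and `datadir` are assumptions standing in for a real comparison test's state:

    from test_framework.comptool import TestManager
    from test_framework.mininode import network_thread_start

    def connect_comparison_peers(nodes, test_generator, datadir):
        # TestManager now keeps P2PInterface objects in self.p2p_connections.
        test = TestManager(test_generator, datadir)
        test.add_all_connections(nodes)   # creates each peer and calls peer_connect()
        network_thread_start()            # all connections must exist before the thread starts
        test.wait_for_verack()            # each peer completes its version handshake
        # Helpers are now called on the connection itself (previously via c.cb):
        for conn in test.p2p_connections:
            conn.send_ping(test.ping_counter)
        test.wait_for_pings(test.ping_counter)
        test.ping_counter += 1
        return test
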
diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py
index ddc3c515b2..f8761f2bb3 100644
--- a/test/functional/test_framework/coverage.py
+++ b/test/functional/test_framework/coverage.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 40d02f3ee0..a54a0299c7 100644
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -24,7 +24,7 @@ import struct
import time
from test_framework.siphash import siphash256
-from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
+from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014 # past bip-31 for ping/pong
@@ -38,10 +38,11 @@ COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
-# NODE_BLOOM = (1 << 2)
+NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
+NODE_NETWORK_LIMITED = (1 << 10)
# Serialization/deserialization tools
def sha256(s):
@@ -167,21 +168,6 @@ def ser_string_vector(l):
return r
-def deser_int_vector(f):
- nit = deser_compact_size(f)
- r = []
- for i in range(nit):
- t = struct.unpack("<i", f.read(4))[0]
- r.append(t)
- return r
-
-
-def ser_int_vector(l):
- r = ser_compact_size(len(l))
- for i in l:
- r += struct.pack("<i", i)
- return r
-
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
@@ -467,10 +453,10 @@ class CTransaction():
r += struct.pack("<I", self.nLockTime)
return r
- # Regular serialization is without witness -- must explicitly
- # call serialize_with_witness to include witness data.
+ # Regular serialization is with witness -- must explicitly
+ # call serialize_without_witness to exclude witness data.
def serialize(self):
- return self.serialize_without_witness()
+ return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
@@ -486,7 +472,7 @@ class CTransaction():
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
- self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
+ self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
@@ -583,7 +569,7 @@ class CBlock(CBlockHeader):
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
- r += ser_vector(self.vtx)
+ r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@@ -650,7 +636,7 @@ class PrefilledTransaction():
self.tx = CTransaction()
self.tx.deserialize(f)
- def serialize(self, with_witness=False):
+ def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
@@ -659,6 +645,9 @@ class PrefilledTransaction():
r += self.tx.serialize_without_witness()
return r
+ def serialize_without_witness(self):
+ return self.serialize(with_witness=False)
+
def serialize_with_witness(self):
return self.serialize(with_witness=True)
@@ -698,7 +687,7 @@ class P2PHeaderAndShortIDs():
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
- r += ser_vector(self.prefilled_txn)
+ r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
@@ -829,13 +818,13 @@ class BlockTransactions():
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
- def serialize(self, with_witness=False):
+ def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
- r += ser_vector(self.transactions)
+ r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
@@ -1035,7 +1024,7 @@ class msg_block():
self.block.deserialize(f)
def serialize(self):
- return self.block.serialize()
+ return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
@@ -1306,7 +1295,7 @@ class msg_blocktxn():
def serialize(self):
r = b""
- r += self.block_transactions.serialize()
+ r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
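
The net effect of the messages.py changes is that CTransaction.serialize() now emits the witness encoding by default, while txid computation still hashes the no-witness form. A short illustrative sketch, not from this diff (`raw_tx_hex` is any raw transaction hex string):

    from test_framework.messages import CTransaction, FromHex
    from test_framework.util import bytes_to_hex_str

    def show_tx_encodings(raw_tx_hex):
        tx = FromHex(CTransaction(), raw_tx_hex)
        with_witness = tx.serialize()                      # default now includes witness data
        without_witness = tx.serialize_without_witness()   # legacy encoding, used for the txid
        tx.calc_sha256()                                    # sha256/hash still derive from the no-witness form
        return bytes_to_hex_str(with_witness), bytes_to_hex_str(without_witness), tx.hash

msg_block and msg_blocktxn compensate by calling serialize(with_witness=False) explicitly, so their wire encodings are unchanged.
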
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 24c96b5681..fe14591139 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
-# Copyright (c) 2010-2016 The Bitcoin Core developers
+# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
@@ -9,10 +9,8 @@
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
-NodeConn: an object which manages p2p connectivity to a bitcoin node
-NodeConnCB: a base class that describes the interface for receiving
- callbacks with network messages from a NodeConn
-"""
+P2PConnection: A low-level connection object to a node's P2P interface
+P2PInterface: A high-level interface object for communicating to a node over P2P"""
import asyncore
from collections import defaultdict
from io import BytesIO
@@ -20,10 +18,10 @@ import logging
import socket
import struct
import sys
-import time
-from threading import RLock, Thread
+import threading
from test_framework.messages import *
+from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
@@ -57,173 +55,36 @@ MAGIC_BYTES = {
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
-class NodeConnCB():
- """Callback and helper functions for P2P connection to a bitcoind node.
+class P2PConnection(asyncore.dispatcher):
+ """A low-level connection object to a node's P2P interface.
- Individual testcases should subclass this and override the on_* methods
- if they want to alter message handling behaviour."""
- def __init__(self):
- # Track whether we have a P2P connection open to the node
- self.connected = False
- self.connection = None
-
- # Track number of messages of each type received and the most recent
- # message of each type
- self.message_count = defaultdict(int)
- self.last_message = {}
-
- # A count of the number of ping messages we've sent to the node
- self.ping_counter = 1
-
- # Message receiving methods
-
- def deliver(self, conn, message):
- """Receive message and dispatch message to appropriate callback.
-
- We keep a count of how many of each message type has been received
- and the most recent message of each type."""
- with mininode_lock:
- try:
- command = message.command.decode('ascii')
- self.message_count[command] += 1
- self.last_message[command] = message
- getattr(self, 'on_' + command)(conn, message)
- except:
- print("ERROR delivering %s (%s)" % (repr(message),
- sys.exc_info()[0]))
- raise
-
- # Callback methods. Can be overridden by subclasses in individual test
- # cases to provide custom message handling behaviour.
-
- def on_open(self, conn):
- self.connected = True
-
- def on_close(self, conn):
- self.connected = False
- self.connection = None
-
- def on_addr(self, conn, message): pass
- def on_block(self, conn, message): pass
- def on_blocktxn(self, conn, message): pass
- def on_cmpctblock(self, conn, message): pass
- def on_feefilter(self, conn, message): pass
- def on_getaddr(self, conn, message): pass
- def on_getblocks(self, conn, message): pass
- def on_getblocktxn(self, conn, message): pass
- def on_getdata(self, conn, message): pass
- def on_getheaders(self, conn, message): pass
- def on_headers(self, conn, message): pass
- def on_mempool(self, conn): pass
- def on_pong(self, conn, message): pass
- def on_reject(self, conn, message): pass
- def on_sendcmpct(self, conn, message): pass
- def on_sendheaders(self, conn, message): pass
- def on_tx(self, conn, message): pass
-
- def on_inv(self, conn, message):
- want = msg_getdata()
- for i in message.inv:
- if i.type != 0:
- want.inv.append(i)
- if len(want.inv):
- conn.send_message(want)
-
- def on_ping(self, conn, message):
- conn.send_message(msg_pong(message.nonce))
-
- def on_verack(self, conn, message):
- self.verack_received = True
-
- def on_version(self, conn, message):
- assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
- conn.send_message(msg_verack())
- conn.nServices = message.nServices
+ This class is responsible for:
- # Connection helper methods
-
- def add_connection(self, conn):
- self.connection = conn
+ - opening and closing the TCP connection to the node
+ - reading bytes from and writing bytes to the socket
+ - deserializing and serializing the P2P message header
+ - logging messages as they are sent and received
- def wait_for_disconnect(self, timeout=60):
- test_function = lambda: not self.connected
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
+ This class contains no logic for handling the P2P message payloads. It must be
+ sub-classed and the on_message() callback overridden."""
- # Message receiving helper methods
-
- def wait_for_block(self, blockhash, timeout=60):
- test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_getdata(self, timeout=60):
- test_function = lambda: self.last_message.get("getdata")
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_getheaders(self, timeout=60):
- test_function = lambda: self.last_message.get("getheaders")
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_inv(self, expected_inv, timeout=60):
- """Waits for an INV message and checks that the first inv object in the message was as expected."""
- if len(expected_inv) > 1:
- raise NotImplementedError("wait_for_inv() will only verify the first inv object")
- test_function = lambda: self.last_message.get("inv") and \
- self.last_message["inv"].inv[0].type == expected_inv[0].type and \
- self.last_message["inv"].inv[0].hash == expected_inv[0].hash
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_verack(self, timeout=60):
- test_function = lambda: self.message_count["verack"]
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- # Message sending helper functions
-
- def send_message(self, message):
- if self.connection:
- self.connection.send_message(message)
- else:
- logger.error("Cannot send message. No connection to node!")
-
- def send_and_ping(self, message):
- self.send_message(message)
- self.sync_with_ping()
-
- # Sync up with the node
- def sync_with_ping(self, timeout=60):
- self.send_message(msg_ping(nonce=self.ping_counter))
- test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
- self.ping_counter += 1
-
-class NodeConn(asyncore.dispatcher):
- """The actual NodeConn class
+ def __init__(self):
+ # All P2PConnections must be created before starting the NetworkThread.
+ # assert that the network thread is not running.
+ assert not network_thread_running()
- This class provides an interface for a p2p connection to a specified node."""
+ super().__init__(map=mininode_socket_map)
- def __init__(self, dstaddr, dstport, callback, net="regtest", services=NODE_NETWORK|NODE_WITNESS, send_version=True):
- asyncore.dispatcher.__init__(self, map=mininode_socket_map)
+ def peer_connect(self, dstaddr, dstport, net="regtest"):
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
- self.last_sent = 0
self.state = "connecting"
self.network = net
- self.cb = callback
self.disconnect = False
- self.nServices = 0
-
- if send_version:
- # stuff version msg into sendbuf
- vt = msg_version()
- vt.nServices = services
- vt.addrTo.ip = self.dstaddr
- vt.addrTo.port = self.dstport
- vt.addrFrom.ip = "0.0.0.0"
- vt.addrFrom.port = 0
- self.send_message(vt, True)
logger.info('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
@@ -232,15 +93,22 @@ class NodeConn(asyncore.dispatcher):
except:
self.handle_close()
+ def peer_disconnect(self):
+ # Connection could have already been closed by other end.
+ if self.state == "connected":
+ self.disconnect_node()
+
# Connection and disconnection methods
def handle_connect(self):
+ """asyncore callback when a connection is opened."""
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
- self.cb.on_open(self)
+ self.on_open()
def handle_close(self):
+ """asyncore callback when a connection is closed."""
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
@@ -249,10 +117,10 @@ class NodeConn(asyncore.dispatcher):
self.close()
except:
pass
- self.cb.on_close(self)
+ self.on_close()
def disconnect_node(self):
- """ Disconnect the p2p connection.
+ """Disconnect the p2p connection.
Called by the test logic thread. Causes the p2p connection
to be disconnected on the next iteration of the asyncore loop."""
@@ -260,16 +128,19 @@ class NodeConn(asyncore.dispatcher):
# Socket read methods
- def readable(self):
- return True
-
def handle_read(self):
+ """asyncore callback when data is read from the socket."""
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
- self.got_data()
+ self._on_data()
- def got_data(self):
+ def _on_data(self):
+ """Try to read P2P messages from the recv buffer.
+
+ This method reads data from the buffer in a loop. It deserializes,
+ parses and verifies the P2P header, then passes the P2P payload to
+ the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
@@ -294,26 +165,27 @@ class NodeConn(asyncore.dispatcher):
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
- self.got_message(t)
+ self._log_message("receive", t)
+ self.on_message(t)
except Exception as e:
logger.exception('Error reading message:', repr(e))
raise
- def got_message(self, message):
- if self.last_sent + 30 * 60 < time.time():
- self.send_message(MESSAGEMAP[b'ping']())
- self._log_message("receive", message)
- self.cb.deliver(self, message)
+ def on_message(self, message):
+ """Callback for processing a P2P payload. Must be overridden by derived class."""
+ raise NotImplementedError
# Socket write methods
def writable(self):
+ """asyncore method to determine whether the handle_write() callback should be called on the next loop."""
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
+ """asyncore callback when data should be written to the socket."""
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
@@ -331,6 +203,10 @@ class NodeConn(asyncore.dispatcher):
self.sendbuf = self.sendbuf[sent:]
def send_message(self, message, pushbuf=False):
+ """Send a P2P message over the socket.
+
+ This method takes a P2P payload, builds the P2P header and adds
+ the message to the send buffer to be sent over the socket."""
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
@@ -353,11 +229,11 @@ class NodeConn(asyncore.dispatcher):
self.sendbuf = tmsg
else:
self.sendbuf += tmsg
- self.last_sent = time.time()
# Class utility methods
def _log_message(self, direction, msg):
+ """Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
@@ -368,6 +244,152 @@ class NodeConn(asyncore.dispatcher):
logger.debug(log_message)
+class P2PInterface(P2PConnection):
+ """A high-level P2P interface class for communicating with a Bitcoin node.
+
+ This class provides high-level callbacks for processing P2P message
+ payloads, as well as convenience methods for interacting with the
+ node over P2P.
+
+ Individual testcases should subclass this and override the on_* methods
+ if they want to alter message handling behaviour."""
+ def __init__(self):
+ super().__init__()
+
+ # Track number of messages of each type received and the most recent
+ # message of each type
+ self.message_count = defaultdict(int)
+ self.last_message = {}
+
+ # A count of the number of ping messages we've sent to the node
+ self.ping_counter = 1
+
+ # The network services received from the peer
+ self.nServices = 0
+
+ def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
+ super().peer_connect(*args, **kwargs)
+
+ if send_version:
+ # Send a version msg
+ vt = msg_version()
+ vt.nServices = services
+ vt.addrTo.ip = self.dstaddr
+ vt.addrTo.port = self.dstport
+ vt.addrFrom.ip = "0.0.0.0"
+ vt.addrFrom.port = 0
+ self.send_message(vt, True)
+
+ # Message receiving methods
+
+ def on_message(self, message):
+ """Receive message and dispatch message to appropriate callback.
+
+ We keep a count of how many of each message type has been received
+ and the most recent message of each type."""
+ with mininode_lock:
+ try:
+ command = message.command.decode('ascii')
+ self.message_count[command] += 1
+ self.last_message[command] = message
+ getattr(self, 'on_' + command)(message)
+ except:
+ print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
+ raise
+
+ # Callback methods. Can be overridden by subclasses in individual test
+ # cases to provide custom message handling behaviour.
+
+ def on_open(self):
+ pass
+
+ def on_close(self):
+ pass
+
+ def on_addr(self, message): pass
+ def on_block(self, message): pass
+ def on_blocktxn(self, message): pass
+ def on_cmpctblock(self, message): pass
+ def on_feefilter(self, message): pass
+ def on_getaddr(self, message): pass
+ def on_getblocks(self, message): pass
+ def on_getblocktxn(self, message): pass
+ def on_getdata(self, message): pass
+ def on_getheaders(self, message): pass
+ def on_headers(self, message): pass
+ def on_mempool(self, message): pass
+ def on_pong(self, message): pass
+ def on_reject(self, message): pass
+ def on_sendcmpct(self, message): pass
+ def on_sendheaders(self, message): pass
+ def on_tx(self, message): pass
+
+ def on_inv(self, message):
+ want = msg_getdata()
+ for i in message.inv:
+ if i.type != 0:
+ want.inv.append(i)
+ if len(want.inv):
+ self.send_message(want)
+
+ def on_ping(self, message):
+ self.send_message(msg_pong(message.nonce))
+
+ def on_verack(self, message):
+ self.verack_received = True
+
+ def on_version(self, message):
+ assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
+ self.send_message(msg_verack())
+ self.nServices = message.nServices
+
+ # Connection helper methods
+
+ def wait_for_disconnect(self, timeout=60):
+ test_function = lambda: self.state != "connected"
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ # Message receiving helper methods
+
+ def wait_for_block(self, blockhash, timeout=60):
+ test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_getdata(self, timeout=60):
+ test_function = lambda: self.last_message.get("getdata")
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_getheaders(self, timeout=60):
+ test_function = lambda: self.last_message.get("getheaders")
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_inv(self, expected_inv, timeout=60):
+ """Waits for an INV message and checks that the first inv object in the message was as expected."""
+ if len(expected_inv) > 1:
+ raise NotImplementedError("wait_for_inv() will only verify the first inv object")
+ test_function = lambda: self.last_message.get("inv") and \
+ self.last_message["inv"].inv[0].type == expected_inv[0].type and \
+ self.last_message["inv"].inv[0].hash == expected_inv[0].hash
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_verack(self, timeout=60):
+ test_function = lambda: self.message_count["verack"]
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ # Message sending helper functions
+
+ def send_and_ping(self, message):
+ self.send_message(message)
+ self.sync_with_ping()
+
+ # Sync up with the node
+ def sync_with_ping(self, timeout=60):
+ self.send_message(msg_ping(nonce=self.ping_counter))
+ test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+ self.ping_counter += 1
+
+
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
@@ -375,13 +397,16 @@ mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
-# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
+# P2PConnection acquires this lock whenever delivering a message to a P2PInterface,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
-# access to any data shared with the NodeConnCB or NodeConn.
-mininode_lock = RLock()
+# access to any data shared with the P2PInterface or P2PConnection.
+mininode_lock = threading.RLock()
+
+class NetworkThread(threading.Thread):
+ def __init__(self):
+ super().__init__(name="NetworkThread")
-class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
@@ -394,3 +419,24 @@ class NetworkThread(Thread):
[obj.handle_close() for obj in disconnected]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
+
+def network_thread_start():
+ """Start the network thread."""
+ # Only one network thread may run at a time
+ assert not network_thread_running()
+
+ NetworkThread().start()
+
+def network_thread_running():
+ """Return whether the network thread is running."""
+ return any([thread.name == "NetworkThread" for thread in threading.enumerate()])
+
+def network_thread_join(timeout=10):
+ """Wait timeout seconds for the network thread to terminate.
+
+ Throw if the network thread doesn't terminate in timeout seconds."""
+ network_threads = [thread for thread in threading.enumerate() if thread.name == "NetworkThread"]
+ assert len(network_threads) <= 1
+ for thread in network_threads:
+ thread.join(timeout)
+ assert not thread.is_alive()
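
Putting the mininode changes together: NodeConn/NodeConnCB are folded into P2PConnection/P2PInterface, the on_* callbacks lose their conn argument, and the network thread is managed through module-level helpers. A minimal standalone sketch, assuming a regtest bitcoind is listening on node 0's test port:

    from test_framework.mininode import (
        P2PInterface,
        mininode_lock,
        network_thread_join,
        network_thread_start,
    )
    from test_framework.util import p2p_port

    class HeaderCountingPeer(P2PInterface):
        """Example subclass: the on_* callbacks now take only the message argument."""
        def __init__(self):
            super().__init__()
            self.headers_seen = 0

        def on_headers(self, message):     # previously on_headers(self, conn, message)
            self.headers_seen += len(message.headers)

    if __name__ == '__main__':
        peer = HeaderCountingPeer()
        peer.peer_connect('127.0.0.1', p2p_port(0))  # create connections before starting the thread
        network_thread_start()
        peer.wait_for_verack()
        peer.sync_with_ping()
        with mininode_lock:
            headers_seen = peer.headers_seen
        peer.peer_disconnect()
        network_thread_join()

Note that every P2PConnection must be created before network_thread_start() is called; P2PConnection.__init__ asserts that the network thread is not yet running.
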
diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py
index e5d415788f..96fe283347 100644
--- a/test/functional/test_framework/netutil.py
+++ b/test/functional/test_framework/netutil.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
index a4c046bd3d..dae8a4e569 100644
--- a/test/functional/test_framework/script.py
+++ b/test/functional/test_framework/script.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
@@ -641,7 +641,7 @@ def SignatureHash(script, txTo, inIdx, hashtype):
txtmp.vin = []
txtmp.vin.append(tmp)
- s = txtmp.serialize()
+ s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
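
Since CTransaction.serialize() now includes witness data, SignatureHash switches to serialize_without_witness() to keep producing the legacy (pre-segwit) digest. A hedged usage sketch with a hypothetical spending setup:

    from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut
    from test_framework.script import CScript, OP_TRUE, SIGHASH_ALL, SignatureHash

    def legacy_sighash(prev_txid_int, prev_vout, amount_sat, script_pubkey):
        spend = CTransaction()
        spend.vin.append(CTxIn(COutPoint(prev_txid_int, prev_vout)))
        spend.vout.append(CTxOut(amount_sat, CScript([OP_TRUE])))
        # Pre-segwit signature hash of input 0; SignatureHash serializes the
        # transaction without witness data internally.
        sighash, err = SignatureHash(script_pubkey, spend, 0, SIGHASH_ALL)
        assert err is None
        return sighash
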
diff --git a/test/functional/test_framework/siphash.py b/test/functional/test_framework/siphash.py
index f68ecad36b..6ffc982cea 100644
--- a/test/functional/test_framework/siphash.py
+++ b/test/functional/test_framework/siphash.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2016 The Bitcoin Core developers
+# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Specialized SipHash-2-4 implementations.
diff --git a/test/functional/test_framework/socks5.py b/test/functional/test_framework/socks5.py
index 7b40c47fbf..4721809a3b 100644
--- a/test/functional/test_framework/socks5.py
+++ b/test/functional/test_framework/socks5.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Dummy Socks5 server for testing."""
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 8df50474f3..f8d66def64 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -1,10 +1,9 @@
#!/usr/bin/env python3
-# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
-from collections import deque
from enum import Enum
import logging
import optparse
@@ -14,7 +13,6 @@ import shutil
import sys
import tempfile
import time
-import traceback
from .authproxy import JSONRPCException
from . import coverage
@@ -64,6 +62,7 @@ class BitcoinTestFramework():
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
+ self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
@@ -93,6 +92,8 @@ class BitcoinTestFramework():
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
+ parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
+ help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
@@ -115,6 +116,8 @@ class BitcoinTestFramework():
success = TestStatus.FAILED
try:
+ if self.options.usecli and not self.supports_cli:
+ raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
self.run_test()
@@ -149,32 +152,19 @@ class BitcoinTestFramework():
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
- if os.getenv("PYTHON_DEBUG", ""):
- # Dump the end of the debug logs, to aid in debugging rare
- # travis failures.
- import glob
- filenames = [self.options.tmpdir + "/test_framework.log"]
- filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
- MAX_LINES_TO_PRINT = 1000
- for fn in filenames:
- try:
- with open(fn, 'r') as f:
- print("From", fn, ":")
- print("".join(deque(f, MAX_LINES_TO_PRINT)))
- except OSError:
- print("Opening file %s failed." % fn)
- traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
- sys.exit(TEST_EXIT_PASSED)
+ exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
- sys.exit(TEST_EXIT_SKIPPED)
+ exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
- logging.shutdown()
- sys.exit(TEST_EXIT_FAILED)
+ self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
+ exit_code = TEST_EXIT_FAILED
+ logging.shutdown()
+ sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
@@ -228,20 +218,20 @@ class BitcoinTestFramework():
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
- self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir))
+ self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
- def start_node(self, i, extra_args=None, stderr=None):
+ def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
- node.start(extra_args, stderr)
+ node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
- def start_nodes(self, extra_args=None):
+ def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
@@ -249,7 +239,7 @@ class BitcoinTestFramework():
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
- node.start(extra_args[i])
+ node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
@@ -281,10 +271,10 @@ class BitcoinTestFramework():
self.stop_node(i)
self.start_node(i, extra_args)
- def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None):
+ def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
- self.start_node(i, extra_args, stderr=log_stderr)
+ self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'bitcoind exited' in str(e) # node must have shutdown
@@ -432,7 +422,7 @@ class BitcoinTestFramework():
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(self.options.cachedir, i, "debug.log"))
- os.remove(log_filename(self.options.cachedir, i, "db.log"))
+ os.remove(log_filename(self.options.cachedir, i, "wallets/db.log"))
os.remove(log_filename(self.options.cachedir, i, "peers.dat"))
os.remove(log_filename(self.options.cachedir, i, "fee_estimates.dat"))
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 34b458482a..1054e6d028 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -10,11 +10,11 @@ import http.client
import json
import logging
import os
+import re
import subprocess
import time
from .authproxy import JSONRPCException
-from .mininode import NodeConn
from .util import (
assert_equal,
get_rpc_proxy,
@@ -23,6 +23,9 @@ from .util import (
p2p_port,
)
+# For Python 3.4 compatibility
+JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
+
BITCOIND_PROC_WAIT_TIMEOUT = 60
class TestNode():
@@ -39,7 +42,7 @@ class TestNode():
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
- def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir):
+ def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
@@ -59,6 +62,7 @@ class TestNode():
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "bitcoin-cli"), self.datadir)
+ self.use_cli = use_cli
self.running = False
self.process = None
@@ -70,17 +74,20 @@ class TestNode():
self.p2ps = []
def __getattr__(self, name):
- """Dispatches any unrecognised messages to the RPC connection."""
- assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
- return getattr(self.rpc, name)
+ """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
+ if self.use_cli:
+ return getattr(self.cli, name)
+ else:
+ assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
+ return getattr(self.rpc, name)
- def start(self, extra_args=None, stderr=None):
+ def start(self, extra_args=None, stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
- self.process = subprocess.Popen(self.args + extra_args, stderr=stderr)
+ self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
self.running = True
self.log.debug("bitcoind started, waiting for RPC to come up")
@@ -111,10 +118,13 @@ class TestNode():
raise AssertionError("Unable to connect to bitcoind")
def get_wallet_rpc(self, wallet_name):
- assert self.rpc_connected
- assert self.rpc
- wallet_path = "wallet/%s" % wallet_name
- return self.rpc / wallet_path
+ if self.use_cli:
+ return self.cli("-rpcwallet={}".format(wallet_name))
+ else:
+ assert self.rpc_connected
+ assert self.rpc
+ wallet_path = "wallet/%s" % wallet_name
+ return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
@@ -158,7 +168,7 @@ class TestNode():
self.encryptwallet(passphrase)
self.wait_until_stopped()
- def add_p2p_connection(self, p2p_conn, **kwargs):
+ def add_p2p_connection(self, p2p_conn, *args, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
@@ -167,9 +177,9 @@ class TestNode():
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
+
+ p2p_conn.peer_connect(*args, **kwargs)
self.p2ps.append(p2p_conn)
- kwargs.update({'callback': p2p_conn})
- p2p_conn.add_connection(NodeConn(**kwargs))
return p2p_conn
@@ -185,46 +195,73 @@ class TestNode():
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
- # Connection could have already been closed by other end.
- if p.connection is not None:
- p.connection.disconnect_node()
- self.p2ps = []
+ p.peer_disconnect()
+ del self.p2ps[:]
+
+class TestNodeCLIAttr:
+ def __init__(self, cli, command):
+ self.cli = cli
+ self.command = command
+
+ def __call__(self, *args, **kwargs):
+ return self.cli.send_cli(self.command, *args, **kwargs)
+ def get_request(self, *args, **kwargs):
+ return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
- self.args = []
+ self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
+ self.log = logging.getLogger('TestFramework.bitcoincli')
- def __call__(self, *args, input=None):
- # TestNodeCLI is callable with bitcoin-cli command-line args
- self.args = [str(arg) for arg in args]
- self.input = input
- return self
+ def __call__(self, *options, input=None):
+ # TestNodeCLI is callable with bitcoin-cli command-line options
+ cli = TestNodeCLI(self.binary, self.datadir)
+ cli.options = [str(o) for o in options]
+ cli.input = input
+ return cli
def __getattr__(self, command):
- def dispatcher(*args, **kwargs):
- return self.send_cli(command, *args, **kwargs)
- return dispatcher
-
- def send_cli(self, command, *args, **kwargs):
+ return TestNodeCLIAttr(self, command)
+
+ def batch(self, requests):
+ results = []
+ for request in requests:
+ try:
+ results.append(dict(result=request()))
+ except JSONRPCException as e:
+ results.append(dict(error=e))
+ return results
+
+ def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
- p_args = [self.binary, "-datadir=" + self.datadir] + self.args
+ p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
- p_args += [command] + pos_args + named_args
+ if command is not None:
+ p_args += [command]
+ p_args += pos_args + named_args
+ self.log.debug("Running bitcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
+ match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
+ if match:
+ code, message = match.groups()
+ raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
- return json.loads(cli_stdout, parse_float=decimal.Decimal)
+ try:
+ return json.loads(cli_stdout, parse_float=decimal.Decimal)
+ except JSONDecodeError:
+ return cli_stdout.rstrip("\n")
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 102c903018..7fdc171332 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
@@ -390,7 +390,7 @@ def sync_chain(rpc_connections, *, wait=1, timeout=60):
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
-def sync_mempools(rpc_connections, *, wait=1, timeout=60):
+def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
@@ -402,6 +402,9 @@ def sync_mempools(rpc_connections, *, wait=1, timeout=60):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
+ if flush_scheduler:
+ for r in rpc_connections:
+ r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
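
The new flush_scheduler parameter makes sync_mempools call each node's syncwithvalidationinterfacequeue RPC once the mempools match; tests can opt out. A brief sketch, where `nodes` stands for a running test's self.nodes list:

    from test_framework.util import sync_mempools

    def wait_for_matching_mempools(nodes):
        # Default: once mempools match, flush each node's validation interface queue.
        sync_mempools(nodes)
        # Opt out of the flush for tests that exercise scheduler behaviour directly.
        sync_mempools(nodes, flush_scheduler=False)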