Diffstat (limited to 'test')
53 files changed, 1350 insertions, 705 deletions
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py index 3230d5cb6b..5bb3b5c094 100755 --- a/test/functional/combine_logs.py +++ b/test/functional/combine_logs.py @@ -2,7 +2,9 @@ """Combine logs from multiple bitcoin nodes as well as the test_framework log. This streams the combined log output to stdout. Use combine_logs.py > outputfile -to write to an outputfile.""" +to write to an outputfile. + +If no argument is provided, the most recent test directory will be used.""" import argparse from collections import defaultdict, namedtuple @@ -11,6 +13,13 @@ import itertools import os import re import sys +import tempfile + +# N.B.: don't import any local modules here - this script must remain executable +# without the parent module installed. + +# Should match same symbol in `test_framework.test_framework`. +TMPDIR_PREFIX = "bitcoin_func_test_" # Matches on the date format at the start of the log event TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z") @@ -19,22 +28,30 @@ LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event']) def main(): """Main function. Parses args, reads the log files and renders them as text or html.""" - - parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__) + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + 'testdir', nargs='?', default='', + help=('temporary test directory to combine logs from. ' + 'Defaults to the most recent')) parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)') parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2') - args, unknown_args = parser.parse_known_args() + args = parser.parse_args() if args.html and args.color: print("Only one out of --color or --html should be specified") sys.exit(1) - # There should only be one unknown argument - the path of the temporary test directory - if len(unknown_args) != 1: - print("Unexpected arguments" + str(unknown_args)) + testdir = args.testdir or find_latest_test_dir() + + if not testdir: + print("No test directories found") sys.exit(1) - log_events = read_logs(unknown_args[0]) + if not args.testdir: + print("Opening latest test directory: {}".format(testdir), file=sys.stderr) + + log_events = read_logs(testdir) print_logs(log_events, color=args.color, html=args.html) @@ -53,6 +70,29 @@ def read_logs(tmp_dir): return heapq.merge(*[get_log_events(source, f) for source, f in files]) + +def find_latest_test_dir(): + """Returns the latest tmpfile test directory prefix.""" + tmpdir = tempfile.gettempdir() + + def join_tmp(basename): + return os.path.join(tmpdir, basename) + + def is_valid_test_tmpdir(basename): + fullpath = join_tmp(basename) + return ( + os.path.isdir(fullpath) + and basename.startswith(TMPDIR_PREFIX) + and os.access(fullpath, os.R_OK) + ) + + testdir_paths = [ + join_tmp(name) for name in os.listdir(tmpdir) if is_valid_test_tmpdir(name) + ] + + return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None + + def get_log_events(source, logfile): """Generator function that returns individual log events. 
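The chronological interleaving that read_logs() relies on can be illustrated standalone. This is a minimal sketch with made-up event data, not part of the patch; it shows why the already-sorted per-file streams can simply be handed to heapq.merge:

import heapq
from collections import namedtuple

LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])

# Hypothetical, already-sorted event streams, standing in for the per-file
# generators that get_log_events() yields in combine_logs.py.
node0 = [
    LogEvent('2018-01-01T00:00:01.000000Z', 'node0', 'Bitcoin Core starting'),
    LogEvent('2018-01-01T00:00:03.000000Z', 'node0', 'UpdateTip: new best'),
]
test_log = [
    LogEvent('2018-01-01T00:00:02.000000Z', 'test', 'Initializing test directory'),
]

# heapq.merge lazily interleaves the sorted inputs; namedtuples compare
# field by field, so putting the timestamp first gives chronological order.
for event in heapq.merge(node0, test_log):
    print(event.timestamp, event.source, event.event)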
diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py new file mode 100644 index 0000000000..02deae92f3 --- /dev/null +++ b/test/functional/data/invalid_txs.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Templates for constructing various sorts of invalid transactions. + +These templates (or an iterator over all of them) can be reused in different +contexts to test using a number of invalid transaction types. + +Hopefully this makes it easier to get coverage of a full variety of tx +validation checks through different interfaces (AcceptBlock, AcceptToMemPool, +etc.) without repeating ourselves. + +Invalid tx cases not covered here can be found by running: + + $ diff \ + <(grep -IREho "bad-txns[a-zA-Z-]+" src | sort -u) \ + <(grep -IEho "bad-txns[a-zA-Z-]+" test/functional/data/invalid_txs.py | sort -u) + +""" +import abc + +from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint +from test_framework import script as sc +from test_framework.blocktools import create_tx_with_script, MAX_BLOCK_SIGOPS + +basic_p2sh = sc.CScript([sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL]) + + +class BadTxTemplate: + """Allows simple construction of a certain kind of invalid tx. Base class to be subclassed.""" + __metaclass__ = abc.ABCMeta + + # The expected error code given by bitcoind upon submission of the tx. + reject_reason = "" + + # Only specified if it differs from mempool acceptance error. + block_reject_reason = "" + + # Do we expect to be disconnected after submitting this tx? + expect_disconnect = False + + # Is this tx considered valid when included in a block, but not for acceptance into + # the mempool (i.e. does it violate policy but not consensus)? + valid_in_block = False + + def __init__(self, *, spend_tx=None, spend_block=None): + self.spend_tx = spend_block.vtx[0] if spend_block else spend_tx + self.spend_avail = sum(o.nValue for o in self.spend_tx.vout) + self.valid_txin = CTxIn(COutPoint(self.spend_tx.sha256, 0), b"", 0xffffffff) + + @abc.abstractmethod + def get_tx(self, *args, **kwargs): + """Return a CTransaction that is invalid per the subclass.""" + pass + + +class OutputMissing(BadTxTemplate): + reject_reason = "bad-txns-vout-empty" + expect_disconnect = False + + def get_tx(self): + tx = CTransaction() + tx.vin.append(self.valid_txin) + tx.calc_sha256() + return tx + + +class InputMissing(BadTxTemplate): + reject_reason = "bad-txns-vin-empty" + expect_disconnect = False + + def get_tx(self): + tx = CTransaction() + tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE] * 100))) + tx.calc_sha256() + return tx + + +class SizeTooSmall(BadTxTemplate): + reject_reason = "tx-size-small" + expect_disconnect = False + valid_in_block = True + + def get_tx(self): + tx = CTransaction() + tx.vin.append(self.valid_txin) + tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE]))) + tx.calc_sha256() + return tx + + +class BadInputOutpointIndex(BadTxTemplate): + # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins + # database can't distinguish between spent outpoints and outpoints which never existed. 
+ reject_reason = None + expect_disconnect = False + + def get_tx(self): + num_indices = len(self.spend_tx.vin) + bad_idx = num_indices + 100 + + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256, bad_idx), b"", 0xffffffff)) + tx.vout.append(CTxOut(0, basic_p2sh)) + tx.calc_sha256() + return tx + + +class DuplicateInput(BadTxTemplate): + reject_reason = 'bad-txns-inputs-duplicate' + expect_disconnect = True + + def get_tx(self): + tx = CTransaction() + tx.vin.append(self.valid_txin) + tx.vin.append(self.valid_txin) + tx.vout.append(CTxOut(1, basic_p2sh)) + tx.calc_sha256() + return tx + + +class NonexistentInput(BadTxTemplate): + reject_reason = None # Added as an orphan tx. + expect_disconnect = False + + def get_tx(self): + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256 + 1, 0), b"", 0xffffffff)) + tx.vin.append(self.valid_txin) + tx.vout.append(CTxOut(1, basic_p2sh)) + tx.calc_sha256() + return tx + + +class SpendTooMuch(BadTxTemplate): + reject_reason = 'bad-txns-in-belowout' + expect_disconnect = True + + def get_tx(self): + return create_tx_with_script( + self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1)) + + +class SpendNegative(BadTxTemplate): + reject_reason = 'bad-txns-vout-negative' + expect_disconnect = True + + def get_tx(self): + return create_tx_with_script(self.spend_tx, 0, amount=-1) + + +class InvalidOPIFConstruction(BadTxTemplate): + reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)" + expect_disconnect = True + valid_in_block = True + + def get_tx(self): + return create_tx_with_script( + self.spend_tx, 0, script_sig=b'\x64' * 35, + amount=(self.spend_avail // 2)) + + +class TooManySigops(BadTxTemplate): + reject_reason = "bad-txns-too-many-sigops" + block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount" + expect_disconnect = False + + def get_tx(self): + lotsa_checksigs = sc.CScript([sc.OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) + return create_tx_with_script( + self.spend_tx, 0, + script_pub_key=lotsa_checksigs, + amount=1) + + +def iter_all_templates(): + """Iterate through all bad transaction template types.""" + return BadTxTemplate.__subclasses__() diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index 3d0467038d..12a4ce9aff 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -180,7 +180,7 @@ class AssumeValidTest(BitcoinTestFramework): for i in range(2202): p2p1.send_message(msg_block(self.blocks[i])) # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. - p2p1.sync_with_ping(120) + p2p1.sync_with_ping(150) assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) # Send blocks to node2. Block 102 will be rejected. 
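The new data/invalid_txs.py module is designed for extension: any subclass of BadTxTemplate is picked up automatically, because iter_all_templates() simply returns BadTxTemplate.__subclasses__(). A hypothetical sketch of adding a case (the class name, reject string, and output value here are illustrative assumptions, not part of the patch):

from test_framework.messages import COIN, CTransaction, CTxOut
from test_framework import script as sc
from data import invalid_txs

class OutputValueTooLarge(invalid_txs.BadTxTemplate):
    # Assumed reject string for an output above MAX_MONEY; verify against bitcoind.
    reject_reason = "bad-txns-vout-toolarge"
    expect_disconnect = True

    def get_tx(self):
        tx = CTransaction()
        tx.vin.append(self.valid_txin)
        # One output worth more than the 21 million coin supply cap.
        tx.vout.append(CTxOut(21000001 * COIN, sc.CScript([sc.OP_TRUE])))
        tx.calc_sha256()
        return tx

Tests that loop over invalid_txs.iter_all_templates(), as feature_block.py and p2p_invalid_tx.py do below, would then exercise such a case with no further wiring.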
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index e50f67a345..697a0b19ac 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -7,7 +7,13 @@ import copy import struct import time -from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script, get_legacy_sigopcount_block +from test_framework.blocktools import ( + create_block, + create_coinbase, + create_tx_with_script, + get_legacy_sigopcount_block, + MAX_BLOCK_SIGOPS, +) from test_framework.key import CECKey from test_framework.messages import ( CBlock, @@ -45,8 +51,7 @@ from test_framework.script import ( ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal - -MAX_BLOCK_SIGOPS = 20000 +from data import invalid_txs # Use this class for tests that require behavior other than normal "mininode" behavior. # For now, it is used to serialize a bloated varint (b64). @@ -95,16 +100,21 @@ class FullBlockTest(BitcoinTestFramework): self.save_spendable_output() self.sync_blocks([b0]) + # These constants chosen specifically to trigger an immature coinbase spend + # at a certain time below. + NUM_BUFFER_BLOCKS_TO_GENERATE = 99 + NUM_OUTPUTS_TO_COLLECT = 33 + # Allow the block to mature blocks = [] - for i in range(99): - blocks.append(self.next_block(5000 + i)) + for i in range(NUM_BUFFER_BLOCKS_TO_GENERATE): + blocks.append(self.next_block("maturitybuffer.{}".format(i))) self.save_spendable_output() self.sync_blocks(blocks) # collect spendable outputs now to avoid cluttering the code later on out = [] - for i in range(33): + for i in range(NUM_OUTPUTS_TO_COLLECT): out.append(self.get_spendable_output()) # Start by building a couple of blocks on top (which output is spent is @@ -116,7 +126,39 @@ class FullBlockTest(BitcoinTestFramework): b2 = self.next_block(2, spend=out[1]) self.save_spendable_output() - self.sync_blocks([b1, b2]) + self.sync_blocks([b1, b2], timeout=4) + + # Select a txn with an output eligible for spending. This won't actually be spent, + # since we're testing submission of a series of blocks with invalid txns. + attempt_spend_tx = out[2] + + # Submit blocks for rejection, each of which contains a single transaction + # (aside from coinbase) which should be considered invalid. + for TxTemplate in invalid_txs.iter_all_templates(): + template = TxTemplate(spend_tx=attempt_spend_tx) + + # Something about the serialization code for missing inputs creates + # a different hash in the test client than on bitcoind, resulting + # in a mismatching merkle root during block validation. + # Skip until we figure out what's going on. 
+ if TxTemplate == invalid_txs.InputMissing: + continue + if template.valid_in_block: + continue + + self.log.info("Reject block with invalid tx: %s", TxTemplate.__name__) + blockname = "for_invalid.%s" % TxTemplate.__name__ + badblock = self.next_block(blockname) + badtx = template.get_tx() + self.sign_tx(badtx, attempt_spend_tx) + badtx.rehash() + badblock = self.update_block(blockname, [badtx]) + self.sync_blocks( + [badblock], success=False, + reject_reason=(template.block_reject_reason or template.reject_reason), + reconnect=True, timeout=2) + + self.move_tip(2) # Fork like this: # @@ -1181,7 +1223,7 @@ class FullBlockTest(BitcoinTestFramework): self.save_spendable_output() spend = self.get_spendable_output() - self.sync_blocks(blocks, True, timeout=180) + self.sync_blocks(blocks, True, timeout=480) chain1_tip = i # now create alt chain of same length @@ -1193,14 +1235,14 @@ class FullBlockTest(BitcoinTestFramework): # extend alt chain to trigger re-org block = self.next_block("alt" + str(chain1_tip + 1)) - self.sync_blocks([block], True, timeout=180) + self.sync_blocks([block], True, timeout=480) # ... and re-org back to the first chain self.move_tip(chain1_tip) block = self.next_block(chain1_tip + 1) self.sync_blocks([block], False, force_send=True) block = self.next_block(chain1_tip + 2) - self.sync_blocks([block], True, timeout=180) + self.sync_blocks([block], True, timeout=480) # Helper methods ################ @@ -1288,7 +1330,7 @@ class FullBlockTest(BitcoinTestFramework): self.blocks[block_number] = block return block - def bootstrap_p2p(self): + def bootstrap_p2p(self, timeout=10): """Add a P2P connection to the node. Helper to connect and wait for version handshake.""" @@ -1299,15 +1341,15 @@ class FullBlockTest(BitcoinTestFramework): # an INV for the next block and receive two getheaders - one for the # IBD and one for the INV. We'd respond to both and could get # unexpectedly disconnected if the DoS score for that error is 50. - self.nodes[0].p2p.wait_for_getheaders(timeout=5) + self.nodes[0].p2p.wait_for_getheaders(timeout=timeout) - def reconnect_p2p(self): + def reconnect_p2p(self, timeout=60): """Tear down and bootstrap the P2P connection to the node. The node gets disconnected several times in this test. This helper method reconnects the p2p and restarts the network thread.""" self.nodes[0].disconnect_p2ps() - self.bootstrap_p2p() + self.bootstrap_p2p(timeout=timeout) def sync_blocks(self, blocks, success=True, reject_reason=None, force_send=False, reconnect=False, timeout=60): """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. 
@@ -1316,7 +1358,7 @@ class FullBlockTest(BitcoinTestFramework): self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect) if reconnect: - self.reconnect_p2p() + self.reconnect_p2p(timeout=timeout) if __name__ == '__main__': diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index 70d67aa53a..8b06cc7372 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -46,7 +46,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework): self.num_nodes = 4 self.setup_clean_chain = False # Need a bit of extra time for the nodes to start up for this test - self.rpc_timewait = 90 + self.rpc_timeout = 90 # Set -maxmempool=0 to turn off mempool memory sharing with dbcache # Set -rpcservertimeout=900 to reduce socket disconnects in this diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index c162f46d63..9a3f4fae45 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -29,7 +29,7 @@ class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 - self.rpc_timewait = 900 + self.rpc_timeout = 900 # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. @@ -191,6 +191,8 @@ class PruneTest(BitcoinTestFramework): def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) + with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']): + self.nodes[2].verifychain(checklevel=4, nblocks=0) self.log.info("Will need to redownload block %d" % self.forkheight) # Verify that we have enough history to reorg back to the fork point diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index 7098a03f1e..4bcdf9af55 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -90,7 +90,7 @@ class SegWitTest(BitcoinTestFramework): self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork") txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) - tmpl = self.nodes[0].getblocktemplate({}) + tmpl = self.nodes[0].getblocktemplate({'rules': ['segwit']}) assert(tmpl['sizelimit'] == 1000000) assert('weightlimit' not in tmpl) assert(tmpl['sigoplimit'] == 20000) @@ -232,15 +232,7 @@ class SegWitTest(BitcoinTestFramework): assert(tx.wit.is_null()) assert(txid3 in self.nodes[0].getrawmempool()) - # Now try calling getblocktemplate() without segwit support. - template = self.nodes[0].getblocktemplate() - - # Check that tx1 is the only transaction of the 3 in the template. - template_txids = [t['txid'] for t in template['transactions']] - assert(txid2 not in template_txids and txid3 not in template_txids) - assert(txid1 in template_txids) - - # Check that running with segwit support results in all 3 being included. + # Check that getblocktemplate includes all transactions. 
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]}) template_txids = [t['txid'] for t in template['transactions']] assert(txid1 in template_txids) diff --git a/test/functional/feature_shutdown.py b/test/functional/feature_shutdown.py new file mode 100755 index 0000000000..b633fabb1f --- /dev/null +++ b/test/functional/feature_shutdown.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test bitcoind shutdown.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, get_rpc_proxy +from threading import Thread + +def test_long_call(node): + block = node.waitfornewblock() + assert_equal(block['height'], 0) + +class ShutdownTest(BitcoinTestFramework): + + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir) + Thread(target=test_long_call, args=(node,)).start() + # wait 1 second to ensure event loop waits for current connections to close + self.stop_node(0, wait=1000) + +if __name__ == '__main__': + ShutdownTest().main() diff --git a/test/functional/interface_http.py b/test/functional/interface_http.py index e4b86f9e1e..20889366e5 100755 --- a/test/functional/interface_http.py +++ b/test/functional/interface_http.py @@ -31,13 +31,13 @@ class HTTPBasicsTest (BitcoinTestFramework): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock!=None) #according to http/1.1 connection must still be open! + assert(conn.sock is not None) #according to http/1.1 connection must still be open! #send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) #must also response with a correct json-rpc message - assert(conn.sock!=None) #according to http/1.1 connection must still be open! + assert(conn.sock is not None) #according to http/1.1 connection must still be open! conn.close() #same should be if we add keep-alive because this should be the std. behaviour @@ -48,13 +48,13 @@ class HTTPBasicsTest (BitcoinTestFramework): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock!=None) #according to http/1.1 connection must still be open! + assert(conn.sock is not None) #according to http/1.1 connection must still be open! #send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) #must also response with a correct json-rpc message - assert(conn.sock!=None) #according to http/1.1 connection must still be open! + assert(conn.sock is not None) #according to http/1.1 connection must still be open! 
conn.close() #now do the same with "Connection: close" @@ -65,7 +65,7 @@ class HTTPBasicsTest (BitcoinTestFramework): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock==None) #now the connection must be closed after the response + assert(conn.sock is None) #now the connection must be closed after the response #node1 (2nd node) is running with disabled keep-alive option urlNode1 = urllib.parse.urlparse(self.nodes[1].url) @@ -88,7 +88,7 @@ class HTTPBasicsTest (BitcoinTestFramework): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock!=None) #connection must be closed because bitcoind should use keep-alive by default + assert(conn.sock is not None) #connection must be closed because bitcoind should use keep-alive by default # Check excessive request size conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 8847777ba7..e2a219b85a 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -1,10 +1,12 @@ #!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers +# Copyright (c) 2017-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool acceptance of raw transactions.""" from io import BytesIO +import math + from test_framework.test_framework import BitcoinTestFramework from test_framework.messages import ( BIP125_SEQUENCE_NUMBER, @@ -56,6 +58,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): self.mempool_size = 0 wait_until(lambda: node.getblockcount() == 200) assert_equal(node.getmempoolinfo()['size'], self.mempool_size) + coins = node.listunspent() self.log.info('Should not accept garbage to testmempoolaccept') assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar')) @@ -63,13 +66,14 @@ class MempoolAcceptanceTest(BitcoinTestFramework): assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar'])) self.log.info('A transaction already in the blockchain') - coin = node.listunspent()[0] # Pick a random coin(base) to spend + coin = coins.pop() # Pick a random coin(base) to spend raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction( inputs=[{'txid': coin['txid'], 'vout': coin['vout']}], outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}], ))['hex'] txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, allowhighfees=True) node.generate(1) + self.mempool_size = 0 self.check_mempool_result( result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': '18: txn-already-known'}], rawtxs=[raw_tx_in_block], @@ -89,9 +93,25 @@ class MempoolAcceptanceTest(BitcoinTestFramework): rawtxs=[raw_tx_0], ) + self.log.info('A final transaction not in the mempool') + coin = coins.pop() # Pick a random coin(base) to spend + raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction( + inputs=[{'txid': coin['txid'], 'vout': coin['vout'], "sequence": 0xffffffff}], # SEQUENCE_FINAL + outputs=[{node.getnewaddress(): 0.025}], + locktime=node.getblockcount() + 2000, # Can be anything + ))['hex'] + tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_final))) + 
self.check_mempool_result( + result_expected=[{'txid': tx.rehash(), 'allowed': True}], + rawtxs=[bytes_to_hex_str(tx.serialize())], + allowhighfees=True, + ) + node.sendrawtransaction(hexstring=raw_tx_final, allowhighfees=True) + self.mempool_size += 1 + self.log.info('A transaction in the mempool') node.sendrawtransaction(hexstring=raw_tx_0) - self.mempool_size = 1 + self.mempool_size += 1 self.check_mempool_result( result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': '18: txn-already-in-mempool'}], rawtxs=[raw_tx_0], @@ -181,7 +201,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): self.log.info('A really large transaction') tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) - tx.vin = [tx.vin[0]] * (MAX_BLOCK_BASE_SIZE // len(tx.vin[0].serialize())) + tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize())) self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-oversize'}], rawtxs=[bytes_to_hex_str(tx.serialize())], diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index b4e9d967fd..d74d4eaaf1 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -42,6 +42,7 @@ import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until + class MempoolPersistTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 3 @@ -60,7 +61,7 @@ class MempoolPersistTest(BitcoinTestFramework): self.log.debug("Send 5 transactions from node2 (to its own address)") for i in range(5): - self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10")) + last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10")) node2_balance = self.nodes[2].getbalance() self.sync_all() @@ -68,6 +69,13 @@ class MempoolPersistTest(BitcoinTestFramework): assert_equal(len(self.nodes[0].getrawmempool()), 5) assert_equal(len(self.nodes[1].getrawmempool()), 5) + self.log.debug("Prioritize a transaction on node0") + fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] + assert_equal(fees['base'], fees['modified']) + self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000) + fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] + assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified']) + self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.") self.stop_nodes() # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later @@ -81,6 +89,10 @@ class MempoolPersistTest(BitcoinTestFramework): # The others have loaded their mempool. 
If node_1 loaded anything, we'd probably notice by now: assert_equal(len(self.nodes[1].getrawmempool()), 0) + self.log.debug('Verify prioritization is loaded correctly') + fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] + assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified']) + # Verify accounting of mempool transactions after restart is correct self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet assert_equal(node2_balance, self.nodes[2].getbalance()) diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py index 9f01be0646..661d9f4c97 100755 --- a/test/functional/mining_basic.py +++ b/test/functional/mining_basic.py @@ -25,12 +25,12 @@ from test_framework.util import ( assert_raises_rpc_error, bytes_to_hex_str as b2x, ) - +from test_framework.script import CScriptNum def assert_template(node, block, expect, rehash=True): if rehash: block.hashMerkleRoot = block.calc_merkle_root() - rsp = node.getblocktemplate(template_request={'data': b2x(block.serialize()), 'mode': 'proposal'}) + rsp = node.getblocktemplate(template_request={'data': b2x(block.serialize()), 'mode': 'proposal', 'rules': ['segwit']}) assert_equal(rsp, expect) @@ -60,16 +60,24 @@ class MiningTest(BitcoinTestFramework): # Mine a block to leave initial block download node.generatetoaddress(1, node.get_deterministic_priv_key().address) - tmpl = node.getblocktemplate() + tmpl = node.getblocktemplate({'rules': ['segwit']}) self.log.info("getblocktemplate: Test capability advertised") assert 'proposal' in tmpl['capabilities'] assert 'coinbasetxn' not in tmpl - coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1) + next_height = int(tmpl["height"]) + coinbase_tx = create_coinbase(height=next_height) # sequence numbers must not be max for nLockTime to have effect coinbase_tx.vin[0].nSequence = 2 ** 32 - 2 coinbase_tx.rehash() + # round-trip the encoded bip34 block height commitment + assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), next_height) + # round-trip negative and multi-byte CScriptNums to catch python regression + assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(1500))), 1500) + assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1500))), -1500) + assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1))), -1) + block = CBlock() block.nVersion = tmpl["version"] block.hashPrevBlock = int(tmpl["previousblockhash"], 16) @@ -78,6 +86,9 @@ class MiningTest(BitcoinTestFramework): block.nNonce = 0 block.vtx = [coinbase_tx] + self.log.info("getblocktemplate: segwit rule must be set") + assert_raises_rpc_error(-8, "getblocktemplate must be called with the segwit rule set", node.getblocktemplate) + self.log.info("getblocktemplate: Test valid block") assert_template(node, block, None) @@ -94,7 +105,7 @@ class MiningTest(BitcoinTestFramework): assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize())) self.log.info("getblocktemplate: Test truncated final transaction") - assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'}) + assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal', 'rules': ['segwit']}) self.log.info("getblocktemplate: Test duplicate transaction") bad_block = copy.deepcopy(block) @@ -124,7 +135,7 @@ class MiningTest(BitcoinTestFramework): bad_block_sn = bytearray(block.serialize()) 
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1) bad_block_sn[TX_COUNT_OFFSET] += 1 - assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'}) + assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal', 'rules': ['segwit']}) self.log.info("getblocktemplate: Test bad bits") bad_block = copy.deepcopy(block) diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py index 9a3c15a4a7..72cde8e811 100755 --- a/test/functional/mining_getblocktemplate_longpoll.py +++ b/test/functional/mining_getblocktemplate_longpoll.py @@ -15,14 +15,14 @@ class LongpollThread(threading.Thread): def __init__(self, node): threading.Thread.__init__(self) # query current longpollid - template = node.getblocktemplate() + template = node.getblocktemplate({'rules': ['segwit']}) self.longpollid = template['longpollid'] # create a new connection to the node, we can't use the same # connection from two threads self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir) def run(self): - self.node.getblocktemplate({'longpollid':self.longpollid}) + self.node.getblocktemplate({'longpollid': self.longpollid, 'rules': ['segwit']}) class GetBlockTemplateLPTest(BitcoinTestFramework): def set_test_params(self): @@ -34,10 +34,10 @@ class GetBlockTemplateLPTest(BitcoinTestFramework): def run_test(self): self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.") self.nodes[0].generate(10) - template = self.nodes[0].getblocktemplate() + template = self.nodes[0].getblocktemplate({'rules': ['segwit']}) longpollid = template['longpollid'] # longpollid should not change between successive invocations if nothing else happens - template2 = self.nodes[0].getblocktemplate() + template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']}) assert(template2['longpollid'] == longpollid) # Test 1: test that the longpolling wait if we do nothing diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py index c5ddee56f1..ca4b621a78 100755 --- a/test/functional/mining_prioritisetransaction.py +++ b/test/functional/mining_prioritisetransaction.py @@ -84,7 +84,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework): high_fee_tx = x # Something high-fee should have been mined! - assert(high_fee_tx != None) + assert(high_fee_tx is not None) # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). @@ -142,10 +142,10 @@ class PrioritiseTransactionTest(BitcoinTestFramework): # getblocktemplate to (eventually) return a new block. 
mock_time = int(time.time()) self.nodes[0].setmocktime(mock_time) - template = self.nodes[0].getblocktemplate() + template = self.nodes[0].getblocktemplate({'rules': ['segwit']}) self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN)) self.nodes[0].setmocktime(mock_time+10) - new_template = self.nodes[0].getblocktemplate() + new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']}) assert(template != new_template) diff --git a/test/functional/p2p_invalid_locator.py b/test/functional/p2p_invalid_locator.py index c8c752d1f7..33b7060060 100755 --- a/test/functional/p2p_invalid_locator.py +++ b/test/functional/p2p_invalid_locator.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers +# Copyright (c) 2015-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid locators. diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index a2d40fab1a..dbc5c5fff6 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -3,6 +3,7 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid network messages.""" +import os import struct from test_framework import messages @@ -66,7 +67,10 @@ class InvalidMessagesTest(BitcoinTestFramework): msg_at_size = msg_unrecognized("b" * valid_data_limit) assert len(msg_at_size.serialize()) == msg_limit - with node.assert_memory_usage_stable(perc_increase_allowed=0.03): + increase_allowed = 0.5 + if [s for s in os.environ.get("BITCOIN_CONFIG", "").split(" ") if "--with-sanitizers" in s and "address" in s]: + increase_allowed = 3.5 + with node.assert_memory_usage_stable(increase_allowed=increase_allowed): self.log.info( "Sending a bunch of large, junk messages to test " "memory exhaustion. May take a bit...") @@ -82,7 +86,7 @@ class InvalidMessagesTest(BitcoinTestFramework): # Peer 1, despite serving up a bunch of nonsense, should still be connected. self.log.info("Waiting for node to drop junk messages.") - node.p2p.sync_with_ping(timeout=30) + node.p2p.sync_with_ping(timeout=120) assert node.p2p.is_connected # diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index 58e129b57d..1b18dd3e58 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -5,7 +5,7 @@ """Test node responses to invalid transactions. 
In this test we connect to one node over p2p, and test tx requests.""" -from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script +from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( COIN, COutPoint, @@ -19,6 +19,7 @@ from test_framework.util import ( assert_equal, wait_until, ) +from data import invalid_txs class InvalidTxRequestTest(BitcoinTestFramework): @@ -63,12 +64,21 @@ class InvalidTxRequestTest(BitcoinTestFramework): self.log.info("Mature the block.") self.nodes[0].generatetoaddress(100, self.nodes[0].get_deterministic_priv_key().address) - # b'\x64' is OP_NOTIF - # Transaction will be rejected with code 16 (REJECT_INVALID) - # and we get disconnected immediately - self.log.info('Test a transaction that is rejected') - tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x64' * 35, amount=50 * COIN - 12000) - node.p2p.send_txs_and_test([tx1], node, success=False, expect_disconnect=True) + # Iterate through a list of known invalid transaction types, ensuring each is + # rejected. Some are consensus invalid and some just violate policy. + for BadTxTemplate in invalid_txs.iter_all_templates(): + self.log.info("Testing invalid transaction: %s", BadTxTemplate.__name__) + template = BadTxTemplate(spend_block=block1) + tx = template.get_tx() + node.p2p.send_txs_and_test( + [tx], node, success=False, + expect_disconnect=template.expect_disconnect, + reject_reason=template.reject_reason, + ) + + if template.expect_disconnect: + self.log.info("Reconnecting to peer") + self.reconnect_p2p() # Make two p2p connections to provide the node with orphans # * p2ps[0] will send valid orphan txs (one with low fee) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index afbbfa8992..d95da227e5 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -545,31 +545,13 @@ class SegWitTest(BitcoinTestFramework): @subtest def test_getblocktemplate_before_lockin(self): - # Node0 is segwit aware, node2 is not. - for node in [self.nodes[0], self.nodes[2]]: - gbt_results = node.getblocktemplate() - block_version = gbt_results['version'] - # If we're not indicating segwit support, we will still be - # signalling for segwit activation. - assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0]) - # If we don't specify the segwit rule, then we won't get a default - # commitment. - assert('default_witness_commitment' not in gbt_results) - - # Workaround: - # Can either change the tip, or change the mempool and wait 5 seconds - # to trigger a recomputation of getblocktemplate. txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16) - # Using mocktime lets us avoid sleep() - sync_mempools(self.nodes) - self.nodes[0].setmocktime(int(time.time()) + 10) - self.nodes[2].setmocktime(int(time.time()) + 10) for node in [self.nodes[0], self.nodes[2]]: gbt_results = node.getblocktemplate({"rules": ["segwit"]}) block_version = gbt_results['version'] if node == self.nodes[2]: - # If this is a non-segwit node, we should still not get a witness + # If this is a non-segwit node, we should not get a witness # commitment, nor a version bit signalling segwit. 
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0) assert('default_witness_commitment' not in gbt_results) @@ -586,10 +568,6 @@ class SegWitTest(BitcoinTestFramework): script = get_witness_script(witness_root, 0) assert_equal(witness_commitment, bytes_to_hex_str(script)) - # undo mocktime - self.nodes[0].setmocktime(0) - self.nodes[2].setmocktime(0) - @subtest def advance_to_segwit_lockin(self): """Mine enough blocks to lock in segwit, but don't activate.""" diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py index 2459a9f243..02ceec3dc1 100755 --- a/test/functional/p2p_timeouts.py +++ b/test/functional/p2p_timeouts.py @@ -14,11 +14,11 @@ - Wait 1 second - Assert that we're connected - Send a ping to no_verack_node and no_version_node -- Wait 30 seconds +- Wait 1 second - Assert that we're still connected - Send a ping to no_verack_node and no_version_node -- Wait 31 seconds -- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds) +- Wait 2 seconds +- Assert that we're no longer connected (timeout to receive version/verack is 3 seconds) """ from time import sleep @@ -36,6 +36,8 @@ class TimeoutsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 + # set timeout to receive version/verack to 3 seconds + self.extra_args = [["-peertimeout=3"]] def run_test(self): # Setup the p2p connections @@ -52,7 +54,7 @@ class TimeoutsTest(BitcoinTestFramework): no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) - sleep(30) + sleep(1) assert "version" in no_verack_node.last_message @@ -63,11 +65,21 @@ class TimeoutsTest(BitcoinTestFramework): no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) - sleep(31) - - assert not no_verack_node.is_connected - assert not no_version_node.is_connected - assert not no_send_node.is_connected + expected_timeout_logs = [ + "version handshake timeout from 0", + "socket no message in first 3 seconds, 1 0 from 1", + "socket no message in first 3 seconds, 0 0 from 2", + ] + + with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs): + sleep(3) + # By now, we waited a total of 5 seconds. Off-by-two for two + # reasons: + # * The internal precision is one second + # * Account for network delay + assert not no_verack_node.is_connected + assert not no_version_node.is_connected + assert not no_send_node.is_connected if __name__ == '__main__': TimeoutsTest().main() diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py index 53916d5290..3938ca98dd 100755 --- a/test/functional/rpc_bind.py +++ b/test/functional/rpc_bind.py @@ -48,9 +48,12 @@ class RPCBindTest(BitcoinTestFramework): at a non-localhost IP. 
''' self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport)) - base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips] + node_args = \ + ['-disablewallet', '-nolisten'] + \ + ['-rpcallowip='+x for x in allow_ips] + \ + ['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang self.nodes[0].rpchost = None - self.start_nodes([base_args]) + self.start_nodes([node_args]) # connect to node through non-loopback interface node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir) node.getnetworkinfo() @@ -67,7 +70,7 @@ class RPCBindTest(BitcoinTestFramework): self.log.info("Check for ipv6") have_ipv6 = test_ipv6_local() - if not have_ipv6 and not self.options.run_ipv4: + if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback): raise SkipTest("This test requires ipv6 support.") self.log.info("Check for non-loopback interface") @@ -101,9 +104,9 @@ class RPCBindTest(BitcoinTestFramework): # check default without rpcallowip (IPv4 and IPv6 localhost) self.run_bind_test(None, '127.0.0.1', [], [('127.0.0.1', self.defaultport), ('::1', self.defaultport)]) - # check default with rpcallowip (IPv6 any) + # check default with rpcallowip (IPv4 and IPv6 localhost) self.run_bind_test(['127.0.0.1'], '127.0.0.1', [], - [('::0', self.defaultport)]) + [('127.0.0.1', self.defaultport), ('::1', self.defaultport)]) # check only IPv6 localhost (explicit) self.run_bind_test(['[::1]'], '[::1]', ['[::1]'], [('::1', self.defaultport)]) diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py index 0c61e9ab62..4f350953b2 100755 --- a/test/functional/rpc_fundrawtransaction.py +++ b/test/functional/rpc_fundrawtransaction.py @@ -32,7 +32,7 @@ class RawTransactionsTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() - def setup_network(self, split=False): + def setup_network(self): self.setup_nodes() connect_nodes_bi(self.nodes, 0, 1) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 0affddcf05..b12eb1d9ec 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -67,8 +67,8 @@ class NetTest(BitcoinTestFramework): peer_info_after_ping = self.nodes[0].getpeerinfo() for before, after in zip(peer_info, peer_info_after_ping): - assert_greater_than_or_equal(after['bytesrecv_per_msg']['pong'], before['bytesrecv_per_msg']['pong'] + 32) - assert_greater_than_or_equal(after['bytessent_per_msg']['ping'], before['bytessent_per_msg']['ping'] + 32) + assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32) + assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32) def _test_getnetworkinginfo(self): assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 04d9bb65a6..272ebe65cb 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -210,6 +210,10 @@ class PSBTTest(BitcoinTestFramework): assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE assert_equal(decoded_psbt["tx"]["locktime"], 0) + # Make sure change address wallet does not have P2SH innerscript access to results in success + # when attempting BnB coin selection + self.nodes[0].walletcreatefundedpsbt([], 
[{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False) + # Regression test for 14473 (mishandling of already-signed witness transaction): psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}]) complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"]) diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index 8ed490f552..5b9dbef68d 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -47,7 +47,7 @@ class RawTransactionsTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() - def setup_network(self, split=False): + def setup_network(self): super().setup_network() connect_nodes_bi(self.nodes, 0, 2) @@ -100,6 +100,8 @@ class RawTransactionsTest(BitcoinTestFramework): assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1}) assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)])) assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}]) + assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}]) + assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")])) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}]) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']]) @@ -127,19 +129,12 @@ class RawTransactionsTest(BitcoinTestFramework): bytes_to_hex_str(tx.serialize()), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]), ) - # Two data outputs - tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([('data', '99'), ('data', '99')]))))) - assert_equal(len(tx.vout), 2) - assert_equal( - bytes_to_hex_str(tx.serialize()), - self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{'data': '99'}, {'data': '99'}]), - ) # Multiple mixed outputs - tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), ('data', '99'), ('data', '99')]))))) + tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')]))))) assert_equal(len(tx.vout), 3) assert_equal( bytes_to_hex_str(tx.serialize()), - self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {'data': '99'}, {'data': '99'}]), + self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]), ) for type in ["bech32", "p2sh-segwit", "legacy"]: diff --git a/test/functional/rpc_scantxoutset.py b/test/functional/rpc_scantxoutset.py index 881b839a4e..11b4db6ec5 100755 --- 
a/test/functional/rpc_scantxoutset.py +++ b/test/functional/rpc_scantxoutset.py @@ -10,6 +10,9 @@ from decimal import Decimal import shutil import os +def descriptors(out): + return sorted(u['desc'] for u in out['unspents']) + class ScantxoutsetTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 @@ -93,5 +96,10 @@ class ScantxoutsetTest(BitcoinTestFramework): assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288")) assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672")) + # Test the reported descriptors for a few matches + assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])), ["pkh([0c5f9a1e/0'/0'/0]026dbd8b2315f296d36e6b6920b1579ca75569464875c7ebe869b536a7d9503c8c)", "pkh([0c5f9a1e/0'/0'/1]033e6f25d76c00bedb3a8993c7d5739ee806397f0529b1b31dda31ef890f19a60c)"]) + assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])), ["pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)"]) + assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])), ['pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)', 'pkh([0c5f9a1e/1/1/1500]03832901c250025da2aebae2bfb38d5c703a57ab66ad477f9c578bfbcd78abca6f)', 'pkh([0c5f9a1e/1/1/1]030d820fc9e8211c4169be8530efbc632775d8286167afd178caaf1089b77daba7)']) + if __name__ == '__main__': ScantxoutsetTest().main() diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 81cce1167b..7679ea5398 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -41,6 +41,8 @@ from .script import ( from .util import assert_equal from io import BytesIO +MAX_BLOCK_SIGOPS = 20000 + # From BIP141 WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed" diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index c72cb8835c..356a45d6d0 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -450,6 +450,8 @@ class CTransaction: if flags != 0: self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))] self.wit.deserialize(f) + else: + self.wit = CTxWitness() self.nLockTime = struct.unpack("<I", f.read(4))[0] self.sha256 = None self.hash = None @@ -764,7 +766,7 @@ class HeaderAndShortIDs: self.prefilled_txn = [] self.use_witness = False - if p2pheaders_and_shortids != None: + if p2pheaders_and_shortids is not None: self.header = p2pheaders_and_shortids.header self.nonce = p2pheaders_and_shortids.nonce self.shortids = p2pheaders_and_shortids.shortids @@ -822,7 +824,7 @@ class BlockTransactionsRequest: def __init__(self, blockhash=0, indexes = None): self.blockhash = blockhash - self.indexes 
= indexes if indexes != None else [] + self.indexes = indexes if indexes is not None else [] def deserialize(self, f): self.blockhash = deser_uint256(f) @@ -863,7 +865,7 @@ class BlockTransactions: def __init__(self, blockhash=0, transactions = None): self.blockhash = blockhash - self.transactions = transactions if transactions != None else [] + self.transactions = transactions if transactions is not None else [] def deserialize(self, f): self.blockhash = deser_uint256(f) @@ -1052,7 +1054,7 @@ class msg_getdata: command = b"getdata" def __init__(self, inv=None): - self.inv = inv if inv != None else [] + self.inv = inv if inv is not None else [] def deserialize(self, f): self.inv = deser_vector(f, CInv) diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index 2fe44010ba..012c80a1be 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -385,6 +385,22 @@ class CScriptNum: r[-1] |= 0x80 return bytes([len(r)]) + r + @staticmethod + def decode(vch): + result = 0 + # We assume valid push_size and minimal encoding + value = vch[1:] + if len(value) == 0: + return result + for i, byte in enumerate(value): + result |= int(byte) << 8*i + if value[-1] >= 0x80: + # Mask for all but the highest result bit + num_mask = (2**(len(value)*8) - 1) >> 1 + result &= num_mask + result *= -1 + return result + class CScript(bytes): """Serialized script @@ -434,6 +450,10 @@ class CScript(bytes): # join makes no sense for a CScript() raise NotImplementedError + # Python 3.4 compatibility + def hex(self): + return hexlify(self).decode('ascii') + def __new__(cls, value=b''): if isinstance(value, bytes) or isinstance(value, bytearray): return super(CScript, cls).__new__(cls, value) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 44fc185e6d..352fa32b5b 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -43,6 +43,8 @@ TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 +TMPDIR_PREFIX = "bitcoin_func_test_" + class SkipTest(Exception): """This exception is raised to skip a test""" @@ -93,7 +95,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): self.nodes = [] self.network_thread = None self.mocktime = 0 - self.rpc_timewait = 60 # Wait for up to 60 seconds for the RPC server to respond + self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond self.supports_cli = False self.bind_to_localhost_only = True self.set_test_params() @@ -151,7 +153,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: - self.options.tmpdir = tempfile.mkdtemp(prefix="test") + self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() self.log.debug('Setting up network thread') @@ -279,7 +281,10 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): # Public helper methods. These can be accessed by the subclass test scripts. def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None): - """Instantiate TestNode objects""" + """Instantiate TestNode objects. 
+ + Should only be called once after the nodes have been specified in + set_test_params().""" if self.bind_to_localhost_only: extra_confs = [["bind=127.0.0.1"]] * num_nodes else: @@ -292,7 +297,19 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): - self.nodes.append(TestNode(i, get_datadir_path(self.options.tmpdir, i), rpchost=rpchost, timewait=self.rpc_timewait, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli)) + self.nodes.append(TestNode( + i, + get_datadir_path(self.options.tmpdir, i), + rpchost=rpchost, + timewait=self.rpc_timeout, + bitcoind=binary[i], + bitcoin_cli=self.options.bitcoincli, + mocktime=self.mocktime, + coverage_dir=self.options.coveragedir, + extra_conf=extra_confs[i], + extra_args=extra_args[i], + use_cli=self.options.usecli, + )) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" @@ -325,16 +342,16 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): for node in self.nodes: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) - def stop_node(self, i, expected_stderr=''): + def stop_node(self, i, expected_stderr='', wait=0): """Stop a bitcoind test node""" - self.nodes[i].stop_node(expected_stderr) + self.nodes[i].stop_node(expected_stderr, wait=wait) self.nodes[i].wait_until_stopped() - def stop_nodes(self): + def stop_nodes(self, wait=0): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes - node.stop_node() + node.stop_node(wait=wait) for node in self.nodes: # Wait for nodes to stop @@ -371,21 +388,6 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): sync_blocks(group) sync_mempools(group) - def enable_mocktime(self): - """Enable mocktime for the script. - - mocktime may be needed for scripts that use the cached version of the - blockchain. If the cached version of the blockchain is used without - mocktime then the mempools will not sync due to IBD. - - For backward compatibility of the python scripts with previous - versions of the cache, this helper function sets mocktime to Jan 1, - 2014 + (201 * 10 * 60)""" - self.mocktime = 1388534400 + (201 * 10 * 60) - - def disable_mocktime(self): - self.mocktime = 0 - # Private helper methods. These should not be accessed by the subclass test scripts. 
def _start_logging(self): @@ -443,7 +445,18 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): args = [self.options.bitcoind, "-datadir=" + datadir, '-disablewallet'] if i > 0: args.append("-connect=127.0.0.1:" + str(p2p_port(0))) - self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), extra_conf=["bind=127.0.0.1"], extra_args=[], rpchost=None, timewait=self.rpc_timewait, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=None)) + self.nodes.append(TestNode( + i, + get_datadir_path(self.options.cachedir, i), + extra_conf=["bind=127.0.0.1"], + extra_args=[], + rpchost=None, + timewait=self.rpc_timeout, + bitcoind=self.options.bitcoind, + bitcoin_cli=self.options.bitcoincli, + mocktime=self.mocktime, + coverage_dir=None, + )) self.nodes[i].args = args self.start_node(i) @@ -451,6 +464,11 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): for node in self.nodes: node.wait_for_rpc_connection() + # For backward compatibility of the python scripts with previous + # versions of the cache, set mocktime to Jan 1, + # 2014 + (201 * 10 * 60)""" + self.mocktime = 1388534400 + (201 * 10 * 60) + # Create a 200-block-long chain; each of the 4 first nodes # gets 25 mature blocks and 25 immature. # Note: To preserve compatibility with older versions of @@ -458,7 +476,6 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): # # blocks are created with timestamps 10 minutes apart # starting from 2010 minutes in the past - self.enable_mocktime() block_time = self.mocktime - (201 * 10 * 60) for i in range(2): for peer in range(4): @@ -472,7 +489,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): # Shut them down, and clean up cache directories: self.stop_nodes() self.nodes = [] - self.disable_mocktime() + self.mocktime = 0 def cache_path(n, *paths): return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 9dcc0e6d0e..031a8824b1 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -68,7 +68,7 @@ class TestNode(): self.rpc_timeout = timewait self.binary = bitcoind self.coverage_dir = coverage_dir - if extra_conf != None: + if extra_conf is not None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the standard list below. # For those callers that need more flexibility, they can just set the args property directly. @@ -115,7 +115,7 @@ class TestNode(): ] return PRIV_KEYS[self.index] - def get_mem_rss(self): + def get_mem_rss_kilobytes(self): """Get the memory usage (RSS) per `ps`. Returns None if `ps` is unavailable. 
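Aside on the mocktime that cache creation now sets inline (replacing the removed enable_mocktime()): it is simply Jan 1 2014 plus 201 ten-minute block intervals, so the block_time derived from it starts exactly at Jan 1 2014. A small stand-alone check of that arithmetic, independent of the framework:

    # Stand-alone arithmetic check; the constants mirror the values in the hunk above.
    import datetime

    JAN_1_2014 = 1388534400                      # 2014-01-01T00:00:00Z
    mocktime = JAN_1_2014 + (201 * 10 * 60)      # 201 blocks, 10 minutes apart
    block_time = mocktime - (201 * 10 * 60)      # timestamp of the first cached block

    assert block_time == JAN_1_2014
    assert datetime.datetime.utcfromtimestamp(mocktime).isoformat() == '2014-01-02T09:30:00'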
@@ -228,13 +228,13 @@ class TestNode(): wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name)) return self.rpc / wallet_path - def stop_node(self, expected_stderr=''): + def stop_node(self, expected_stderr='', wait=0): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: - self.stop() + self.stop(wait=wait) except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") @@ -291,15 +291,19 @@ class TestNode(): self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log)) @contextlib.contextmanager - def assert_memory_usage_stable(self, perc_increase_allowed=0.03): + def assert_memory_usage_stable(self, *, increase_allowed=0.03): """Context manager that allows the user to assert that a node's memory usage (RSS) hasn't increased beyond some threshold percentage. + + Args: + increase_allowed (float): the fractional increase in memory allowed until failure; + e.g. `0.12` for up to 12% increase allowed. """ - before_memory_usage = self.get_mem_rss() + before_memory_usage = self.get_mem_rss_kilobytes() yield - after_memory_usage = self.get_mem_rss() + after_memory_usage = self.get_mem_rss_kilobytes() if not (before_memory_usage and after_memory_usage): self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.") @@ -307,10 +311,10 @@ class TestNode(): perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1 - if perc_increase_memory_usage > perc_increase_allowed: + if perc_increase_memory_usage > increase_allowed: self._raise_assertion_error( "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format( - perc_increase_allowed * 100, before_memory_usage, after_memory_usage, + increase_allowed * 100, before_memory_usage, after_memory_usage, perc_increase_memory_usage * 100)) def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index b355816d8b..d0a78d8dfd 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -326,7 +326,7 @@ def get_auth_cookie(datadir): if line.startswith("rpcpassword="): assert password is None # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") - if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): + if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK): with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f: userpass = f.read() split_userpass = userpass.split(':') diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index da55a3a156..a094433942 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -153,6 +153,7 @@ BASE_SCRIPTS = [ 'wallet_importprunedfunds.py', 'p2p_leak_tx.py', 'rpc_signmessage.py', + 'wallet_balance.py', 'feature_nulldummy.py', 'mempool_accept.py', 'wallet_import_rescan.py', @@ -175,6 +176,7 @@ BASE_SCRIPTS = [ 'rpc_getblockstats.py', 'p2p_fingerprint.py', 'feature_uacomment.py', + 'wallet_coinbase_category.py', 'feature_filelock.py', 'p2p_unrequested_blocks.py', 'feature_includeconf.py', @@ -185,6 +187,7 @@ BASE_SCRIPTS = [ 'feature_config_args.py', 'rpc_help.py', 'feature_help.py', + 'feature_shutdown.py', # Don't append tests at the end to avoid merge conflicts # 
Put them in a random line within the section that fits their approximate run-time ] @@ -272,7 +275,7 @@ def main(): if tests: # Individual tests have been specified. Run specified tests that exist # in the ALL_SCRIPTS list. Accept the name with or without .py extension. - tests = [re.sub("\.py$", "", test) + ".py" for test in tests] + tests = [test + ".py" if ".py" not in test else test for test in tests] for test in tests: if test in ALL_SCRIPTS: test_list.append(test) diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py index 0f75045c9d..bafa556aad 100755 --- a/test/functional/wallet_address_types.py +++ b/test/functional/wallet_address_types.py @@ -99,6 +99,8 @@ class AddressTypeTest(BitcoinTestFramework): """Run sanity checks on an address.""" info = self.nodes[node].getaddressinfo(address) assert(self.nodes[node].validateaddress(address)['isvalid']) + assert_equal(info.get('solvable'), True) + if not multisig and typ == 'legacy': # P2PKH assert(not info['isscript']) @@ -146,6 +148,47 @@ class AddressTypeTest(BitcoinTestFramework): # Unknown type assert(False) + def test_desc(self, node, address, multisig, typ, utxo): + """Run sanity checks on a descriptor reported by getaddressinfo.""" + info = self.nodes[node].getaddressinfo(address) + assert('desc' in info) + assert_equal(info['desc'], utxo['desc']) + assert(self.nodes[node].validateaddress(address)['isvalid']) + + # Use a ridiculously roundabout way to find the key origin info through + # the PSBT logic. However, this does test consistency between the PSBT reported + # fingerprints/paths and the descriptor logic. + psbt = self.nodes[node].createpsbt([{'txid':utxo['txid'], 'vout':utxo['vout']}],[{address:0.00010000}]) + psbt = self.nodes[node].walletprocesspsbt(psbt, False, "ALL", True) + decode = self.nodes[node].decodepsbt(psbt['psbt']) + key_descs = {} + for deriv in decode['inputs'][0]['bip32_derivs']: + assert_equal(len(deriv['master_fingerprint']), 8) + assert_equal(deriv['path'][0], 'm') + key_descs[deriv['pubkey']] = '[' + deriv['master_fingerprint'] + deriv['path'][1:] + ']' + deriv['pubkey'] + + if not multisig and typ == 'legacy': + # P2PKH + assert_equal(info['desc'], "pkh(%s)" % key_descs[info['pubkey']]) + elif not multisig and typ == 'p2sh-segwit': + # P2SH-P2WPKH + assert_equal(info['desc'], "sh(wpkh(%s))" % key_descs[info['pubkey']]) + elif not multisig and typ == 'bech32': + # P2WPKH + assert_equal(info['desc'], "wpkh(%s)" % key_descs[info['pubkey']]) + elif typ == 'legacy': + # P2SH-multisig + assert_equal(info['desc'], "sh(multi(2,%s,%s))" % (key_descs[info['pubkeys'][0]], key_descs[info['pubkeys'][1]])) + elif typ == 'p2sh-segwit': + # P2SH-P2WSH-multisig + assert_equal(info['desc'], "sh(wsh(multi(2,%s,%s)))" % (key_descs[info['embedded']['pubkeys'][0]], key_descs[info['embedded']['pubkeys'][1]])) + elif typ == 'bech32': + # P2WSH-multisig + assert_equal(info['desc'], "wsh(multi(2,%s,%s))" % (key_descs[info['pubkeys'][0]], key_descs[info['pubkeys'][1]])) + else: + # Unknown type + assert(False) + def test_change_output_type(self, node_sender, destinations, expected_type): txid = self.nodes[node_sender].sendmany(dummy="", amounts=dict.fromkeys(destinations, 0.001)) raw_tx = self.nodes[node_sender].getrawtransaction(txid) @@ -198,6 +241,7 @@ class AddressTypeTest(BitcoinTestFramework): self.log.debug("Old balances are {}".format(old_balances)) to_send = (old_balances[from_node] / 101).quantize(Decimal("0.00000001")) sends = {} + addresses = {} self.log.debug("Prepare 
sends") for n, to_node in enumerate(range(from_node, from_node + 4)): @@ -228,6 +272,7 @@ class AddressTypeTest(BitcoinTestFramework): # Output entry sends[address] = to_send * 10 * (1 + n) + addresses[to_node] = (address, typ) self.log.debug("Sending: {}".format(sends)) self.nodes[from_node].sendmany("", sends) @@ -244,6 +289,17 @@ class AddressTypeTest(BitcoinTestFramework): self.nodes[5].generate(1) sync_blocks(self.nodes) + # Verify that the receiving wallet contains a UTXO with the expected address, and expected descriptor + for n, to_node in enumerate(range(from_node, from_node + 4)): + to_node %= 4 + found = False + for utxo in self.nodes[to_node].listunspent(): + if utxo['address'] == addresses[to_node][0]: + found = True + self.test_desc(to_node, addresses[to_node][0], multisig, addresses[to_node][1], utxo) + break + assert found + new_balances = self.get_balances() self.log.debug("Check new balances: {}".format(new_balances)) # We don't know what fee was set, so we can only check bounds on the balance of the sending node diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 32ec385fa1..dd3750203a 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -48,7 +48,7 @@ class WalletBackupTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() - def setup_network(self, split=False): + def setup_network(self): self.setup_nodes() connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[1], 3) diff --git a/test/functional/wallet_balance.py b/test/functional/wallet_balance.py new file mode 100755 index 0000000000..05c97e0340 --- /dev/null +++ b/test/functional/wallet_balance.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the wallet balance RPC methods.""" +from decimal import Decimal + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, +) + +RANDOM_COINBASE_ADDRESS = 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ' + +def create_transactions(node, address, amt, fees): + # Create and sign raw transactions from node to address for amt. + # Creates a transaction for each fee and returns an array + # of the raw transactions. 
+ utxos = node.listunspent(0) + + # Create transactions + inputs = [] + ins_total = 0 + for utxo in utxos: + inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) + ins_total += utxo['amount'] + if ins_total > amt: + break + + txs = [] + for fee in fees: + outputs = {address: amt, node.getrawchangeaddress(): ins_total - amt - fee} + raw_tx = node.createrawtransaction(inputs, outputs, 0, True) + raw_tx = node.signrawtransactionwithwallet(raw_tx) + txs.append(raw_tx) + + return txs + +class WalletTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + def run_test(self): + # Check that nodes don't own any UTXOs + assert_equal(len(self.nodes[0].listunspent()), 0) + assert_equal(len(self.nodes[1].listunspent()), 0) + + self.log.info("Mining one block for each node") + + self.nodes[0].generate(1) + self.sync_all() + self.nodes[1].generate(1) + self.nodes[1].generatetoaddress(100, RANDOM_COINBASE_ADDRESS) + self.sync_all() + + assert_equal(self.nodes[0].getbalance(), 50) + assert_equal(self.nodes[1].getbalance(), 50) + + self.log.info("Test getbalance with different arguments") + assert_equal(self.nodes[0].getbalance("*"), 50) + assert_equal(self.nodes[0].getbalance("*", 1), 50) + assert_equal(self.nodes[0].getbalance("*", 1, True), 50) + assert_equal(self.nodes[0].getbalance(minconf=1), 50) + + # Send 40 BTC from 0 to 1 and 60 BTC from 1 to 0. + txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')]) + self.nodes[0].sendrawtransaction(txs[0]['hex']) + self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation + + self.sync_all() + txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')]) + self.nodes[1].sendrawtransaction(txs[0]['hex']) + self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation + self.sync_all() + + # First argument of getbalance must be set to "*" + assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "") + + self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs") + + # getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions + assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send + assert_equal(self.nodes[1].getbalance(), Decimal('29.99')) # change from node 1's send + # Same with minconf=0 + assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99')) + assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('29.99')) + # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago + # TODO: fix getbalance tracking of coin spentness depth + assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0')) + assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0')) + # getunconfirmedbalance + assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend + assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) # Doesn't include output of node 0's send since it was spent + + # Node 1 bumps the transaction fee and resends + self.nodes[1].sendrawtransaction(txs[1]['hex']) + self.sync_all() + + self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs") + 
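The balance expectations above follow from straightforward arithmetic on the two unconfirmed spends; a stand-alone recap using the same amounts (values in BTC):

    from decimal import Decimal

    # Node 0: one 50 BTC coinbase, sends 40 BTC with a 0.01 BTC fee. Its trusted
    # balance is the unconfirmed change from its own transaction:
    assert Decimal('50') - Decimal('40') - Decimal('0.01') == Decimal('9.99')

    # Node 1: its own 50 BTC coinbase plus the unconfirmed 40 BTC from node 0
    # (selectable via listunspent(0)), sends 60 BTC with a 0.01 BTC fee:
    assert Decimal('50') + Decimal('40') - Decimal('60') - Decimal('0.01') == Decimal('29.99')

    # Node 1's unconfirmed 60 BTC payment to node 0 is untrusted, so it shows up
    # in node 0's getunconfirmedbalance but not in its getbalance.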
+ assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60')) # output of node 1's send + assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) + assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('0')) # Doesn't include output of node 0's send since it was spent + assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) + + self.nodes[1].generatetoaddress(1, RANDOM_COINBASE_ADDRESS) + self.sync_all() + + # balances are correct after the transactions are confirmed + assert_equal(self.nodes[0].getbalance(), Decimal('69.99')) # node 1's send plus change from node 0's send + assert_equal(self.nodes[1].getbalance(), Decimal('29.98')) # change from node 0's send + + # Send total balance away from node 1 + txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')]) + self.nodes[1].sendrawtransaction(txs[0]['hex']) + self.nodes[1].generatetoaddress(2, RANDOM_COINBASE_ADDRESS) + self.sync_all() + + # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago + # TODO: fix getbalance tracking of coin spentness depth + # getbalance with minconf=3 should still show the old balance + assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0')) + + # getbalance with minconf=2 will show the new balance. + assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0')) + +if __name__ == '__main__': + WalletTest().main() diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index c9b40905f0..7184bb8cb6 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -67,15 +67,6 @@ class WalletTest(BitcoinTestFramework): assert_equal(self.nodes[1].getbalance(), 50) assert_equal(self.nodes[2].getbalance(), 0) - # Check getbalance with different arguments - assert_equal(self.nodes[0].getbalance("*"), 50) - assert_equal(self.nodes[0].getbalance("*", 1), 50) - assert_equal(self.nodes[0].getbalance("*", 1, True), 50) - assert_equal(self.nodes[0].getbalance(minconf=1), 50) - - # first argument of getbalance must be excluded or set to "*" - assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[0].getbalance, "") - # Check that only first and second nodes have UTXOs utxos = self.nodes[0].listunspent() assert_equal(len(utxos), 1) @@ -248,10 +239,6 @@ class WalletTest(BitcoinTestFramework): assert(txid1 in self.nodes[3].getrawmempool()) - # Exercise balance rpcs - assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1) - assert_equal(self.nodes[0].getunconfirmedbalance(), 1) - # check if we can list zero value tx as available coins # 1. create raw_tx # 2. hex-changed one output to 0.0 diff --git a/test/functional/wallet_coinbase_category.py b/test/functional/wallet_coinbase_category.py new file mode 100755 index 0000000000..7aa8b44ebd --- /dev/null +++ b/test/functional/wallet_coinbase_category.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test coinbase transactions return the correct categories. + +Tests listtransactions, listsinceblock, and gettransaction. 
+""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_array_result +) + +class CoinbaseCategoryTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + def assert_category(self, category, address, txid, skip): + assert_array_result(self.nodes[0].listtransactions(skip=skip), + {"address": address}, + {"category": category}) + assert_array_result(self.nodes[0].listsinceblock()["transactions"], + {"address": address}, + {"category": category}) + assert_array_result(self.nodes[0].gettransaction(txid)["details"], + {"address": address}, + {"category": category}) + + def run_test(self): + # Generate one block to an address + address = self.nodes[0].getnewaddress() + self.nodes[0].generatetoaddress(1, address) + hash = self.nodes[0].getbestblockhash() + txid = self.nodes[0].getblock(hash)["tx"][0] + + # Coinbase transaction is immature after 1 confirmation + self.assert_category("immature", address, txid, 0) + + # Mine another 99 blocks on top + self.nodes[0].generate(99) + # Coinbase transaction is still immature after 100 confirmations + self.assert_category("immature", address, txid, 99) + + # Mine one more block + self.nodes[0].generate(1) + # Coinbase transaction is now matured, so category is "generate" + self.assert_category("generate", address, txid, 100) + + # Orphan block that paid to address + self.nodes[0].invalidateblock(hash) + # Coinbase transaction is now orphaned + self.assert_category("orphan", address, txid, 100) + +if __name__ == '__main__': + CoinbaseCategoryTest().main() diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py index 20cb816ee8..3f39654bb8 100755 --- a/test/functional/wallet_dump.py +++ b/test/functional/wallet_dump.py @@ -94,7 +94,7 @@ class WalletDumpTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() - def setup_network(self, split=False): + def setup_network(self): self.add_nodes(self.num_nodes, extra_args=self.extra_args) self.start_nodes() diff --git a/test/functional/wallet_encryption.py b/test/functional/wallet_encryption.py index ab9ebed8d4..c514b7e0b4 100755 --- a/test/functional/wallet_encryption.py +++ b/test/functional/wallet_encryption.py @@ -31,12 +31,18 @@ class WalletEncryptionTest(BitcoinTestFramework): privkey = self.nodes[0].dumpprivkey(address) assert_equal(privkey[:1], "c") assert_equal(len(privkey), 52) + assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrase was called", self.nodes[0].walletpassphrase, 'ff', 1) + assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrasechange was called.", self.nodes[0].walletpassphrasechange, 'ff', 'ff') # Encrypt the wallet + assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].encryptwallet, '') self.nodes[0].encryptwallet(passphrase) # Test that the wallet is encrypted assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address) + assert_raises_rpc_error(-15, "Error: running with an encrypted wallet, but encryptwallet was called.", self.nodes[0].encryptwallet, 'ff') + assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrase, '', 1) + assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrasechange, '', 'ff') # Check that walletpassphrase works 
self.nodes[0].walletpassphrase(passphrase, 2) diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py index 9d61483868..5452433acf 100755 --- a/test/functional/wallet_groups.py +++ b/test/functional/wallet_groups.py @@ -21,7 +21,7 @@ class WalletGroupTest(BitcoinTestFramework): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [[], [], ['-avoidpartialspends']] - self.rpc_timewait = 120 + self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py index 5c789b1c03..3492075694 100755 --- a/test/functional/wallet_importmulti.py +++ b/test/functional/wallet_importmulti.py @@ -2,9 +2,43 @@ # Copyright (c) 2014-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the importmulti RPC.""" - -from test_framework import script +"""Test the importmulti RPC. + +Test importmulti by generating keys on node0, importing the scriptPubKeys and +addresses on node1 and then testing the address info for the different address +variants. + +- `get_key()` and `get_multisig()` are called to generate keys on node0 and + return the privkeys, pubkeys and all variants of scriptPubKey and address. +- `test_importmulti()` is called to send an importmulti call to node1, test + success, and (if unsuccessful) test the error code and error message returned. +- `test_address()` is called to call getaddressinfo for an address on node1 + and test the values returned.""" +from collections import namedtuple + +from test_framework.address import ( + key_to_p2pkh, + key_to_p2sh_p2wpkh, + key_to_p2wpkh, + script_to_p2sh, + script_to_p2sh_p2wsh, + script_to_p2wsh, +) +from test_framework.script import ( + CScript, + OP_0, + OP_2, + OP_3, + OP_CHECKMULTISIG, + OP_CHECKSIG, + OP_DUP, + OP_EQUAL, + OP_EQUALVERIFY, + OP_HASH160, + OP_NOP, + hash160, + sha256, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -13,12 +47,26 @@ from test_framework.util import ( bytes_to_hex_str, hex_str_to_bytes ) -from test_framework.script import ( - CScript, - OP_0, - hash160 -) -from test_framework.messages import sha256 + +Key = namedtuple('Key', ['privkey', + 'pubkey', + 'p2pkh_script', + 'p2pkh_addr', + 'p2wpkh_script', + 'p2wpkh_addr', + 'p2sh_p2wpkh_script', + 'p2sh_p2wpkh_redeem_script', + 'p2sh_p2wpkh_addr']) + +Multisig = namedtuple('Multisig', ['privkeys', + 'pubkeys', + 'p2sh_script', + 'p2sh_addr', + 'redeem_script', + 'p2wsh_script', + 'p2wsh_addr', + 'p2sh_p2wsh_script', + 'p2sh_p2wsh_addr']) class ImportMultiTest(BitcoinTestFramework): def set_test_params(self): @@ -32,7 +80,68 @@ class ImportMultiTest(BitcoinTestFramework): def setup_network(self): self.setup_nodes() - def run_test (self): + def get_key(self): + """Generate a fresh key on node0 + + Returns a named tuple of privkey, pubkey and all address and scripts.""" + addr = self.nodes[0].getnewaddress() + pubkey = self.nodes[0].getaddressinfo(addr)['pubkey'] + pkh = hash160(hex_str_to_bytes(pubkey)) + return Key(self.nodes[0].dumpprivkey(addr), + pubkey, + CScript([OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(), # p2pkh + key_to_p2pkh(pubkey), # p2pkh addr + CScript([OP_0, pkh]).hex(), # p2wpkh + key_to_p2wpkh(pubkey), # p2wpkh addr + CScript([OP_HASH160, hash160(CScript([OP_0, pkh])), OP_EQUAL]).hex(), # p2sh-p2wpkh + 
CScript([OP_0, pkh]).hex(), # p2sh-p2wpkh redeem script + key_to_p2sh_p2wpkh(pubkey)) # p2sh-p2wpkh addr + + def get_multisig(self): + """Generate a fresh multisig on node0 + + Returns a named tuple of privkeys, pubkeys and all address and scripts.""" + addrs = [] + pubkeys = [] + for _ in range(3): + addr = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) + addrs.append(addr['address']) + pubkeys.append(addr['pubkey']) + script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG]) + witness_script = CScript([OP_0, sha256(script_code)]) + return Multisig([self.nodes[0].dumpprivkey(addr) for addr in addrs], + pubkeys, + CScript([OP_HASH160, hash160(script_code), OP_EQUAL]).hex(), # p2sh + script_to_p2sh(script_code), # p2sh addr + script_code.hex(), # redeem script + witness_script.hex(), # p2wsh + script_to_p2wsh(script_code), # p2wsh addr + CScript([OP_HASH160, witness_script, OP_EQUAL]).hex(), # p2sh-p2wsh + script_to_p2sh_p2wsh(script_code)) # p2sh-p2wsh addr + + def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=[]): + """Run importmulti and assert success""" + result = self.nodes[1].importmulti([req]) + observed_warnings = [] + if 'warnings' in result[0]: + observed_warnings = result[0]['warnings'] + assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings))) + assert_equal(result[0]['success'], success) + if error_code is not None: + assert_equal(result[0]['error']['code'], error_code) + assert_equal(result[0]['error']['message'], error_message) + + def test_address(self, address, **kwargs): + """Get address info for `address` and test whether the returned values are as expected.""" + addr_info = self.nodes[1].getaddressinfo(address) + for key, value in kwargs.items(): + if value is None: + if key in addr_info.keys(): + raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key)) + elif addr_info[key] != value: + raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value)) + + def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(1) self.nodes[1].generate(1) @@ -40,587 +149,474 @@ class ImportMultiTest(BitcoinTestFramework): node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - #Check only one address + # Check only one address assert_equal(node0_address1['ismine'], True) - #Node 1 sync test - assert_equal(self.nodes[1].getblockcount(),1) + # Node 1 sync test + assert_equal(self.nodes[1].getblockcount(), 1) - #Address Test - before import + # Address Test - before import address_info = self.nodes[1].getaddressinfo(node0_address1['address']) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) - # RPC importmulti ----------------------------------------------- # Bitcoin Address (implicit non-internal) self.log.info("Should import an address") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - assert_equal(address_assert['ischange'], False) - watchonly_address = address['address'] + key = self.get_key() + address 
= key.p2pkh_addr + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now"}, + True) + self.test_address(address, + iswatchonly=True, + ismine=False, + timestamp=timestamp, + ischange=False) + watchonly_address = address watchonly_timestamp = timestamp self.log.info("Should not import an invalid address") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": "not valid address", - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Invalid address') + self.test_importmulti({"scriptPubKey": {"address": "not valid address"}, + "timestamp": "now"}, + False, + error_code=-5, + error_message='Invalid address \"not valid address\"') # ScriptPubKey + internal self.log.info("Should import a scriptPubKey with internal flag") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "internal": True - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - assert_equal(address_assert['ischange'], True) + key = self.get_key() + self.test_importmulti({"scriptPubKey": key.p2pkh_script, + "timestamp": "now", + "internal": True}, + True) + self.test_address(key.p2pkh_addr, + iswatchonly=True, + ismine=False, + timestamp=timestamp, + ischange=True) # ScriptPubKey + internal + label self.log.info("Should not allow a label to be specified when internal is true") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "internal": True, - "label": "Example label" - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Internal addresses should not have a label') + key = self.get_key() + self.test_importmulti({"scriptPubKey": key.p2pkh_script, + "timestamp": "now", + "internal": True, + "label": "Example label"}, + False, + error_code=-8, + error_message='Internal addresses should not have a label') # Nonstandard scriptPubKey + !internal self.log.info("Should not import a nonstandard scriptPubKey without internal flag") - nonstandardScriptPubKey = address['scriptPubKey'] + bytes_to_hex_str(script.CScript([script.OP_NOP])) - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": nonstandardScriptPubKey, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Internal must be set to true for nonstandard scriptPubKey imports.') - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - + nonstandardScriptPubKey = key.p2pkh_script + bytes_to_hex_str(CScript([OP_NOP])) + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey, + "timestamp": "now"}, + False, + error_code=-8, + error_message='Internal must be set to true 
for nonstandard scriptPubKey imports.') + self.test_address(address, + iswatchonly=False, + ismine=False, + timestamp=None) # Address + Public key + !Internal(explicit) self.log.info("Should import an address with public key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "pubkeys": [ address['pubkey'] ], - "internal": False - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "pubkeys": [key.pubkey], + "internal": False}, + True, + warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."]) + self.test_address(address, + iswatchonly=True, + ismine=False, + timestamp=timestamp) # ScriptPubKey + Public key + internal self.log.info("Should import a scriptPubKey with internal and with public key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - request = [{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "pubkeys": [ address['pubkey'] ], - "internal": True - }] - result = self.nodes[1].importmulti(requests=request) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": key.p2pkh_script, + "timestamp": "now", + "pubkeys": [key.pubkey], + "internal": True}, + True, + warnings=["Some private keys are missing, outputs will be considered watchonly. 
If this is intentional, specify the watchonly flag."]) + self.test_address(address, + iswatchonly=True, + ismine=False, + timestamp=timestamp) # Nonstandard scriptPubKey + Public key + !internal self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - request = [{ - "scriptPubKey": nonstandardScriptPubKey, - "timestamp": "now", - "pubkeys": [ address['pubkey'] ] - }] - result = self.nodes[1].importmulti(requests=request) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Internal must be set to true for nonstandard scriptPubKey imports.') - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey, + "timestamp": "now", + "pubkeys": [key.pubkey]}, + False, + error_code=-8, + error_message='Internal must be set to true for nonstandard scriptPubKey imports.') + self.test_address(address, + iswatchonly=False, + ismine=False, + timestamp=None) # Address + Private key + !watchonly self.log.info("Should import an address with private key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], True) - assert_equal(address_assert['timestamp'], timestamp) + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "keys": [key.privkey]}, + True) + self.test_address(address, + iswatchonly=False, + ismine=True, + timestamp=timestamp) self.log.info("Should not import an address with private key if is already imported") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -4) - assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script') + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "keys": [key.privkey]}, + False, + error_code=-4, + error_message='The wallet already contains the private key for this address or script') # Address + Private key + watchonly - self.log.info("Should not import an address with private key and with watchonly") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ], - "watchonly": True - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Watch-only addresses should not include private keys') - address_assert = 
self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) + self.log.info("Should import an address with private key and with watchonly") + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "keys": [key.privkey], + "watchonly": True}, + True, + warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."]) + self.test_address(address, + iswatchonly=False, + ismine=True, + timestamp=timestamp) # ScriptPubKey + Private key + internal self.log.info("Should import a scriptPubKey with internal and with private key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ], - "internal": True - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], True) - assert_equal(address_assert['timestamp'], timestamp) + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": key.p2pkh_script, + "timestamp": "now", + "keys": [key.privkey], + "internal": True}, + True) + self.test_address(address, + iswatchonly=False, + ismine=True, + timestamp=timestamp) # Nonstandard scriptPubKey + Private key + !internal self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": nonstandardScriptPubKey, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Internal must be set to true for nonstandard scriptPubKey imports.') - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - + key = self.get_key() + address = key.p2pkh_addr + self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey, + "timestamp": "now", + "keys": [key.privkey]}, + False, + error_code=-8, + error_message='Internal must be set to true for nonstandard scriptPubKey imports.') + self.test_address(address, + iswatchonly=False, + ismine=False, + timestamp=None) # P2SH address - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + multisig = self.get_multisig() self.nodes[1].generate(100) - self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh") - result = 
self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address']) - assert_equal(address_assert['isscript'], True) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['timestamp'], timestamp) - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr}, + "timestamp": "now"}, + True) + self.test_address(multisig.p2sh_addr, + isscript=True, + iswatchonly=True, + timestamp=timestamp) + p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], False) - # P2SH + Redeem script - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + multisig = self.get_multisig() self.nodes[1].generate(100) - self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh with respective redeem script") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "redeemscript": multi_sig_script['redeemScript'] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address']) - assert_equal(address_assert['timestamp'], timestamp) - - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr}, + "timestamp": "now", + "redeemscript": multisig.redeem_script}, + True, + warnings=["Some private keys are missing, outputs will be considered watchonly. 
If this is intentional, specify the watchonly flag."]) + self.test_address(multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True) + + p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], True) - # P2SH + Redeem script + Private Keys + !Watchonly - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + multisig = self.get_multisig() self.nodes[1].generate(100) - self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh with respective redeem script and private keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "redeemscript": multi_sig_script['redeemScript'], - "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address']) - assert_equal(address_assert['timestamp'], timestamp) - - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr}, + "timestamp": "now", + "redeemscript": multisig.redeem_script, + "keys": multisig.privkeys[0:2]}, + True, + warnings=["Some private keys are missing, outputs will be considered watchonly. 
If this is intentional, specify the watchonly flag."]) + self.test_address(multisig.p2sh_addr, + timestamp=timestamp, + ismine=False, + iswatchonly=True, + solvable=True) + + p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], True) # P2SH + Redeem script + Private Keys + Watchonly - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + multisig = self.get_multisig() self.nodes[1].generate(100) - self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh with respective redeem script and private keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "redeemscript": multi_sig_script['redeemScript'], - "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])], - "watchonly": True - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Watch-only addresses should not include private keys') - + self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr}, + "timestamp": "now", + "redeemscript": multisig.redeem_script, + "keys": multisig.privkeys[0:2], + "watchonly": True}, + True) + self.test_address(multisig.p2sh_addr, + iswatchonly=True, + ismine=False, + solvable=True, + timestamp=timestamp) # Address + Public key + !Internal + Wrong pubkey - self.log.info("Should not import an address with a wrong public key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "pubkeys": [ address2['pubkey'] ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Key does not match address destination') - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - + self.log.info("Should not import an address with the wrong public key as non-solvable") + key = self.get_key() + address = key.p2pkh_addr + wrong_key = self.get_key().pubkey + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "pubkeys": [wrong_key]}, + True, + warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. 
If this is intentional, specify the watchonly flag."]) + self.test_address(address, + iswatchonly=True, + ismine=False, + solvable=False, + timestamp=timestamp) # ScriptPubKey + Public key + internal + Wrong pubkey - self.log.info("Should not import a scriptPubKey with internal and with a wrong public key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - request = [{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "pubkeys": [ address2['pubkey'] ], - "internal": True - }] - result = self.nodes[1].importmulti(request) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Key does not match address destination') - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - + self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable") + key = self.get_key() + address = key.p2pkh_addr + wrong_key = self.get_key().pubkey + self.test_importmulti({"scriptPubKey": key.p2pkh_script, + "timestamp": "now", + "pubkeys": [wrong_key], + "internal": True}, + True, + warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."]) + self.test_address(address, + iswatchonly=True, + ismine=False, + solvable=False, + timestamp=timestamp) # Address + Private key + !watchonly + Wrong private key - self.log.info("Should not import an address with a wrong private key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address2['address']) ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Key does not match address destination') - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - + self.log.info("Should import an address with a wrong private key as non-solvable") + key = self.get_key() + address = key.p2pkh_addr + wrong_privkey = self.get_key().privkey + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "keys": [wrong_privkey]}, + True, + warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. 
If this is intentional, specify the watchonly flag."]) + self.test_address(address, + iswatchonly=True, + ismine=False, + solvable=False, + timestamp=timestamp) # ScriptPubKey + Private key + internal + Wrong private key - self.log.info("Should not import a scriptPubKey with internal and with a wrong private key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address2['address']) ], - "internal": True - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Key does not match address destination') - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - + self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable") + key = self.get_key() + address = key.p2pkh_addr + wrong_privkey = self.get_key().privkey + self.test_importmulti({"scriptPubKey": key.p2pkh_script, + "timestamp": "now", + "keys": [wrong_privkey], + "internal": True}, + True, + warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."]) + self.test_address(address, + iswatchonly=True, + ismine=False, + solvable=False, + timestamp=timestamp) # Importing existing watch only address with new timestamp should replace saved timestamp. assert_greater_than(timestamp, watchonly_timestamp) self.log.info("Should replace previously saved watch only timestamp.") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": watchonly_address, - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(watchonly_address) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) + self.test_importmulti({"scriptPubKey": {"address": watchonly_address}, + "timestamp": "now"}, + True) + self.test_address(watchonly_address, + iswatchonly=True, + ismine=False, + timestamp=timestamp) watchonly_timestamp = timestamp - # restart nodes to check for proper serialization/deserialization of watch only address self.stop_nodes() self.start_nodes() - address_assert = self.nodes[1].getaddressinfo(watchonly_address) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], watchonly_timestamp) + self.test_address(watchonly_address, + iswatchonly=True, + ismine=False, + timestamp=watchonly_timestamp) # Bad or missing timestamps self.log.info("Should throw on invalid or missing timestamp values") assert_raises_rpc_error(-3, 'Missing required timestamp field for key', - self.nodes[1].importmulti, [{ - "scriptPubKey": address['scriptPubKey'], - }]) + self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}]) assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. 
got type string', - self.nodes[1].importmulti, [{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "", - }]) + self.nodes[1].importmulti, [{ + "scriptPubKey": key.p2pkh_script, + "timestamp": "" + }]) # Import P2WPKH address as watch only self.log.info("Should import a P2WPKH address as watch only") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress(address_type="bech32")) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['solvable'], False) + key = self.get_key() + address = key.p2wpkh_addr + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now"}, + True) + self.test_address(address, + iswatchonly=True, + solvable=False) # Import P2WPKH address with public key but no private key self.log.info("Should import a P2WPKH address and public key as solvable but not spendable") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress(address_type="bech32")) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "pubkeys": [ address['pubkey'] ] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['solvable'], True) + key = self.get_key() + address = key.p2wpkh_addr + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "pubkeys": [key.pubkey]}, + True, + warnings=["Some private keys are missing, outputs will be considered watchonly. 
If this is intentional, specify the watchonly flag."]) + self.test_address(address, + ismine=False, + solvable=True) # Import P2WPKH address with key and check it is spendable self.log.info("Should import a P2WPKH address with key") - address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress(address_type="bech32")) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [self.nodes[0].dumpprivkey(address['address'])] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], True) + key = self.get_key() + address = key.p2wpkh_addr + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "keys": [key.privkey]}, + True) + self.test_address(address, + iswatchonly=False, + ismine=True) # P2WSH multisig address without scripts or keys - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].addmultisigaddress(2, [sig_address_1['pubkey'], sig_address_2['pubkey']], "", "bech32") + multisig = self.get_multisig() self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now" - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address']) - assert_equal(address_assert['solvable'], False) + self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr}, + "timestamp": "now"}, + True) + self.test_address(multisig.p2sh_addr, + solvable=False) # Same P2WSH multisig address as above, but now with witnessscript + private keys - self.log.info("Should import a p2wsh with respective redeem script and private keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "witnessscript": multi_sig_script['redeemScript'], - "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address']) ] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address']) - assert_equal(address_assert['solvable'], True) - assert_equal(address_assert['ismine'], True) - assert_equal(address_assert['sigsrequired'], 2) + self.log.info("Should import a p2wsh with respective witness script and private keys") + self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr}, + "timestamp": "now", + "witnessscript": multisig.redeem_script, + "keys": multisig.privkeys}, + True) + self.test_address(multisig.p2sh_addr, + solvable=True, + ismine=True, + sigsrequired=2) # P2SH-P2WPKH address with no redeemscript or public or private key - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress(address_type="p2sh-segwit")) - pubkeyhash = hash160(hex_str_to_bytes(sig_address_1['pubkey'])) - pkscript = CScript([OP_0, pubkeyhash]) + key = self.get_key() + address = key.p2sh_p2wpkh_addr self.log.info("Should import a p2sh-p2wpkh without redeem script or keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": sig_address_1['address'] - }, - "timestamp": "now" - }]) - 
assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(sig_address_1['address']) - assert_equal(address_assert['solvable'], False) - assert_equal(address_assert['ismine'], False) + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now"}, + True) + self.test_address(address, + solvable=False, + ismine=False) # P2SH-P2WPKH address + redeemscript + public key with no private key self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": sig_address_1['address'] - }, - "timestamp": "now", - "redeemscript": bytes_to_hex_str(pkscript), - "pubkeys": [ sig_address_1['pubkey'] ] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(sig_address_1['address']) - assert_equal(address_assert['solvable'], True) - assert_equal(address_assert['ismine'], False) + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "redeemscript": key.p2sh_p2wpkh_redeem_script, + "pubkeys": [key.pubkey]}, + True, + warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."]) + self.test_address(address, + solvable=True, + ismine=False) # P2SH-P2WPKH address + redeemscript + private key - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress(address_type="p2sh-segwit")) - pubkeyhash = hash160(hex_str_to_bytes(sig_address_1['pubkey'])) - pkscript = CScript([OP_0, pubkeyhash]) + key = self.get_key() + address = key.p2sh_p2wpkh_addr self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": sig_address_1['address'] - }, - "timestamp": "now", - "redeemscript": bytes_to_hex_str(pkscript), - "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address'])] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(sig_address_1['address']) - assert_equal(address_assert['solvable'], True) - assert_equal(address_assert['ismine'], True) - - # P2SH-P2WSH 1-of-1 multisig + redeemscript with no private key - sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].addmultisigaddress(1, [sig_address_1['pubkey']], "", "p2sh-segwit") - scripthash = sha256(hex_str_to_bytes(multi_sig_script['redeemScript'])) - redeem_script = CScript([OP_0, scripthash]) + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "redeemscript": key.p2sh_p2wpkh_redeem_script, + "keys": [key.privkey]}, + True) + self.test_address(address, + solvable=True, + ismine=True) + + # P2SH-P2WSH multisig + redeemscript with no private key + multisig = self.get_multisig() + address = multisig.p2sh_p2wsh_addr self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "redeemscript": bytes_to_hex_str(redeem_script), - "witnessscript": multi_sig_script['redeemScript'] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address']) - assert_equal(address_assert['solvable'], True) + self.test_importmulti({"scriptPubKey": {"address": address}, + "timestamp": "now", + "redeemscript": 
multisig.p2wsh_script, + "witnessscript": multisig.redeem_script}, + True, + warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."]) + self.test_address(address, + solvable=True, + ismine=False) if __name__ == '__main__': - ImportMultiTest ().main () + ImportMultiTest().main()
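Note on the wallet_importmulti.py changes above: the rewritten cases call helpers (get_key(), get_multisig(), self.test_importmulti() and self.test_address()) that are defined elsewhere in the test file and are not part of the hunks shown here. The sketch below is inferred purely from how those helpers are used above; the field names, signatures and assertion details are assumptions, not the actual implementation in the patch. get_key() and get_multisig() would return instances of these tuples after deriving the corresponding addresses and scripts on a node; that derivation is omitted.

from collections import namedtuple

# Per-key fixture, mirroring attributes used above such as key.privkey,
# key.pubkey, key.p2pkh_script, key.p2pkh_addr, key.p2wpkh_addr,
# key.p2sh_p2wpkh_addr and key.p2sh_p2wpkh_redeem_script.
Key = namedtuple('Key', ['privkey',
                         'pubkey',
                         'p2pkh_script',
                         'p2pkh_addr',
                         'p2wpkh_script',
                         'p2wpkh_addr',
                         'p2sh_p2wpkh_redeem_script',
                         'p2sh_p2wpkh_addr'])

# Multisig fixture, mirroring multisig.privkeys, multisig.redeem_script,
# multisig.p2sh_addr, multisig.p2wsh_addr and multisig.p2sh_p2wsh_addr.
Multisig = namedtuple('Multisig', ['privkeys',
                                   'pubkeys',
                                   'p2sh_addr',
                                   'redeem_script',
                                   'p2wsh_script',
                                   'p2wsh_addr',
                                   'p2sh_p2wsh_addr'])


def test_importmulti(node, req, success, error_code=None, error_message=None, warnings=None):
    """Submit a single importmulti request to a wallet node and check the result.

    The test methods above presumably operate on self.nodes[1] rather than
    taking the node as an argument.
    """
    result = node.importmulti([req])
    assert result[0]['success'] == success
    assert sorted(result[0].get('warnings', [])) == sorted(warnings or [])
    if error_code is not None:
        assert result[0]['error']['code'] == error_code
        assert result[0]['error']['message'] == error_message


def test_address(node, address, **expected):
    """Compare selected getaddressinfo() fields against expected values.

    Replaces the repeated address_assert = getaddressinfo(...) plus
    assert_equal(...) blocks of the old test. A value of None means the
    field should be absent (e.g. no timestamp after a failed import).
    """
    info = node.getaddressinfo(address)
    for field, value in expected.items():
        if value is None:
            assert field not in info
        else:
            assert info[field] == value

Folding the repeated importmulti()/getaddressinfo() boilerplate into helpers along these lines is what reduces each case above to a single test_importmulti() call plus a test_address() check.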
diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py index f1a441c399..b7c8d3098d 100755 --- a/test/functional/wallet_keypool_topup.py +++ b/test/functional/wallet_keypool_topup.py @@ -24,8 +24,8 @@ from test_framework.util import ( class KeypoolRestoreTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True - self.num_nodes = 2 - self.extra_args = [[], ['-keypool=100']] + self.num_nodes = 4 + self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']] def skip_test_if_missing_module(self): self.skip_if_no_wallet() @@ -40,32 +40,47 @@ class KeypoolRestoreTest(BitcoinTestFramework): shutil.copyfile(wallet_path, wallet_backup_path) self.start_node(1, self.extra_args[1]) connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 0, 2) + connect_nodes_bi(self.nodes, 0, 3) - self.log.info("Generate keys for wallet") - for _ in range(90): - addr_oldpool = self.nodes[1].getnewaddress() - for _ in range(20): - addr_extpool = self.nodes[1].getnewaddress() + for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]): - self.log.info("Send funds to wallet") - self.nodes[0].sendtoaddress(addr_oldpool, 10) - self.nodes[0].generate(1) - self.nodes[0].sendtoaddress(addr_extpool, 5) - self.nodes[0].generate(1) - sync_blocks(self.nodes) + self.log.info("Generate keys for wallet with address type: {}".format(output_type)) + idx = i+1 + for _ in range(90): + addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type) + for _ in range(20): + addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type) - self.log.info("Restart node with wallet backup") - self.stop_node(1) - shutil.copyfile(wallet_backup_path, wallet_path) - self.start_node(1, self.extra_args[1]) - connect_nodes_bi(self.nodes, 0, 1) - self.sync_all() + # Make sure we're creating the outputs we expect + address_details = self.nodes[idx].validateaddress(addr_extpool) + if i == 0: + assert(not address_details["isscript"] and not address_details["iswitness"]) + elif i == 1: + assert(address_details["isscript"] and not address_details["iswitness"]) + else: + assert(not address_details["isscript"] and address_details["iswitness"]) + + + self.log.info("Send funds to wallet") + self.nodes[0].sendtoaddress(addr_oldpool, 10) + self.nodes[0].generate(1) + self.nodes[0].sendtoaddress(addr_extpool, 5) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + self.log.info("Restart node with wallet backup") + self.stop_node(idx) + shutil.copyfile(wallet_backup_path, wallet_path) + self.start_node(idx, self.extra_args[idx]) + connect_nodes_bi(self.nodes, 0, idx) + self.sync_all() - self.log.info("Verify keypool is restored and balance is correct") - assert_equal(self.nodes[1].getbalance(), 15) - assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive") - # Check that we have marked all keys up to the used keypool key as used - assert_equal(self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/110'") + self.log.info("Verify keypool is restored and balance is correct") + assert_equal(self.nodes[idx].getbalance(), 15) +
assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive") + # Check that we have marked all keys up to the used keypool key as used + assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress())['hdkeypath'], "m/0'/0'/110'") if __name__ == '__main__':
diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index 8ca0387268..17f044bf65 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -25,12 +25,13 @@ def tx_from_hex(hexstring): class ListTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 - self.enable_mocktime() def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): + self.nodes[0].generate(1) # Get out of IBD + self.sync_all() # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all()
diff --git a/test/lint/check-doc.py b/test/lint/check-doc.py index b0d9f87958..4facd6c334 100755 --- a/test/lint/check-doc.py +++ b/test/lint/check-doc.py @@ -26,8 +26,12 @@ SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb']) def main(): - used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True, encoding='utf8') - docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True, encoding='utf8') + if sys.version_info >= (3, 6): + used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True, encoding='utf8') + docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True, encoding='utf8') + else: + used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True) # encoding='utf8' + docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True) # encoding='utf8' args_used = set(re.findall(re.compile(REGEX_ARG), used)) args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
diff --git a/test/lint/commit-script-check.sh b/test/lint/commit-script-check.sh index f1327469f3..4267f9fa0d 100755 --- a/test/lint/commit-script-check.sh +++ b/test/lint/commit-script-check.sh @@ -20,23 +20,23 @@ fi RET=0 PREV_BRANCH=`git name-rev --name-only HEAD` PREV_HEAD=`git rev-parse HEAD` -for i in `git rev-list --reverse $1`; do - if git rev-list -n 1 --pretty="%s" $i | grep -q "^scripted-diff:"; then - git checkout --quiet $i^ || exit - SCRIPT="`git rev-list --format=%b -n1 $i | sed '/^-BEGIN VERIFY SCRIPT-$/,/^-END VERIFY SCRIPT-$/{//!b};d'`" +for commit in `git rev-list --reverse $1`; do + if git rev-list -n 1 --pretty="%s" $commit | grep -q "^scripted-diff:"; then + git checkout --quiet $commit^ || exit + SCRIPT="`git rev-list --format=%b -n1 $commit | sed '/^-BEGIN VERIFY SCRIPT-$/,/^-END VERIFY SCRIPT-$/{//!b};d'`" if test "x$SCRIPT" = "x"; then - echo "Error: missing script for: $i" + echo "Error: missing script for: $commit" echo "Failed" RET=1 else - echo "Running script for: $i" + echo "Running script for: $commit" echo "$SCRIPT" - eval "$SCRIPT" - git --no-pager diff --exit-code $i && echo "OK" || (echo "Failed"; false) || RET=1 + (eval "$SCRIPT") + git --no-pager diff --exit-code $commit && echo "OK" || (echo "Failed"; false) || RET=1 fi git reset --quiet --hard HEAD else - if git rev-list "--format=%b" -n1 $i | grep -q '^-\(BEGIN\|END\)[ a-zA-Z]*-$'; then + if git rev-list "--format=%b" -n1 $commit | grep -q '^-\(BEGIN\|END\)[ a-zA-Z]*-$'; then echo "Error: script block marker but no scripted-diff in title" echo "Failed" RET=1
diff --git
a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py index 5caebf3739..f5d4780b6d 100755 --- a/test/lint/lint-format-strings.py +++ b/test/lint/lint-format-strings.py @@ -241,12 +241,11 @@ def count_format_specifiers(format_string): 4 """ assert(type(format_string) is str) + format_string = format_string.replace('%%', 'X') n = 0 in_specifier = False for i, char in enumerate(format_string): - if format_string[i - 1:i + 1] == "%%" or format_string[i:i + 2] == "%%": - pass - elif char == "%": + if char == "%": in_specifier = True n += 1 elif char in "aAcdeEfFgGinopsuxX": diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh index f6d0fd382b..4b9e2615b6 100755 --- a/test/lint/lint-includes.sh +++ b/test/lint/lint-includes.sh @@ -50,7 +50,6 @@ EXPECTED_BOOST_INCLUDES=( boost/algorithm/string/classification.hpp boost/algorithm/string/replace.hpp boost/algorithm/string/split.hpp - boost/bind.hpp boost/chrono/chrono.hpp boost/date_time/posix_time/posix_time.hpp boost/filesystem.hpp diff --git a/test/lint/lint-python-dead-code.sh b/test/lint/lint-python-dead-code.sh index 3341f794f9..1b897cd131 100755 --- a/test/lint/lint-python-dead-code.sh +++ b/test/lint/lint-python-dead-code.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying @@ -16,4 +16,4 @@ fi vulture \ --min-confidence 60 \ --ignore-names "argtypes,connection_lost,connection_made,converter,data_received,daemon,errcheck,get_ecdh_key,get_privkey,is_compressed,is_fullyvalid,msg_generic,on_*,optionxform,restype,set_privkey" \ - $(git ls-files -- "*.py" ":(exclude)contrib/") + $(git ls-files -- "*.py" ":(exclude)contrib/" ":(exclude)test/functional/data/invalid_txs.py") diff --git a/test/lint/lint-python.sh b/test/lint/lint-python.sh index d44a585294..3dbb9fff28 100755 --- a/test/lint/lint-python.sh +++ b/test/lint/lint-python.sh @@ -36,6 +36,7 @@ export LC_ALL=C # E701 multiple statements on one line (colon) # E702 multiple statements on one line (semicolon) # E703 statement ends with a semicolon +# E711 comparison to None should be 'if cond is None:' # E714 test for object identity should be "is not" # E721 do not compare types, use "isinstance()" # E741 do not use variables named "l", "O", or "I" @@ -87,4 +88,4 @@ elif PYTHONWARNINGS="ignore" flake8 --version | grep -q "Python 2"; then exit 0 fi -PYTHONWARNINGS="ignore" flake8 --ignore=B,C,E,F,I,N,W --select=E101,E112,E113,E115,E116,E125,E129,E131,E133,E223,E224,E242,E266,E271,E272,E273,E274,E275,E304,E306,E401,E402,E502,E701,E702,E703,E714,E721,E741,E742,E743,E901,E902,F401,F402,F403,F404,F405,F406,F407,F601,F602,F621,F622,F631,F701,F702,F703,F704,F705,F706,F707,F811,F812,F821,F822,F823,F831,F841,W191,W291,W292,W293,W504,W601,W602,W603,W604,W605,W606 "${@:-.}" +PYTHONWARNINGS="ignore" flake8 --ignore=B,C,E,F,I,N,W --select=E101,E112,E113,E115,E116,E125,E129,E131,E133,E223,E224,E242,E266,E271,E272,E273,E274,E275,E304,E306,E401,E402,E502,E701,E702,E703,E711,E714,E721,E741,E742,E743,E901,E902,F401,F402,F403,F404,F405,F406,F407,F601,F602,F621,F622,F631,F701,F702,F703,F704,F705,F706,F707,F811,F812,F821,F822,F823,F831,F841,W191,W291,W292,W293,W504,W601,W602,W603,W604,W605,W606 "${@:-.}" diff --git a/test/lint/lint-python-shebang.sh b/test/lint/lint-shebang.sh index 4ff87f0bf7..fda22592d3 100755 --- a/test/lint/lint-python-shebang.sh +++ b/test/lint/lint-shebang.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Shebang must use python3 (not python or 
python2) +# Assert expected shebang lines export LC_ALL=C EXIT_CODE=0 @@ -10,4 +10,11 @@ for PYTHON_FILE in $(git ls-files -- "*.py"); do EXIT_CODE=1 fi done +for SHELL_FILE in $(git ls-files -- "*.sh"); do + if [[ $(head -n 1 "${SHELL_FILE}") != "#!/usr/bin/env bash" && + $(head -n 1 "${SHELL_FILE}") != "#!/bin/sh" ]]; then + echo "Missing expected shebang \"#!/usr/bin/env bash\" or \"#!/bin/sh\" in ${SHELL_FILE}" + EXIT_CODE=1 + fi +done exit ${EXIT_CODE} diff --git a/test/sanitizer_suppressions/lsan b/test/sanitizer_suppressions/lsan new file mode 100644 index 0000000000..6f15c0f1d4 --- /dev/null +++ b/test/sanitizer_suppressions/lsan @@ -0,0 +1,6 @@ +# Suppress warnings triggered in dependencies +leak:libcrypto +leak:libqminimal +leak:libQt5Core +leak:libQt5Gui +leak:libQt5Widgets diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan index 209c46f096..70eea34363 100644 --- a/test/sanitizer_suppressions/tsan +++ b/test/sanitizer_suppressions/tsan @@ -1,9 +1,6 @@ # ThreadSanitizer suppressions # ============================ -# fChecked is theoretically racy, practically only in unit tests -race:CheckBlock - # WalletBatch (unidentified deadlock) deadlock:WalletBatch @@ -14,11 +11,6 @@ deadlock:TestPotentialDeadLockDetected race:src/qt/test/* deadlock:src/qt/test/* -# WIP: Unidentified suppressions to run the functional tests -#race:zmqpublishnotifier.cpp -# -#deadlock:CreateWalletFromFile -#deadlock:importprivkey -#deadlock:walletdb.h -#deadlock:walletdb.cpp -#deadlock:wallet/db.cpp +# External libraries +deadlock:libdb +race:libzmq diff --git a/test/util/bitcoin-util-test.py b/test/util/bitcoin-util-test.py index 92fef30e13..7b1cc2b031 100755 --- a/test/util/bitcoin-util-test.py +++ b/test/util/bitcoin-util-test.py @@ -9,14 +9,9 @@ Runs automatically during `make check`. Can also be run manually.""" -from __future__ import division,print_function,unicode_literals - import argparse import binascii -try: - import configparser -except ImportError: - import ConfigParser as configparser +import configparser import difflib import json import logging
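A side note on the count_format_specifiers() change in test/lint/lint-format-strings.py earlier in this diff: replacing every literal "%%" with a placeholder up front is what lets the counting loop drop its look-behind/look-ahead check for escaped percent signs. The standalone sketch below only illustrates that idea; it is simplified and is not the full linter function, which keeps additional state for other specifier forms.

def count_plain_format_specifiers(format_string):
    """Count printf-style specifiers, treating "%%" as an escaped percent sign.

    >>> count_plain_format_specifiers("foo bar foo")
    0
    >>> count_plain_format_specifiers("foo %d bar foo")
    1
    >>> count_plain_format_specifiers("foo %d bar %i foo %% foo")
    2
    """
    assert isinstance(format_string, str)
    # Neutralize escaped percent signs first; every '%' left over starts a
    # real specifier, so no neighbour inspection is needed in the loop.
    format_string = format_string.replace('%%', 'X')
    return sum(1 for char in format_string if char == '%')

The doctests can be checked with python3 -m doctest on the file containing the sketch.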