Diffstat (limited to 'qa')
-rw-r--r--  qa/README.md                                   |   4
-rwxr-xr-x  qa/pull-tester/rpc-tests.py                    |  42
-rwxr-xr-x  qa/rpc-tests/abandonconflict.py                |  26
-rwxr-xr-x  qa/rpc-tests/bip65-cltv-p2p.py                 |  40
-rwxr-xr-x  qa/rpc-tests/bip65-cltv.py                     |   3
-rwxr-xr-x  qa/rpc-tests/bipdersig-p2p.py                  |  42
-rwxr-xr-x  qa/rpc-tests/create_cache.py                   |  10
-rwxr-xr-x  qa/rpc-tests/importprunedfunds.py              |  37
-rwxr-xr-x  qa/rpc-tests/keypool.py                        |  10
-rwxr-xr-x  qa/rpc-tests/nulldummy.py                      | 148
-rwxr-xr-x  qa/rpc-tests/p2p-compactblocks.py              | 624
-rwxr-xr-x  qa/rpc-tests/p2p-feefilter.py                  |  14
-rwxr-xr-x  qa/rpc-tests/p2p-fullblocktest.py              |   3
-rwxr-xr-x  qa/rpc-tests/p2p-mempool.py                    |   7
-rwxr-xr-x  qa/rpc-tests/p2p-segwit.py                     | 122
-rwxr-xr-x  qa/rpc-tests/p2p-versionbits-warning.py        |  42
-rwxr-xr-x  qa/rpc-tests/rest.py                           |  10
-rwxr-xr-x  qa/rpc-tests/rpcbind_test.py                   | 221
-rwxr-xr-x  qa/rpc-tests/segwit.py                         |  61
-rw-r--r--  qa/rpc-tests/test_framework/authproxy.py       |   6
-rw-r--r--  qa/rpc-tests/test_framework/blockstore.py      |   3
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py        | 373
-rw-r--r--  qa/rpc-tests/test_framework/script.py          |   2
-rw-r--r--  qa/rpc-tests/test_framework/siphash.py         |  64
-rwxr-xr-x  qa/rpc-tests/test_framework/test_framework.py  |   9
-rw-r--r--  qa/rpc-tests/test_framework/util.py            |  38
-rwxr-xr-x  qa/rpc-tests/wallet-accounts.py                |  94
-rwxr-xr-x  qa/rpc-tests/wallet-dump.py                    | 104
-rwxr-xr-x  qa/rpc-tests/wallet-hd.py                      |   8
-rwxr-xr-x  qa/rpc-tests/wallet.py                         |   5
-rwxr-xr-x  qa/rpc-tests/walletbackup.py                   |   7
31 files changed, 1799 insertions(+), 380 deletions(-)
diff --git a/qa/README.md b/qa/README.md
index 723660c6c8..225207cc1c 100644
--- a/qa/README.md
+++ b/qa/README.md
@@ -41,8 +41,8 @@ Run all possible tests with
qa/pull-tester/rpc-tests.py -extended
-By default, tests will be run in parallel if you want to specify how many
-tests should be run in parallel, append `-parallel=n` (default n=4).
+By default, tests will be run in parallel. To specify how many jobs to run,
+append `-parallel=n` (default n=4).
If you want to create a basic coverage report for the rpc test suite, append `--coverage`.
diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
index 11b83bac14..4a953385e0 100755
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -77,8 +77,6 @@ for arg in sys.argv[1:]:
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = BUILDDIR + '/src/bitcoind' + EXEEXT
-if "BITCOINCLI" not in os.environ:
- os.environ["BITCOINCLI"] = BUILDDIR + '/src/bitcoin-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
@@ -94,23 +92,27 @@ if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
if ENABLE_ZMQ:
try:
import zmq
- except ImportError as e:
- print("WARNING: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
- "to run zmq tests, see dependency info in /qa/README.md.")
- ENABLE_ZMQ=0
+ except ImportError:
+ print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
+ "to run zmq tests, see dependency info in /qa/README.md.")
+ # ENABLE_ZMQ=0
+ raise
-#Tests
testScripts = [
# longest test should go first, to favor running tests in parallel
'p2p-fullblocktest.py',
'walletbackup.py',
'bip68-112-113-p2p.py',
'wallet.py',
+ 'wallet-accounts.py',
'wallet-hd.py',
+ 'wallet-dump.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
+ 'p2p-segwit.py',
+ 'segwit.py',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
@@ -132,15 +134,16 @@ testScripts = [
'disablewallet.py',
'sendheaders.py',
'keypool.py',
+ 'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
- 'p2p-segwit.py',
- 'segwit.py',
'importprunedfunds.py',
'signmessages.py',
+ 'p2p-compactblocks.py',
+ 'nulldummy.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
@@ -158,7 +161,7 @@ testScriptsExt = [
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
-# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
+ 'rpcbind_test.py',
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
@@ -192,6 +195,7 @@ def runtests():
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
+ flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
if coverage:
flags.append(coverage.flag)
@@ -212,8 +216,8 @@ def runtests():
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
- print(stdout)
- print('stderr:\n' if not stderr == '' else '', stderr)
+ print('' if passed else stdout + '\n', end='')
+ print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
@@ -248,21 +252,27 @@ class RPCTestHandler:
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed=%s" % len(self.test_list)]
+ log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
+ log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)))
+ stdout=log_stdout,
+ stderr=log_stderr),
+ log_stdout,
+ log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
- (name, time0, proc) = j
+ (name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
- (stdout, stderr) = proc.communicate(timeout=3)
+ log_out.seek(0), log_err.seek(0)
+ [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
+ log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
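The runner change above replaces subprocess.PIPE with tempfile.SpooledTemporaryFile, so each test's output accumulates in a buffered temporary file instead of an OS pipe that a chatty, long-running test could fill and deadlock on. A minimal sketch of that capture pattern on its own, outside the framework:

    import subprocess
    import tempfile

    # Buffer the child's output in spooled temp files (in memory up to 64 KiB,
    # then rolled over to disk) rather than in a pipe the parent must drain.
    log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
    log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
    proc = subprocess.Popen(["python3", "-c", "print('hello')"],
                            stdout=log_stdout, stderr=log_stderr)
    proc.wait()
    log_stdout.seek(0), log_stderr.seek(0)
    stdout, stderr = (f.read().decode('utf-8') for f in (log_stdout, log_stderr))
    log_stdout.close(), log_stderr.close()
    print(stdout, stderr)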
diff --git a/qa/rpc-tests/abandonconflict.py b/qa/rpc-tests/abandonconflict.py
index c50c3cc562..874df48777 100755
--- a/qa/rpc-tests/abandonconflict.py
+++ b/qa/rpc-tests/abandonconflict.py
@@ -68,7 +68,7 @@ class AbandonConflictTest(BitcoinTestFramework):
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
- assert(newbalance == balance - Decimal("30") + Decimal("24.9996"))
+ assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
@@ -78,16 +78,16 @@ class AbandonConflictTest(BitcoinTestFramework):
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
# Verify txs no longer in mempool
- assert(len(self.nodes[0].getrawmempool()) == 0)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
- assert(newbalance == balance - Decimal("24.9996"))
+ assert_equal(newbalance, balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
- assert(unconfbalance == newbalance)
+ assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
@@ -96,35 +96,35 @@ class AbandonConflictTest(BitcoinTestFramework):
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
- assert(newbalance == balance + Decimal("30"))
+ assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])
- assert(len(self.nodes[0].getrawmempool()) == 0)
- assert(self.nodes[0].getbalance() == balance)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
- assert(newbalance == balance - Decimal("20") + Decimal("14.99998"))
+ assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
- assert(newbalance == balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
+ assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
- assert(len(self.nodes[0].getrawmempool()) == 0)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
- assert(newbalance == balance - Decimal("24.9996"))
+ assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
@@ -143,7 +143,7 @@ class AbandonConflictTest(BitcoinTestFramework):
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
- assert(newbalance == balance + Decimal("20"))
+ assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
@@ -151,7 +151,7 @@ class AbandonConflictTest(BitcoinTestFramework):
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
- #assert(newbalance == balance - Decimal("10"))
+ #assert_equal(newbalance, balance - Decimal("10"))
print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
print(str(balance) + " -> " + str(newbalance) + " ?")
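The assert(x == y) to assert_equal(x, y) conversions above are about diagnostics: assert_equal (from test_framework.util) reports both operands when the comparison fails, so a balance mismatch shows the actual numbers instead of a bare AssertionError. A minimal sketch of such a helper, roughly what the framework's version does:

    def assert_equal(thing1, thing2):
        # Include both operands in the failure message for readable mismatches.
        if thing1 != thing2:
            raise AssertionError("%s != %s" % (str(thing1), str(thing2)))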
diff --git a/qa/rpc-tests/bip65-cltv-p2p.py b/qa/rpc-tests/bip65-cltv-p2p.py
index 754b6873b7..e903b2fbf0 100755
--- a/qa/rpc-tests/bip65-cltv-p2p.py
+++ b/qa/rpc-tests/bip65-cltv-p2p.py
@@ -71,9 +71,9 @@ class BIP65Test(ComparisonTestFramework):
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
- ''' 98 more version 3 blocks '''
+ ''' 398 more version 3 blocks '''
test_blocks = []
- for i in range(98):
+ for i in range(398):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
@@ -118,24 +118,6 @@ class BIP65Test(ComparisonTestFramework):
height += 1
yield TestInstance([[block, True]])
- '''
- Check that the new CLTV rules are enforced in the 751st version 4
- block.
- '''
- spendtx = self.create_transaction(self.nodes[0],
- self.coinbase_blocks[1], self.nodeaddress, 1.0)
- cltv_invalidate(spendtx)
- spendtx.rehash()
-
- block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
- block.nVersion = 4
- block.vtx.append(spendtx)
- block.hashMerkleRoot = block.calc_merkle_root()
- block.rehash()
- block.solve()
- self.last_block_time += 1
- yield TestInstance([[block, False]])
-
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in range(199):
@@ -169,6 +151,24 @@ class BIP65Test(ComparisonTestFramework):
height += 1
yield TestInstance([[block, True]])
+ '''
+ Check that the new CLTV rules are enforced in the 951st version 4
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ cltv_invalidate(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 4
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
diff --git a/qa/rpc-tests/bip65-cltv.py b/qa/rpc-tests/bip65-cltv.py
index abba7fc20e..baa77b92a0 100755
--- a/qa/rpc-tests/bip65-cltv.py
+++ b/qa/rpc-tests/bip65-cltv.py
@@ -30,7 +30,8 @@ class BIP65Test(BitcoinTestFramework):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
- self.nodes[1].generate(100)
+ self.nodes[1].generate(200)
+ cnt += 100
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=3 blocks")
diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py
index 4e4936a4ae..3bad5af5e6 100755
--- a/qa/rpc-tests/bipdersig-p2p.py
+++ b/qa/rpc-tests/bipdersig-p2p.py
@@ -79,9 +79,9 @@ class BIP66Test(ComparisonTestFramework):
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
- ''' 98 more version 2 blocks '''
+ ''' 298 more version 2 blocks '''
test_blocks = []
- for i in range(98):
+ for i in range(298):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
@@ -124,25 +124,7 @@ class BIP66Test(ComparisonTestFramework):
self.last_block_time += 1
self.tip = block.sha256
height += 1
- yield TestInstance([[block, True]])
-
- '''
- Check that the new DERSIG rules are enforced in the 751st version 3
- block.
- '''
- spendtx = self.create_transaction(self.nodes[0],
- self.coinbase_blocks[1], self.nodeaddress, 1.0)
- unDERify(spendtx)
- spendtx.rehash()
-
- block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
- block.nVersion = 3
- block.vtx.append(spendtx)
- block.hashMerkleRoot = block.calc_merkle_root()
- block.rehash()
- block.solve()
- self.last_block_time += 1
- yield TestInstance([[block, False]])
+ yield TestInstance([[block, True]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
@@ -177,6 +159,24 @@ class BIP66Test(ComparisonTestFramework):
height += 1
yield TestInstance([[block, True]])
+ '''
+ Check that the new DERSIG rules are enforced in the 951st version 3
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
diff --git a/qa/rpc-tests/create_cache.py b/qa/rpc-tests/create_cache.py
index b6161e0917..1ace6310d0 100755
--- a/qa/rpc-tests/create_cache.py
+++ b/qa/rpc-tests/create_cache.py
@@ -12,9 +12,15 @@ from test_framework.test_framework import BitcoinTestFramework
class CreateCache(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+
+ # Test network and test nodes are not required:
+ self.num_nodes = 0
+ self.nodes = []
+
def setup_network(self):
- # Don't setup any test nodes
- self.options.noshutdown = True
+ pass
def run_test(self):
pass
diff --git a/qa/rpc-tests/importprunedfunds.py b/qa/rpc-tests/importprunedfunds.py
index d86f51b7f3..0dee8ad4ec 100755
--- a/qa/rpc-tests/importprunedfunds.py
+++ b/qa/rpc-tests/importprunedfunds.py
@@ -5,7 +5,7 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
-import decimal
+
class ImportPrunedFundsTest(BitcoinTestFramework):
@@ -20,14 +20,10 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
self.is_network_split=False
self.sync_all()
- def run_test (self):
- import time
- begintime = int(time.time())
-
+ def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(101)
- # sync
self.sync_all()
# address
@@ -72,7 +68,6 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
-
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.nodes[0].generate(1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
@@ -82,28 +77,27 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
#Import with no affiliated address
try:
- result1 = self.nodes[1].importprunedfunds(rawtxn1, proof1, "")
+ self.nodes[1].importprunedfunds(rawtxn1, proof1)
except JSONRPCException as e:
assert('No addresses' in e.error['message'])
else:
assert(False)
-
balance1 = self.nodes[1].getbalance("", 0, True)
assert_equal(balance1, Decimal(0))
#Import with affiliated address with no rescan
- self.nodes[1].importaddress(address2, "", False)
- result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2, "")
- balance2 = Decimal(self.nodes[1].getbalance("", 0, True))
+ self.nodes[1].importaddress(address2, "add2", False)
+ result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2)
+ balance2 = self.nodes[1].getbalance("add2", 0, True)
assert_equal(balance2, Decimal('0.05'))
#Import with private key with no rescan
- self.nodes[1].importprivkey(address3_privkey, "", False)
- result3 = self.nodes[1].importprunedfunds(rawtxn3, proof3, "")
- balance3 = Decimal(self.nodes[1].getbalance("", 0, False))
+ self.nodes[1].importprivkey(address3_privkey, "add3", False)
+ result3 = self.nodes[1].importprunedfunds(rawtxn3, proof3)
+ balance3 = self.nodes[1].getbalance("add3", 0, False)
assert_equal(balance3, Decimal('0.025'))
- balance3 = Decimal(self.nodes[1].getbalance("", 0, True))
+ balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.075'))
#Addresses Test - after import
@@ -118,7 +112,6 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
assert_equal(address_info['ismine'], True)
#Remove transactions
-
try:
self.nodes[1].removeprunedfunds(txnid1)
except JSONRPCException as e:
@@ -126,18 +119,16 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
else:
assert(False)
-
- balance1 = Decimal(self.nodes[1].getbalance("", 0, True))
+ balance1 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance1, Decimal('0.075'))
-
self.nodes[1].removeprunedfunds(txnid2)
- balance2 = Decimal(self.nodes[1].getbalance("", 0, True))
+ balance2 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance2, Decimal('0.025'))
self.nodes[1].removeprunedfunds(txnid3)
- balance3 = Decimal(self.nodes[1].getbalance("", 0, True))
+ balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.0'))
if __name__ == '__main__':
- ImportPrunedFundsTest ().main ()
+ ImportPrunedFundsTest().main()
diff --git a/qa/rpc-tests/keypool.py b/qa/rpc-tests/keypool.py
index c75303ecbf..fa39476568 100755
--- a/qa/rpc-tests/keypool.py
+++ b/qa/rpc-tests/keypool.py
@@ -12,6 +12,11 @@ class KeyPoolTest(BitcoinTestFramework):
def run_test(self):
nodes = self.nodes
+ addr_before_encrypting = nodes[0].getnewaddress()
+ addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
+ wallet_info_old = nodes[0].getwalletinfo()
+ assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
+
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
bitcoind_processes[0].wait()
@@ -19,6 +24,11 @@ class KeyPoolTest(BitcoinTestFramework):
nodes[0] = start_node(0, self.options.tmpdir)
# Keep creating keys
addr = nodes[0].getnewaddress()
+ addr_data = nodes[0].validateaddress(addr)
+ wallet_info = nodes[0].getwalletinfo()
+ assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
+ assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
+
try:
addr = nodes[0].getnewaddress()
raise AssertionError('Keypool should be exhausted after one address')
diff --git a/qa/rpc-tests/nulldummy.py b/qa/rpc-tests/nulldummy.py
new file mode 100755
index 0000000000..eaed7a8c78
--- /dev/null
+++ b/qa/rpc-tests/nulldummy.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, NetworkThread
+from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
+from test_framework.comptool import TestManager
+from test_framework.script import CScript
+from io import BytesIO
+import time
+
+NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
+
+def trueDummy(tx):
+ scriptSig = CScript(tx.vin[0].scriptSig)
+ newscript = []
+ for i in scriptSig:
+ if (len(newscript) == 0):
+ assert(len(i) == 0)
+ newscript.append(b'\x51')
+ else:
+ newscript.append(i)
+ tx.vin[0].scriptSig = CScript(newscript)
+ tx.rehash()
+
+'''
+This test is meant to exercise NULLDUMMY softfork.
+Connect to a single node.
+Generate 2 blocks (save the coinbases for later).
+Generate 427 more blocks.
+[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
+[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
+[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
+[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
+'''
+
+class NULLDUMMYTest(ComparisonTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+
+ def setup_network(self):
+ # Must set the blockversion for this test
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-debug', '-whitelist=127.0.0.1', '-walletprematurewitness']])
+
+ def run_test(self):
+ self.address = self.nodes[0].getnewaddress()
+ self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
+ self.wit_address = self.nodes[0].addwitnessaddress(self.address)
+ self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
+
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
+ coinbase_txid = []
+ for i in self.coinbase_blocks:
+ coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
+ self.nodes[0].generate(427) # Block 429
+ self.lastblockhash = self.nodes[0].getbestblockhash()
+ self.tip = int("0x" + self.lastblockhash, 0)
+ self.lastblockheight = 429
+ self.lastblocktime = int(time.time()) + 429
+
+ print ("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
+ test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
+ txid1 = self.tx_submit(self.nodes[0], test1txs[0])
+ test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
+ txid2 = self.tx_submit(self.nodes[0], test1txs[1])
+ test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
+ txid3 = self.tx_submit(self.nodes[0], test1txs[2])
+ self.block_submit(self.nodes[0], test1txs, False, True)
+
+ print ("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
+ test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 48)
+ trueDummy(test2tx)
+ txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
+
+ print ("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
+ self.block_submit(self.nodes[0], [test2tx], False, True)
+
+ print ("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
+ test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 47)
+ test6txs=[CTransaction(test4tx)]
+ trueDummy(test4tx)
+ self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR)
+ self.block_submit(self.nodes[0], [test4tx])
+
+ print ("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
+ test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
+ test6txs.append(CTransaction(test5tx))
+ test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
+ self.tx_submit(self.nodes[0], test5tx, NULLDUMMY_ERROR)
+ self.block_submit(self.nodes[0], [test5tx], True)
+
+ print ("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
+ for i in test6txs:
+ self.tx_submit(self.nodes[0], i)
+ self.block_submit(self.nodes[0], test6txs, True, True)
+
+
+ def create_transaction(self, node, txid, to_address, amount):
+ inputs = [{ "txid" : txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signresult = node.signrawtransaction(rawtx)
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+
+ def tx_submit(self, node, tx, msg = ""):
+ tx.rehash()
+ try:
+ node.sendrawtransaction(bytes_to_hex_str(tx.serialize_with_witness()), True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error["message"], msg)
+ return tx.hash
+
+
+ def block_submit(self, node, txs, witness = False, accept = False):
+ block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
+ block.nVersion = 4
+ for tx in txs:
+ tx.rehash()
+ block.vtx.append(tx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ witness and add_witness_commitment(block)
+ block.rehash()
+ block.solve()
+ node.submitblock(bytes_to_hex_str(block.serialize(True)))
+ if (accept):
+ assert_equal(node.getbestblockhash(), block.hash)
+ self.tip = block.sha256
+ self.lastblockhash = block.hash
+ self.lastblocktime += 1
+ self.lastblockheight += 1
+ else:
+ assert_equal(node.getbestblockhash(), self.lastblockhash)
+
+if __name__ == '__main__':
+ NULLDUMMYTest().main()
\ No newline at end of file
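For context on trueDummy() above: a CHECKMULTISIG scriptSig has the shape <dummy> <sig1> ..., and BIP 147 (NULLDUMMY) requires the dummy element to be an empty push. Making it a true value, as trueDummy() does, stays valid under the old rules but hits NULLDUMMY_ERROR once the rule is enforced. A small sketch of the two shapes, assuming the bundled test_framework.script module:

    from test_framework.script import CScript, OP_0, OP_1

    fake_sig = b'\x30' * 71                  # stand-in for a DER signature
    compliant = CScript([OP_0, fake_sig])    # empty dummy: accepted after activation
    violating = CScript([OP_1, fake_sig])    # non-zero dummy: the violation trueDummy() creates
    assert bytes(compliant)[0] == 0x00
    assert bytes(violating)[0] == 0x51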
diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py
new file mode 100755
index 0000000000..bf4fb43add
--- /dev/null
+++ b/qa/rpc-tests/p2p-compactblocks.py
@@ -0,0 +1,624 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.blocktools import create_block, create_coinbase
+from test_framework.siphash import siphash256
+from test_framework.script import CScript, OP_TRUE
+
+'''
+CompactBlocksTest -- test compact blocks (BIP 152)
+'''
+
+
+# TestNode: A peer we use to send messages to bitcoind, and store responses.
+class TestNode(SingleNodeConnCB):
+ def __init__(self):
+ SingleNodeConnCB.__init__(self)
+ self.last_sendcmpct = None
+ self.last_headers = None
+ self.last_inv = None
+ self.last_cmpctblock = None
+ self.block_announced = False
+ self.last_getdata = None
+ self.last_getblocktxn = None
+ self.last_block = None
+ self.last_blocktxn = None
+
+ def on_sendcmpct(self, conn, message):
+ self.last_sendcmpct = message
+
+ def on_block(self, conn, message):
+ self.last_block = message
+
+ def on_cmpctblock(self, conn, message):
+ self.last_cmpctblock = message
+ self.block_announced = True
+
+ def on_headers(self, conn, message):
+ self.last_headers = message
+ self.block_announced = True
+
+ def on_inv(self, conn, message):
+ self.last_inv = message
+ self.block_announced = True
+
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ def on_getblocktxn(self, conn, message):
+ self.last_getblocktxn = message
+
+ def on_blocktxn(self, conn, message):
+ self.last_blocktxn = message
+
+ # Requires caller to hold mininode_lock
+ def received_block_announcement(self):
+ return self.block_announced
+
+ def clear_block_announcement(self):
+ with mininode_lock:
+ self.block_announced = False
+ self.last_inv = None
+ self.last_headers = None
+ self.last_cmpctblock = None
+
+ def get_headers(self, locator, hashstop):
+ msg = msg_getheaders()
+ msg.locator.vHave = locator
+ msg.hashstop = hashstop
+ self.connection.send_message(msg)
+
+ def send_header_for_blocks(self, new_blocks):
+ headers_message = msg_headers()
+ headers_message.headers = [CBlockHeader(b) for b in new_blocks]
+ self.send_message(headers_message)
+
+ def request_headers_and_sync(self, locator, hashstop=0):
+ self.clear_block_announcement()
+ self.get_headers(locator, hashstop)
+ assert(wait_until(self.received_block_announcement, timeout=30))
+ assert(self.received_block_announcement())
+ self.clear_block_announcement()
+
+
+class CompactBlocksTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.utxos = []
+
+ def setup_network(self):
+ self.nodes = []
+
+ # Turn off segwit in this test, as compact blocks don't currently work
+ # with segwit. (After BIP 152 is updated to support segwit, we can
+ # test behavior with and without segwit enabled by adding a second node
+ # to the test.)
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"]])
+
+ def build_block_on_tip(self):
+ height = self.nodes[0].getblockcount()
+ tip = self.nodes[0].getbestblockhash()
+ mtp = self.nodes[0].getblockheader(tip)['mediantime']
+ block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
+ block.solve()
+ return block
+
+ # Create 10 more anyone-can-spend utxo's for testing.
+ def make_utxos(self):
+ block = self.build_block_on_tip()
+ self.test_node.send_and_ping(msg_block(block))
+ assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
+ self.nodes[0].generate(100)
+
+ total_value = block.vtx[0].vout[0].nValue
+ out_value = total_value // 10
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
+ for i in range(10):
+ tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
+ tx.rehash()
+
+ block2 = self.build_block_on_tip()
+ block2.vtx.append(tx)
+ block2.hashMerkleRoot = block2.calc_merkle_root()
+ block2.solve()
+ self.test_node.send_and_ping(msg_block(block2))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
+ self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
+ return
+
+ # Test "sendcmpct":
+ # - No compact block announcements or getdata(MSG_CMPCT_BLOCK) unless
+ # sendcmpct is sent.
+ # - If sendcmpct is sent with version > 1, the message is ignored.
+ # - If sendcmpct is sent with boolean 0, then block announcements are not
+ # made with compact blocks.
+ # - If sendcmpct is then sent with boolean 1, then new block announcements
+ # are made with compact blocks.
+ def test_sendcmpct(self):
+ print("Testing SENDCMPCT p2p message... ")
+
+ # Make sure we get a version 0 SENDCMPCT message from our peer
+ def received_sendcmpct():
+ return (self.test_node.last_sendcmpct is not None)
+ got_message = wait_until(received_sendcmpct, timeout=30)
+ assert(received_sendcmpct())
+ assert(got_message)
+ assert_equal(self.test_node.last_sendcmpct.version, 1)
+
+ tip = int(self.nodes[0].getbestblockhash(), 16)
+
+ def check_announcement_of_new_block(node, peer, predicate):
+ peer.clear_block_announcement()
+ node.generate(1)
+ got_message = wait_until(lambda: peer.block_announced, timeout=30)
+ assert(peer.block_announced)
+ assert(got_message)
+ with mininode_lock:
+ assert(predicate(peer))
+
+ # We shouldn't get any block announcements via cmpctblock yet.
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None)
+
+ # Try one more time, this time after requesting headers.
+ self.test_node.request_headers_and_sync(locator=[tip])
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
+
+ # Test a few ways of using sendcmpct that should NOT
+ # result in compact block announcements.
+ # Before each test, sync the headers chain.
+ self.test_node.request_headers_and_sync(locator=[tip])
+
+ # Now try a SENDCMPCT message with too-high version
+ sendcmpct = msg_sendcmpct()
+ sendcmpct.version = 2
+ self.test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None)
+
+ # Headers sync before next test.
+ self.test_node.request_headers_and_sync(locator=[tip])
+
+ # Now try a SENDCMPCT message with valid version, but announce=False
+ self.test_node.send_and_ping(msg_sendcmpct())
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None)
+
+ # Headers sync before next test.
+ self.test_node.request_headers_and_sync(locator=[tip])
+
+ # Finally, try a SENDCMPCT message with announce=True
+ sendcmpct.version = 1
+ sendcmpct.announce = True
+ self.test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is not None)
+
+ # Try one more time (no headers sync should be needed!)
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is not None)
+
+ # Try one more time, after turning on sendheaders
+ self.test_node.send_and_ping(msg_sendheaders())
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is not None)
+
+ # Now turn off announcements
+ sendcmpct.announce = False
+ self.test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
+
+ # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
+ def test_invalid_cmpctblock_message(self):
+ print("Testing invalid index in cmpctblock message...")
+ self.nodes[0].generate(101)
+ block = self.build_block_on_tip()
+
+ cmpct_block = P2PHeaderAndShortIDs()
+ cmpct_block.header = CBlockHeader(block)
+ cmpct_block.prefilled_txn_length = 1
+ # This index will be too high
+ prefilled_txn = PrefilledTransaction(1, block.vtx[0])
+ cmpct_block.prefilled_txn = [prefilled_txn]
+ self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
+ assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
+
+ # Compare the generated shortids to what we expect based on BIP 152, given
+ # bitcoind's choice of nonce.
+ def test_compactblock_construction(self):
+ print("Testing compactblock headers and shortIDs are correct...")
+
+ # Generate a bunch of transactions.
+ self.nodes[0].generate(101)
+ num_transactions = 25
+ address = self.nodes[0].getnewaddress()
+ for i in range(num_transactions):
+ self.nodes[0].sendtoaddress(address, 0.1)
+
+ # Now mine a block, and look at the resulting compact block.
+ self.test_node.clear_block_announcement()
+ block_hash = int(self.nodes[0].generate(1)[0], 16)
+
+ # Store the raw block in our internal format.
+ block = FromHex(CBlock(), self.nodes[0].getblock("%02x" % block_hash, False))
+ [tx.calc_sha256() for tx in block.vtx]
+ block.rehash()
+
+ # Don't care which type of announcement came back for this test; just
+ # request the compact block if we didn't get one yet.
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+
+ with mininode_lock:
+ if self.test_node.last_cmpctblock is None:
+ self.test_node.clear_block_announcement()
+ inv = CInv(4, block_hash) # 4 == "CompactBlock"
+ self.test_node.send_message(msg_getdata([inv]))
+
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+
+ # Now we should have the compactblock
+ header_and_shortids = None
+ with mininode_lock:
+ assert(self.test_node.last_cmpctblock is not None)
+ # Convert the on-the-wire representation to absolute indexes
+ header_and_shortids = HeaderAndShortIDs(self.test_node.last_cmpctblock.header_and_shortids)
+
+ # Check that we got the right block!
+ header_and_shortids.header.calc_sha256()
+ assert_equal(header_and_shortids.header.sha256, block_hash)
+
+ # Make sure the prefilled_txn appears to have included the coinbase
+ assert(len(header_and_shortids.prefilled_txn) >= 1)
+ assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
+
+ # Check that all prefilled_txn entries match what's in the block.
+ for entry in header_and_shortids.prefilled_txn:
+ entry.tx.calc_sha256()
+ assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
+
+ # Check that the cmpctblock message announced all the transactions.
+ assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
+
+ # And now check that all the shortids are as expected as well.
+ # Determine the siphash keys to use.
+ [k0, k1] = header_and_shortids.get_siphash_keys()
+
+ index = 0
+ while index < len(block.vtx):
+ if (len(header_and_shortids.prefilled_txn) > 0 and
+ header_and_shortids.prefilled_txn[0].index == index):
+ # Already checked prefilled transactions above
+ header_and_shortids.prefilled_txn.pop(0)
+ else:
+ shortid = calculate_shortid(k0, k1, block.vtx[index].sha256)
+ assert_equal(shortid, header_and_shortids.shortids[0])
+ header_and_shortids.shortids.pop(0)
+ index += 1
+
+ # Test that bitcoind requests compact blocks when we announce new blocks
+ # via header or inv, and that responding to getblocktxn causes the block
+ # to be successfully reconstructed.
+ def test_compactblock_requests(self):
+ print("Testing compactblock requests... ")
+
+ # Try announcing a block with an inv or header, expect a compactblock
+ # request
+ for announce in ["inv", "header"]:
+ block = self.build_block_on_tip()
+ with mininode_lock:
+ self.test_node.last_getdata = None
+
+ if announce == "inv":
+ self.test_node.send_message(msg_inv([CInv(2, block.sha256)]))
+ else:
+ self.test_node.send_header_for_blocks([block])
+ success = wait_until(lambda: self.test_node.last_getdata is not None, timeout=30)
+ assert(success)
+ assert_equal(len(self.test_node.last_getdata.inv), 1)
+ assert_equal(self.test_node.last_getdata.inv[0].type, 4)
+ assert_equal(self.test_node.last_getdata.inv[0].hash, block.sha256)
+
+ # Send back a compactblock message that omits the coinbase
+ comp_block = HeaderAndShortIDs()
+ comp_block.header = CBlockHeader(block)
+ comp_block.nonce = 0
+ comp_block.shortids = [1] # this is useless, and wrong
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
+ # Expect a getblocktxn message.
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [0]) # should be a coinbase request
+
+ # Send the coinbase, and verify that the tip advances.
+ msg = msg_blocktxn()
+ msg.block_transactions.blockhash = block.sha256
+ msg.block_transactions.transactions = [block.vtx[0]]
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Create a chain of transactions from given utxo, and add to a new block.
+ def build_block_with_transactions(self, utxo, num_transactions):
+ block = self.build_block_on_tip()
+
+ for i in range(num_transactions):
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
+ tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
+ tx.rehash()
+ utxo = [tx.sha256, 0, tx.vout[0].nValue]
+ block.vtx.append(tx)
+
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.solve()
+ return block
+
+ # Test that we only receive getblocktxn requests for transactions that the
+ # node needs, and that responding to them causes the block to be
+ # reconstructed.
+ def test_getblocktxn_requests(self):
+ print("Testing getblocktxn requests...")
+
+ # First try announcing compactblocks that won't reconstruct, and verify
+ # that we receive getblocktxn messages back.
+ utxo = self.utxos.pop(0)
+
+ block = self.build_block_with_transactions(utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block)
+
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [1, 2, 3, 4, 5])
+ msg = msg_blocktxn()
+ msg.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ # Now try interspersing the prefilled transactions
+ comp_block.initialize_from_block(block, prefill_list=[0, 1, 5])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [2, 3, 4])
+ msg.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Now try giving one transaction ahead of time.
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ self.test_node.send_and_ping(msg_tx(block.vtx[1]))
+ assert(block.vtx[1].hash in self.nodes[0].getrawmempool())
+
+ # Prefill 4 out of the 6 transactions, and verify that only the one
+ # that was not in the mempool is requested.
+ comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [5])
+
+ msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Now provide all transactions to the node before the block is
+ # announced and verify reconstruction happens immediately.
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(utxo, 10)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ for tx in block.vtx[1:]:
+ self.test_node.send_message(msg_tx(tx))
+ self.test_node.sync_with_ping()
+ # Make sure all transactions were accepted.
+ mempool = self.nodes[0].getrawmempool()
+ for tx in block.vtx[1:]:
+ assert(tx.hash in mempool)
+
+ # Clear out last request.
+ with mininode_lock:
+ self.test_node.last_getblocktxn = None
+
+ # Send compact block
+ comp_block.initialize_from_block(block, prefill_list=[0])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ # Shouldn't have gotten a request for any transaction
+ assert(self.test_node.last_getblocktxn is None)
+ # Tip should have updated
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Incorrectly responding to a getblocktxn shouldn't cause the block to be
+ # permanently failed.
+ def test_incorrect_blocktxn_response(self):
+ print("Testing handling of incorrect blocktxn responses...")
+
+ if (len(self.utxos) == 0):
+ self.make_utxos()
+ utxo = self.utxos.pop(0)
+
+ block = self.build_block_with_transactions(utxo, 10)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ # Relay the first 5 transactions from the block in advance
+ for tx in block.vtx[1:6]:
+ self.test_node.send_message(msg_tx(tx))
+ self.test_node.sync_with_ping()
+ # Make sure all transactions were accepted.
+ mempool = self.nodes[0].getrawmempool()
+ for tx in block.vtx[1:6]:
+ assert(tx.hash in mempool)
+
+ # Send compact block
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block, prefill_list=[0])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ absolute_indexes = []
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
+
+ # Now give an incorrect response.
+ # Note that it's possible for bitcoind to be smart enough to know we're
+ # lying, since it could check to see if the shortid matches what we're
+ # sending, and eg disconnect us for misbehavior. If that behavior
+ # change were made, we could just modify this test by having a
+ # different peer provide the block further down, so that we're still
+ # verifying that the block isn't marked bad permanently. This is good
+ # enough for now.
+ msg = msg_blocktxn()
+ msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
+ self.test_node.send_and_ping(msg)
+
+ # Tip should not have updated
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
+
+ # We should receive a getdata request
+ success = wait_until(lambda: self.test_node.last_getdata is not None, timeout=10)
+ assert(success)
+ assert_equal(len(self.test_node.last_getdata.inv), 1)
+ assert_equal(self.test_node.last_getdata.inv[0].type, 2)
+ assert_equal(self.test_node.last_getdata.inv[0].hash, block.sha256)
+
+ # Deliver the block
+ self.test_node.send_and_ping(msg_block(block))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ def test_getblocktxn_handler(self):
+ print("Testing getblocktxn handler...")
+
+ # bitcoind won't respond for blocks whose height is more than 15 blocks
+ # deep.
+ MAX_GETBLOCKTXN_DEPTH = 15
+ chain_height = self.nodes[0].getblockcount()
+ current_height = chain_height
+ while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
+ block_hash = self.nodes[0].getblockhash(current_height)
+ block = FromHex(CBlock(), self.nodes[0].getblock(block_hash, False))
+
+ msg = msg_getblocktxn()
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
+ num_to_request = random.randint(1, len(block.vtx))
+ msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
+ self.test_node.send_message(msg)
+ success = wait_until(lambda: self.test_node.last_blocktxn is not None, timeout=10)
+ assert(success)
+
+ [tx.calc_sha256() for tx in block.vtx]
+ with mininode_lock:
+ assert_equal(self.test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
+ all_indices = msg.block_txn_request.to_absolute()
+ for index in all_indices:
+ tx = self.test_node.last_blocktxn.block_transactions.transactions.pop(0)
+ tx.calc_sha256()
+ assert_equal(tx.sha256, block.vtx[index].sha256)
+ self.test_node.last_blocktxn = None
+ current_height -= 1
+
+ # Next request should be ignored, as we're past the allowed depth.
+ block_hash = self.nodes[0].getblockhash(current_height)
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
+ self.test_node.send_and_ping(msg)
+ with mininode_lock:
+ assert_equal(self.test_node.last_blocktxn, None)
+
+ def test_compactblocks_not_at_tip(self):
+ print("Testing compactblock requests/announcements not at chain tip...")
+
+ # Test that requesting old compactblocks doesn't work.
+ MAX_CMPCTBLOCK_DEPTH = 11
+ new_blocks = []
+ for i in range(MAX_CMPCTBLOCK_DEPTH):
+ self.test_node.clear_block_announcement()
+ new_blocks.append(self.nodes[0].generate(1)[0])
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+
+ self.test_node.clear_block_announcement()
+ self.test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ success = wait_until(lambda: self.test_node.last_cmpctblock is not None, timeout=30)
+ assert(success)
+
+ self.test_node.clear_block_announcement()
+ self.nodes[0].generate(1)
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+ self.test_node.clear_block_announcement()
+ self.test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ success = wait_until(lambda: self.test_node.last_block is not None, timeout=30)
+ assert(success)
+ with mininode_lock:
+ self.test_node.last_block.block.calc_sha256()
+ assert_equal(self.test_node.last_block.block.sha256, int(new_blocks[0], 16))
+
+ # Generate an old compactblock, and verify that it's not accepted.
+ cur_height = self.nodes[0].getblockcount()
+ hashPrevBlock = int(self.nodes[0].getblockhash(cur_height-5), 16)
+ block = self.build_block_on_tip()
+ block.hashPrevBlock = hashPrevBlock
+ block.solve()
+
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block)
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+
+ tips = self.nodes[0].getchaintips()
+ found = False
+ for x in tips:
+ if x["hash"] == block.hash:
+ assert_equal(x["status"], "headers-only")
+ found = True
+ break
+ assert(found)
+
+ # Requesting this block via getblocktxn should silently fail
+ # (to avoid fingerprinting attacks).
+ msg = msg_getblocktxn()
+ msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
+ with mininode_lock:
+ self.test_node.last_blocktxn = None
+ self.test_node.send_and_ping(msg)
+ with mininode_lock:
+ assert(self.test_node.last_blocktxn is None)
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ self.test_node = TestNode()
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
+ self.test_node.add_connection(connections[0])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Test logic begins here
+ self.test_node.wait_for_verack()
+
+ # We will need UTXOs to construct transactions in later tests.
+ self.make_utxos()
+
+ self.test_sendcmpct()
+ self.test_compactblock_construction()
+ self.test_compactblock_requests()
+ self.test_getblocktxn_requests()
+ self.test_getblocktxn_handler()
+ self.test_compactblocks_not_at_tip()
+ self.test_incorrect_blocktxn_response()
+ self.test_invalid_cmpctblock_message()
+
+
+if __name__ == '__main__':
+ CompactBlocksTest().main()
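On the short-ID checks in test_compactblock_construction() above: per BIP 152, the SipHash keys are derived from SHA256(serialized block header || 8-byte little-endian nonce), and each short transaction ID is SipHash-2-4 of the txid truncated to its low 6 bytes, which matches the calculate_shortid() helper the test imports. A condensed sketch of that derivation, assuming the test_framework.siphash module added in this change:

    import hashlib
    import struct
    from test_framework.siphash import siphash256

    def shortid_keys(header_bytes, nonce):
        # k0/k1 are the first two little-endian 64-bit words of
        # SHA256(header || nonce).
        h = hashlib.sha256(header_bytes + struct.pack("<Q", nonce)).digest()
        return struct.unpack("<Q", h[0:8])[0], struct.unpack("<Q", h[8:16])[0]

    def shortid(k0, k1, txid_int):
        # SipHash-2-4 of the txid (as an integer), keeping only the low 6 bytes.
        return siphash256(k0, k1, txid_int) & 0x0000ffffffffffff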
diff --git a/qa/rpc-tests/p2p-feefilter.py b/qa/rpc-tests/p2p-feefilter.py
index cd0501a314..96d99d38a7 100755
--- a/qa/rpc-tests/p2p-feefilter.py
+++ b/qa/rpc-tests/p2p-feefilter.py
@@ -62,6 +62,7 @@ class FeeFilterTest(BitcoinTestFramework):
def run_test(self):
node1 = self.nodes[1]
+ node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
@@ -91,8 +92,17 @@ class FeeFilterTest(BitcoinTestFramework):
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
- time.sleep(10) # wait 10 secs to be sure its doesn't relay any
- assert(allInvsMatch([], test_node))
+
+ # Send one transaction from node0 that should be received, so that we
+ # can sync the test on receipt (if node1's txs were relayed, they'd
+ # be received by the time this node0 tx is received). This is
+ # unfortunately reliant on the current relay behavior where we batch up
+ # to 35 entries in an inv, which means that when this next transaction
+ # is eligible for relay, the prior transactions from node1 are eligible
+ # as well.
+ node0.settxfee(Decimal("0.00020000"))
+ txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
+ assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Remove fee filter and check that txs are received again
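The new sync relies on allInvsMatch(), defined earlier in this test outside the hunks shown: it polls the mininode peer until the tx invs it has recorded match the expected list, or times out. A hedged sketch of what such a helper plausibly looks like (the txinvs attribute name is an assumption, not taken from the hunks above):

    import time
    from test_framework.mininode import mininode_lock

    def allInvsMatch(invs_expected, test_node, timeout=60):
        # Poll the peer's recorded tx invs under mininode_lock until they match
        # the expected set; give up after `timeout` seconds.
        for _ in range(timeout):
            with mininode_lock:
                if sorted(invs_expected) == sorted(test_node.txinvs):  # txinvs: assumed attribute
                    return True
            time.sleep(1)
        return False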
diff --git a/qa/rpc-tests/p2p-fullblocktest.py b/qa/rpc-tests/p2p-fullblocktest.py
index 17fd40ef1d..9aee81164f 100755
--- a/qa/rpc-tests/p2p-fullblocktest.py
+++ b/qa/rpc-tests/p2p-fullblocktest.py
@@ -25,9 +25,6 @@ We use the testing framework in which we expect a particular answer from
each test.
'''
-def hash160(s):
- return hashlib.new('ripemd160', sha256(s)).digest()
-
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
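The helper removed above computed Bitcoin's HASH160 (RIPEMD160 over SHA256); the removal presumably just drops a local duplicate in favor of a shared definition elsewhere in the framework. Equivalent standalone form:

    import hashlib

    def hash160(s):
        # Bitcoin's HASH160: RIPEMD160(SHA256(s)).
        return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()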
diff --git a/qa/rpc-tests/p2p-mempool.py b/qa/rpc-tests/p2p-mempool.py
index 5d2daf39f8..5c5d778f42 100755
--- a/qa/rpc-tests/p2p-mempool.py
+++ b/qa/rpc-tests/p2p-mempool.py
@@ -72,8 +72,11 @@ class TestNode(NodeConnCB):
self.send_message(msg_mempool())
class P2PMempoolTests(BitcoinTestFramework):
- def setup_chain(self):
- initialize_chain_clean(self.options.tmpdir, 2)
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 2
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
diff --git a/qa/rpc-tests/p2p-segwit.py b/qa/rpc-tests/p2p-segwit.py
index b30d41af92..5c1eb21b1f 100755
--- a/qa/rpc-tests/p2p-segwit.py
+++ b/qa/rpc-tests/p2p-segwit.py
@@ -168,8 +168,11 @@ class UTXO(object):
class SegWitTest(BitcoinTestFramework):
- def setup_chain(self):
- initialize_chain_clean(self.options.tmpdir, 3)
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
def add_options(self, parser):
parser.add_option("--oldbinary", dest="oldbinary",
@@ -302,13 +305,18 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
- # it to be too large according to IsStandard.
+ # it to be non-standard, to test that violating policy with a witness before
+ # segwit activation doesn't blind a node to a transaction. Transactions
+ # rejected for having a witness before segwit activation shouldn't be added
+ # to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
+ # Note that this should be rejected for the premature witness reason,
+ # rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
@@ -946,8 +954,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
- # Re-announcing won't result in a getdata for ~2.5 minutes, so just
- # deliver the modified transaction.
+ self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
@@ -961,8 +968,24 @@ class SegWitTest(BitcoinTestFramework):
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
- tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE])))
tx3.wit.vtxinwit.append(CTxInWitness())
+
+ # Add too-large for IsStandard witness and check that it does not enter reject filter
+ p2sh_program = CScript([OP_TRUE])
+ p2sh_pubkey = hash160(p2sh_program)
+ witness_program2 = CScript([b'a'*400000])
+ tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
+ tx3.rehash()
+
+ # Node will not be blinded to the transaction
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
+
+ # Remove witness stuffing, instead add extra witness push on stack
+ tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
@@ -1065,12 +1088,12 @@ class SegWitTest(BitcoinTestFramework):
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
- # Test size, vsize, cost
+ # Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
- cost = 3*len(block.serialize(False)) + len(block.serialize(True))
- assert_equal(rpc_details["cost"], cost)
+ weight = 3*len(block.serialize(False)) + len(block.serialize(True))
+ assert_equal(rpc_details["weight"], weight)
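The assertion above encodes the BIP 141 weight formula: three times the stripped (witness-free) serialization plus the full serialization. A minimal standalone sketch of the same arithmetic, using made-up sizes rather than a real block:

    # Hedged sketch of the weight computation checked above; the byte counts
    # are illustrative placeholders, not taken from an actual block.
    def block_weight(stripped_size, total_size):
        return 3 * stripped_size + total_size

    assert block_weight(200, 300) == 900
    # A block without witness data serializes identically both ways,
    # so its weight is exactly four times its size.
    assert block_weight(250, 250) == 4 * 250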
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(nVersion=4)
@@ -1086,6 +1109,82 @@ class SegWitTest(BitcoinTestFramework):
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
+ # V0 segwit outputs should be standard after activation, but not before.
+ def test_standardness_v0(self, segwit_activated):
+ print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
+ assert(len(self.utxo))
+
+ witness_program = CScript([OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ p2sh_pubkey = hash160(witness_program)
+ p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
+
+ # First prepare a p2sh output (so that spending it will pass standardness)
+ p2sh_tx = CTransaction()
+ p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
+ p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
+ p2sh_tx.rehash()
+
+ # Mine it on test_node to create the confirmed output.
+ self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # Now test standardness of v0 P2WSH outputs.
+ # Start by creating a transaction with two outputs.
+ tx = CTransaction()
+ tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
+ tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
+ tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
+ tx.rehash()
+
+ self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
+
+ # Now create something that looks like a P2PKH output. This won't be spendable.
+ scriptPubKey = CScript([OP_0, hash160(witness_hash)])
+ tx2 = CTransaction()
+ if segwit_activated:
+ # if tx was accepted, then we spend the second output.
+ tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
+ tx2.vout = [CTxOut(7000, scriptPubKey)]
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ else:
+ # if tx wasn't accepted, we just re-spend the p2sh output we started with.
+ tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
+ tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
+ tx2.rehash()
+
+ self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
+
+ # Now update self.utxo for later tests.
+ tx3 = CTransaction()
+ if segwit_activated:
+ # tx and tx2 were both accepted. Don't bother trying to reclaim the
+ # P2PKH output; just send tx's first output back to an anyone-can-spend.
+ sync_mempools(self.nodes)
+ tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+ tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
+ tx3.wit.vtxinwit.append(CTxInWitness())
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx3.rehash()
+ self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+ else:
+ # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
+ tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
+ tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
+ tx3.rehash()
+ self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+ assert_equal(len(self.nodes[1].getrawmempool()), 0)
+
+
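For reference, the v0 outputs exercised by test_standardness_v0 are native P2WSH: OP_0 followed by a 32-byte push of sha256(witness_program), with hash160 used only for the P2SH wrapper. A self-contained sketch of that scriptPubKey layout (raw bytes instead of the framework's CScript, for illustration only):

    # Hedged sketch: byte layout of a v0 P2WSH scriptPubKey.
    # OP_0 is 0x00 and a 32-byte direct push is prefixed with 0x20.
    import hashlib

    witness_program = bytes([0x51])                    # OP_TRUE, as in the test above
    script_hash = hashlib.sha256(witness_program).digest()
    scriptPubKey = bytes([0x00, 0x20]) + script_hash
    assert len(scriptPubKey) == 34                     # OP_0 + push(32) + 32-byte hash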
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
@@ -1293,6 +1392,9 @@ class SegWitTest(BitcoinTestFramework):
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
+ # Ping regularly to keep the connection alive
+ if (not i % 100):
+ self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
@@ -1658,6 +1760,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
+ self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
@@ -1679,6 +1782,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
+ self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_signature_version_1()
diff --git a/qa/rpc-tests/p2p-versionbits-warning.py b/qa/rpc-tests/p2p-versionbits-warning.py
index 962cafef0b..00dbbc02cf 100755
--- a/qa/rpc-tests/p2p-versionbits-warning.py
+++ b/qa/rpc-tests/p2p-versionbits-warning.py
@@ -6,6 +6,7 @@
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
+import re
import time
from test_framework.blocktools import create_block, create_coinbase
@@ -21,6 +22,10 @@ VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
+WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect"
+WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
+VB_PATTERN = re.compile("^Warning.*versionbit")
+
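The module-level pattern is anchored at the start of the alert text, so it only fires on warnings that begin with "Warning" and mention a versionbit. A quick sanity sketch (the sample strings below are illustrative, not exact bitcoind output):

    import re

    VB_PATTERN = re.compile("^Warning.*versionbit")
    assert VB_PATTERN.match("Warning: unknown new rules activated (versionbit 27)")
    assert not VB_PATTERN.match("Some unrelated error text")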
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
@@ -65,16 +70,12 @@ class VersionBitsWarningTest(BitcoinTestFramework):
self.num_nodes = 1
def setup_network(self):
- self.nodes = []
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
# Open and close to create zero-length file
- with open(self.alert_filename, 'w') as f:
+ with open(self.alert_filename, 'w') as _:
pass
- self.node_options = ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]
- self.nodes.append(start_node(0, self.options.tmpdir, self.node_options))
-
- import re
- self.vb_pattern = re.compile("^Warning.*versionbit")
+ self.extra_args = [["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
# Send numblocks blocks via peer with nVersionToUse set.
def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
@@ -83,7 +84,7 @@ class VersionBitsWarningTest(BitcoinTestFramework):
block_time = self.nodes[0].getblockheader(tip)["time"]+1
tip = int(tip, 16)
- for i in range(numblocks):
+ for _ in range(numblocks):
block = create_block(tip, create_coinbase(height+1), block_time)
block.nVersion = nVersionToUse
block.solve()
@@ -96,7 +97,7 @@ class VersionBitsWarningTest(BitcoinTestFramework):
def test_versionbits_in_alert_file(self):
with open(self.alert_filename, 'r') as f:
alert_text = f.read()
- assert(self.vb_pattern.match(alert_text))
+ assert(VB_PATTERN.match(alert_text))
def run_test(self):
# Setup the p2p connection and start up the network thread.
@@ -122,8 +123,10 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# Fill rest of period with regular version blocks
self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
# Check that we're not getting any versionbit-related errors in
- # getinfo()
- assert(not self.vb_pattern.match(self.nodes[0].getinfo()["errors"]))
+ # get*info()
+ assert(not VB_PATTERN.match(self.nodes[0].getinfo()["errors"]))
+ assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["errors"]))
+ assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
# 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
# some unknown bit
@@ -132,8 +135,10 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# Might not get a versionbits-related alert yet, as we should
# have gotten a different alert due to more than 51/100 blocks
# being of unexpected version.
- # Check that getinfo() shows some kind of error.
- assert(len(self.nodes[0].getinfo()["errors"]) != 0)
+ # Check that get*info() shows some kind of error.
+ assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getinfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
# Mine a period worth of expected blocks so the generic block-version warning
# is cleared, and restart the node. This should move the versionbit state
@@ -142,20 +147,21 @@ class VersionBitsWarningTest(BitcoinTestFramework):
stop_node(self.nodes[0], 0)
wait_bitcoinds()
# Empty out the alert file
- with open(self.alert_filename, 'w') as f:
+ with open(self.alert_filename, 'w') as _:
pass
- self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
# Connecting one block should be enough to generate an error.
self.nodes[0].generate(1)
- assert(len(self.nodes[0].getinfo()["errors"]) != 0)
+ assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getinfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
stop_node(self.nodes[0], 0)
wait_bitcoinds()
self.test_versionbits_in_alert_file()
# Test framework expects the node to still be running...
- self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
-
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
if __name__ == '__main__':
VersionBitsWarningTest().main()
diff --git a/qa/rpc-tests/rest.py b/qa/rpc-tests/rest.py
index c9c2eaf7f3..b769cd71f2 100755
--- a/qa/rpc-tests/rest.py
+++ b/qa/rpc-tests/rest.py
@@ -179,14 +179,14 @@ class RESTTest (BitcoinTestFramework):
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
- assert_equal(response.status, 500) #must be a 500 because we send a invalid json request
+ assert_equal(response.status, 400) #must be a 400 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
- assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
+ assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
- assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
+ assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
@@ -194,14 +194,14 @@ class RESTTest (BitcoinTestFramework):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
- assert_equal(response.status, 500) #must be a 500 because we exceeding the limits
+ assert_equal(response.status, 400) #must be a 400 because we are exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
- assert_equal(response.status, 200) #must be a 500 because we exceeding the limits
+ assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
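For orientation, these requests target the REST getutxos endpoint: outpoints can be supplied as txid-n path segments (with a hard cap on their number), the optional checkmempool segment includes unconfirmed outputs, and malformed or oversized requests now draw HTTP 400 instead of 500. A hedged sketch of how such a URI is assembled, mirroring the string building in the test (the txid is a dummy placeholder):

    # Hedged sketch of a /rest/getutxos URI with the checkmempool flag; the
    # txid below is a made-up value, not a real transaction.
    txid = "ff" * 32
    outpoints = ["%s-%d" % (txid, n) for n in range(3)]
    uri = "/rest/getutxos/checkmempool/" + "/".join(outpoints) + ".json"
    assert uri.startswith("/rest/getutxos/checkmempool/")
    assert uri.endswith(".json")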
diff --git a/qa/rpc-tests/rpcbind_test.py b/qa/rpc-tests/rpcbind_test.py
index 572273566b..085024e268 100755
--- a/qa/rpc-tests/rpcbind_test.py
+++ b/qa/rpc-tests/rpcbind_test.py
@@ -5,143 +5,106 @@
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
-# TODO extend this test from the test framework (like all other tests)
-
-import tempfile
-import traceback
-
+from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.netutil import *
-def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
- '''
- Start a node with requested rpcallowip and rpcbind parameters,
- then try to connect, and check if the set of bound addresses
- matches the expected set.
- '''
- expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
- base_args = ['-disablewallet', '-nolisten']
- if allow_ips:
- base_args += ['-rpcallowip=' + x for x in allow_ips]
- binds = ['-rpcbind='+addr for addr in addresses]
- nodes = start_nodes(self.num_nodes, tmpdir, [base_args + binds], connect_to)
- try:
- pid = bitcoind_processes[0].pid
- assert_equal(set(get_bind_addrs(pid)), set(expected))
- finally:
- stop_nodes(nodes)
- wait_bitcoinds()
-
-def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
- '''
- Start a node with rpcwallow IP, and request getinfo
- at a non-localhost IP.
- '''
- base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
- nodes = start_nodes(self.num_nodes, tmpdir, [base_args])
- try:
- # connect to node through non-loopback interface
- url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
- node = get_rpc_proxy(url, 1)
- node.getinfo()
- finally:
- node = None # make sure connection will be garbage collected and closed
- stop_nodes(nodes)
- wait_bitcoinds()
-
-def run_test(tmpdir):
- assert(sys.platform.startswith('linux')) # due to OS-specific network stats queries, this test works only on Linux
- # find the first non-loopback interface for testing
- non_loopback_ip = None
- for name,ip in all_interfaces():
- if ip != '127.0.0.1':
- non_loopback_ip = ip
- break
- if non_loopback_ip is None:
- assert(not 'This test requires at least one non-loopback IPv4 interface')
- print("Using interface %s for testing" % non_loopback_ip)
+class RPCBindTest(BitcoinTestFramework):
- defaultport = rpc_port(0)
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
- # check default without rpcallowip (IPv4 and IPv6 localhost)
- run_bind_test(tmpdir, None, '127.0.0.1', [],
- [('127.0.0.1', defaultport), ('::1', defaultport)])
- # check default with rpcallowip (IPv6 any)
- run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
- [('::0', defaultport)])
- # check only IPv4 localhost (explicit)
- run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
- [('127.0.0.1', defaultport)])
- # check only IPv4 localhost (explicit) with alternative port
- run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
- [('127.0.0.1', 32171)])
- # check only IPv4 localhost (explicit) with multiple alternative ports on same host
- run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
- [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
- # check only IPv6 localhost (explicit)
- run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
- [('::1', defaultport)])
- # check both IPv4 and IPv6 localhost (explicit)
- run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
- [('127.0.0.1', defaultport), ('::1', defaultport)])
- # check only non-loopback interface
- run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
- [(non_loopback_ip, defaultport)])
-
- # Check that with invalid rpcallowip, we are denied
- run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
- try:
- run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
- assert(not 'Connection not denied by rpcallowip as expected')
- except ValueError:
+ def setup_network(self):
pass
-def main():
- import optparse
-
- parser = optparse.OptionParser(usage="%prog [options]")
- parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
- help="Leave bitcoinds and test.* datadir on exit or error")
- parser.add_option("--srcdir", dest="srcdir", default="../../src",
- help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
- parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
- help="Root directory for datadirs")
- (options, args) = parser.parse_args()
-
- os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
-
- check_json_precision()
-
- success = False
- nodes = []
- try:
- print("Initializing test directory "+options.tmpdir)
- if not os.path.isdir(options.tmpdir):
- os.makedirs(options.tmpdir)
- initialize_chain(options.tmpdir)
-
- run_test(options.tmpdir)
-
- success = True
-
- except AssertionError as e:
- print("Assertion failed: "+e.message)
- except Exception as e:
- print("Unexpected exception caught during testing: "+str(e))
- traceback.print_tb(sys.exc_info()[2])
-
- if not options.nocleanup:
- print("Cleaning up")
- wait_bitcoinds()
- shutil.rmtree(options.tmpdir)
+ def setup_nodes(self):
+ pass
- if success:
- print("Tests successful")
- sys.exit(0)
- else:
- print("Failed")
- sys.exit(1)
+ def run_bind_test(self, allow_ips, connect_to, addresses, expected):
+ '''
+ Start a node with requested rpcallowip and rpcbind parameters,
+ then try to connect, and check if the set of bound addresses
+ matches the expected set.
+ '''
+ expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
+ base_args = ['-disablewallet', '-nolisten']
+ if allow_ips:
+ base_args += ['-rpcallowip=' + x for x in allow_ips]
+ binds = ['-rpcbind='+addr for addr in addresses]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
+ try:
+ pid = bitcoind_processes[0].pid
+ assert_equal(set(get_bind_addrs(pid)), set(expected))
+ finally:
+ stop_nodes(self.nodes)
+ wait_bitcoinds()
+
+ def run_allowip_test(self, allow_ips, rpchost, rpcport):
+ '''
+ Start a node with -rpcallowip, and request getnetworkinfo
+ at a non-localhost IP.
+ '''
+ base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
+ try:
+ # connect to node through non-loopback interface
+ node = get_rpc_proxy(rpc_url(0, "%s:%d" % (rpchost, rpcport)), 0)
+ node.getnetworkinfo()
+ finally:
+ node = None # make sure connection will be garbage collected and closed
+ stop_nodes(self.nodes)
+ wait_bitcoinds()
+
+ def run_test(self):
+ # due to OS-specific network stats queries, this test works only on Linux
+ assert(sys.platform.startswith('linux'))
+ # find the first non-loopback interface for testing
+ non_loopback_ip = None
+ for name,ip in all_interfaces():
+ if ip != '127.0.0.1':
+ non_loopback_ip = ip
+ break
+ if non_loopback_ip is None:
+ assert(not 'This test requires at least one non-loopback IPv4 interface')
+ print("Using interface %s for testing" % non_loopback_ip)
+
+ defaultport = rpc_port(0)
+
+ # check default without rpcallowip (IPv4 and IPv6 localhost)
+ self.run_bind_test(None, '127.0.0.1', [],
+ [('127.0.0.1', defaultport), ('::1', defaultport)])
+ # check default with rpcallowip (IPv6 any)
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
+ [('::0', defaultport)])
+ # check only IPv4 localhost (explicit)
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
+ [('127.0.0.1', defaultport)])
+ # check only IPv4 localhost (explicit) with alternative port
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
+ [('127.0.0.1', 32171)])
+ # check only IPv4 localhost (explicit) with multiple alternative ports on same host
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
+ [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
+ # check only IPv6 localhost (explicit)
+ self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
+ [('::1', defaultport)])
+ # check both IPv4 and IPv6 localhost (explicit)
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
+ [('127.0.0.1', defaultport), ('::1', defaultport)])
+ # check only non-loopback interface
+ self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
+ [(non_loopback_ip, defaultport)])
+
+ # Check that with invalid rpcallowip, we are denied
+ self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
+ try:
+ self.run_allowip_test(['1.1.1.1'], non_loopback_ip, defaultport)
+ assert(not 'Connection not denied by rpcallowip as expected')
+ except JSONRPCException:
+ pass
if __name__ == '__main__':
- main()
+ RPCBindTest().main()
diff --git a/qa/rpc-tests/segwit.py b/qa/rpc-tests/segwit.py
index d4c9a8afed..745a1d4750 100755
--- a/qa/rpc-tests/segwit.py
+++ b/qa/rpc-tests/segwit.py
@@ -10,8 +10,6 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, ripemd160
-import os
-import shutil
NODE_0 = 0
NODE_1 = 1
@@ -69,11 +67,17 @@ def getutxo(txid):
utxo["txid"] = txid
return utxo
+def find_unspent(node, min_value):
+ for utxo in node.listunspent():
+ if utxo['amount'] >= min_value:
+ return utxo
+
class SegWitTest(BitcoinTestFramework):
- def setup_chain(self):
- print("Initializing test directory "+self.options.tmpdir)
- initialize_chain_clean(self.options.tmpdir, 3)
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
def setup_network(self):
self.nodes = []
@@ -117,8 +121,21 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)
def run_test(self):
- self.nodes[0].generate(160) #block 160
-
+ self.nodes[0].generate(161) #block 161
+
+ print("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(tmpl['sigoplimit'] == 20000)
+ assert(tmpl['transactions'][0]['hash'] == txid)
+ assert(tmpl['transactions'][0]['sigops'] == 2)
+ tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
+ assert(tmpl['sigoplimit'] == 20000)
+ assert(tmpl['transactions'][0]['hash'] == txid)
+ assert(tmpl['transactions'][0]['sigops'] == 2)
+ self.nodes[0].generate(1) #block 162
+
+ balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
@@ -137,18 +154,18 @@ class SegWitTest(BitcoinTestFramework):
for i in range(5):
for n in range(3):
for v in range(2):
- wit_ids[n][v].append(send_to_witness(v, self.nodes[0], self.nodes[0].listunspent()[0], self.pubkey[n], False, Decimal("49.999")))
- p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], self.nodes[0].listunspent()[0], self.pubkey[n], True, Decimal("49.999")))
+ wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
+ p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
- self.nodes[0].generate(1) #block 161
+ self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
- assert_equal(self.nodes[0].getbalance(), 60*50 - 60*50 + 20*Decimal("49.999") + 50)
+ assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
- self.nodes[0].generate(262) #block 423
+ self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
print("Verify default node can't accept any witness format txs before fork")
@@ -205,5 +222,25 @@ class SegWitTest(BitcoinTestFramework):
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
+ print("Verify sigops are counted in GBT with BIP141 rules after the fork")
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+ tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
+ assert(tmpl['sigoplimit'] == 80000)
+ assert(tmpl['transactions'][0]['txid'] == txid)
+ assert(tmpl['transactions'][0]['sigops'] == 8)
+
+ print("Verify non-segwit miners get a valid GBT response after the fork")
+ send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.998"))
+ try:
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(len(tmpl['transactions']) == 1) # Doesn't include witness tx
+ assert(tmpl['sigoplimit'] == 20000)
+ assert(tmpl['transactions'][0]['hash'] == txid)
+ assert(tmpl['transactions'][0]['sigops'] == 2)
+ assert(('!segwit' in tmpl['rules']) or ('segwit' not in tmpl['rules']))
+ except JSONRPCException:
+ # This is an acceptable outcome
+ pass
+
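The numbers asserted above follow BIP 141's sigop scaling: under segwit rules the limit and per-transaction counts are expressed in cost units, four per legacy sigop, which is why 20000 becomes 80000 and the single spend reports 8 instead of 2. A tiny arithmetic sketch:

    # Hedged sketch of the BIP 141 sigop scaling reflected in the assertions above.
    WITNESS_SCALE_FACTOR = 4                       # scaling factor defined by BIP 141
    assert 20000 * WITNESS_SCALE_FACTOR == 80000   # sigoplimit before vs. after the fork
    assert 2 * WITNESS_SCALE_FACTOR == 8           # sigops reported for the test transaction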
if __name__ == '__main__':
SegWitTest().main()
diff --git a/qa/rpc-tests/test_framework/authproxy.py b/qa/rpc-tests/test_framework/authproxy.py
index d095a56ce7..f5e0be20d1 100644
--- a/qa/rpc-tests/test_framework/authproxy.py
+++ b/qa/rpc-tests/test_framework/authproxy.py
@@ -55,7 +55,11 @@ log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
- Exception.__init__(self)
+ try:
+ errmsg = '%(message)s (%(code)i)' % rpc_error
+ except (KeyError, TypeError):
+ errmsg = ''
+ Exception.__init__(self, errmsg)
self.error = rpc_error
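With this change the exception's string form carries a readable message assembled from the RPC error object, while the structured error stays on .error. A hedged sketch of the behaviour, constructing the exception directly instead of going through an RPC call:

    # Hedged sketch using a local copy of the class as patched above; malformed
    # error objects fall back to an empty message.
    class JSONRPCException(Exception):
        def __init__(self, rpc_error):
            try:
                errmsg = '%(message)s (%(code)i)' % rpc_error
            except (KeyError, TypeError):
                errmsg = ''
            Exception.__init__(self, errmsg)
            self.error = rpc_error

    e = JSONRPCException({'code': -8, 'message': 'Invalid parameter'})
    assert str(e) == 'Invalid parameter (-8)'
    assert e.error['code'] == -8
    assert str(JSONRPCException('not a mapping')) == ''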
diff --git a/qa/rpc-tests/test_framework/blockstore.py b/qa/rpc-tests/test_framework/blockstore.py
index 6120dd574b..1e2bbb277a 100644
--- a/qa/rpc-tests/test_framework/blockstore.py
+++ b/qa/rpc-tests/test_framework/blockstore.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# BlockStore: a helper class that keeps a map of blocks and implements
# helper functions for responding to getheaders and getdata,
# and for constructing a getheaders message
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index cdd5292cd6..caffab3535 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -36,9 +36,10 @@ from threading import RLock
from threading import Thread
import logging
import copy
+from test_framework.siphash import siphash256
BIP0031_VERSION = 60000
-MY_VERSION = 60001 # past bip-31 for ping/pong
+MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MAX_INV_SZ = 50000
@@ -52,7 +53,7 @@ NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
# Keep our own socket map for asyncore, so that we can track disconnects
-# ourselves (to workaround an issue with closing an asyncore socket when
+# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
@@ -74,8 +75,19 @@ def ripemd160(s):
def hash256(s):
return sha256(sha256(s))
+def ser_compact_size(l):
+ r = b""
+ if l < 253:
+ r = struct.pack("B", l)
+ elif l < 0x10000:
+ r = struct.pack("<BH", 253, l)
+ elif l < 0x100000000:
+ r = struct.pack("<BI", 254, l)
+ else:
+ r = struct.pack("<BQ", 255, l)
+ return r
-def deser_string(f):
+def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
@@ -83,16 +95,14 @@ def deser_string(f):
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
+ return nit
+
+def deser_string(f):
+ nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
- if len(s) < 253:
- return struct.pack("B", len(s)) + s
- elif len(s) < 0x10000:
- return struct.pack("<BH", 253, len(s)) + s
- elif len(s) < 0x100000000:
- return struct.pack("<BI", 254, len(s)) + s
- return struct.pack("<BQ", 255, len(s)) + s
+ return ser_compact_size(len(s)) + s
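The new helpers centralize Bitcoin's CompactSize encoding: values below 253 occupy a single byte, and the 0xfd/0xfe/0xff prefixes introduce 2-, 4- and 8-byte little-endian forms. A worked example against a standalone copy of the serializer:

    import struct

    # Standalone copy of ser_compact_size from the hunk above, for illustration.
    def ser_compact_size(l):
        if l < 253:
            return struct.pack("B", l)
        elif l < 0x10000:
            return struct.pack("<BH", 253, l)
        elif l < 0x100000000:
            return struct.pack("<BI", 254, l)
        return struct.pack("<BQ", 255, l)

    assert ser_compact_size(100) == b'\x64'
    assert ser_compact_size(253) == b'\xfd\xfd\x00'
    assert ser_compact_size(0x1234) == b'\xfd\x34\x12'
    assert ser_compact_size(0x10000) == b'\xfe\x00\x00\x01\x00'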
def deser_uint256(f):
r = 0
@@ -125,13 +135,7 @@ def uint256_from_compact(c):
def deser_vector(f, c):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
@@ -144,15 +148,7 @@ def deser_vector(f, c):
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
@@ -162,13 +158,7 @@ def ser_vector(l, ser_function_name=None):
def deser_uint256_vector(f):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
@@ -177,28 +167,14 @@ def deser_uint256_vector(f):
def ser_uint256_vector(l):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
@@ -207,28 +183,14 @@ def deser_string_vector(f):
def ser_string_vector(l):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
@@ -237,15 +199,7 @@ def deser_int_vector(f):
def ser_int_vector(l):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
@@ -294,7 +248,8 @@ class CInv(object):
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
- 2|MSG_WITNESS_FLAG : "WitnessBlock"
+ 2|MSG_WITNESS_FLAG : "WitnessBlock",
+ 4: "CompactBlock"
}
def __init__(self, t=0, h=0):
@@ -781,6 +736,187 @@ class CAlert(object):
% (len(self.vchMsg), len(self.vchSig))
+class PrefilledTransaction(object):
+ def __init__(self, index=0, tx = None):
+ self.index = index
+ self.tx = tx
+
+ def deserialize(self, f):
+ self.index = deser_compact_size(f)
+ self.tx = CTransaction()
+ self.tx.deserialize(f)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += ser_compact_size(self.index)
+ if with_witness:
+ r += self.tx.serialize_with_witness()
+ else:
+ r += self.tx.serialize_without_witness()
+ return r
+
+ def __repr__(self):
+ return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
+
+# This is what we send on the wire, in a cmpctblock message.
+class P2PHeaderAndShortIDs(object):
+ def __init__(self):
+ self.header = CBlockHeader()
+ self.nonce = 0
+ self.shortids_length = 0
+ self.shortids = []
+ self.prefilled_txn_length = 0
+ self.prefilled_txn = []
+
+ def deserialize(self, f):
+ self.header.deserialize(f)
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+ self.shortids_length = deser_compact_size(f)
+ for i in range(self.shortids_length):
+ # shortids are defined to be 6 bytes in the spec, so append
+ # two zero bytes and read it in as an 8-byte number
+ self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
+ self.prefilled_txn = deser_vector(f, PrefilledTransaction)
+ self.prefilled_txn_length = len(self.prefilled_txn)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += self.header.serialize()
+ r += struct.pack("<Q", self.nonce)
+ r += ser_compact_size(self.shortids_length)
+ for x in self.shortids:
+ # We only want the first 6 bytes
+ r += struct.pack("<Q", x)[0:6]
+ r += ser_vector(self.prefilled_txn)
+ return r
+
+ def __repr__(self):
+ return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
+
+
+# Calculate the BIP 152-compact blocks shortid for a given transaction hash
+def calculate_shortid(k0, k1, tx_hash):
+ expected_shortid = siphash256(k0, k1, tx_hash)
+ expected_shortid &= 0x0000ffffffffffff
+ return expected_shortid
+
+# This version gets rid of the array lengths, and reinterprets the differential
+# encoding into indices that can be used for lookup.
+class HeaderAndShortIDs(object):
+ def __init__(self, p2pheaders_and_shortids = None):
+ self.header = CBlockHeader()
+ self.nonce = 0
+ self.shortids = []
+ self.prefilled_txn = []
+
+ if p2pheaders_and_shortids != None:
+ self.header = p2pheaders_and_shortids.header
+ self.nonce = p2pheaders_and_shortids.nonce
+ self.shortids = p2pheaders_and_shortids.shortids
+ last_index = -1
+ for x in p2pheaders_and_shortids.prefilled_txn:
+ self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
+ last_index = self.prefilled_txn[-1].index
+
+ def to_p2p(self):
+ ret = P2PHeaderAndShortIDs()
+ ret.header = self.header
+ ret.nonce = self.nonce
+ ret.shortids_length = len(self.shortids)
+ ret.shortids = self.shortids
+ ret.prefilled_txn_length = len(self.prefilled_txn)
+ ret.prefilled_txn = []
+ last_index = -1
+ for x in self.prefilled_txn:
+ ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
+ last_index = x.index
+ return ret
+
+ def get_siphash_keys(self):
+ header_nonce = self.header.serialize()
+ header_nonce += struct.pack("<Q", self.nonce)
+ hash_header_nonce_as_str = sha256(header_nonce)
+ key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
+ key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
+ return [ key0, key1 ]
+
+ def initialize_from_block(self, block, nonce=0, prefill_list = [0]):
+ self.header = CBlockHeader(block)
+ self.nonce = nonce
+ self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
+ self.shortids = []
+ [k0, k1] = self.get_siphash_keys()
+ for i in range(len(block.vtx)):
+ if i not in prefill_list:
+ self.shortids.append(calculate_shortid(k0, k1, block.vtx[i].sha256))
+
+ def __repr__(self):
+ return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
+
+
+class BlockTransactionsRequest(object):
+
+ def __init__(self, blockhash=0, indexes = None):
+ self.blockhash = blockhash
+ self.indexes = indexes if indexes != None else []
+
+ def deserialize(self, f):
+ self.blockhash = deser_uint256(f)
+ indexes_length = deser_compact_size(f)
+ for i in range(indexes_length):
+ self.indexes.append(deser_compact_size(f))
+
+ def serialize(self):
+ r = b""
+ r += ser_uint256(self.blockhash)
+ r += ser_compact_size(len(self.indexes))
+ for x in self.indexes:
+ r += ser_compact_size(x)
+ return r
+
+ # helper to set the differentially encoded indexes from absolute ones
+ def from_absolute(self, absolute_indexes):
+ self.indexes = []
+ last_index = -1
+ for x in absolute_indexes:
+ self.indexes.append(x-last_index-1)
+ last_index = x
+
+ def to_absolute(self):
+ absolute_indexes = []
+ last_index = -1
+ for x in self.indexes:
+ absolute_indexes.append(x+last_index+1)
+ last_index = absolute_indexes[-1]
+ return absolute_indexes
+
+ def __repr__(self):
+ return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
+
+
+class BlockTransactions(object):
+
+ def __init__(self, blockhash=0, transactions = None):
+ self.blockhash = blockhash
+ self.transactions = transactions if transactions != None else []
+
+ def deserialize(self, f):
+ self.blockhash = deser_uint256(f)
+ self.transactions = deser_vector(f, CTransaction)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += ser_uint256(self.blockhash)
+ if with_witness:
+ r += ser_vector(self.transactions, "serialize_with_witness")
+ else:
+ r += ser_vector(self.transactions)
+ return r
+
+ def __repr__(self):
+ return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
+
+
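These classes follow BIP 152: short IDs are SipHash-2-4 of each transaction hash, keyed from the serialized header plus nonce and truncated to 48 bits, while prefilled and requested indexes travel differentially encoded. A self-contained sketch of the index round-trip implemented by from_absolute()/to_absolute() above:

    # Hedged sketch of the differential index encoding used by
    # BlockTransactionsRequest; each stored value is the gap to the previous
    # absolute index minus one.
    def to_differential(absolute_indexes):
        out, last = [], -1
        for x in absolute_indexes:
            out.append(x - last - 1)
            last = x
        return out

    def to_absolute(differential_indexes):
        out, last = [], -1
        for x in differential_indexes:
            out.append(x + last + 1)
            last = out[-1]
        return out

    assert to_differential([1, 2, 5, 9]) == [1, 0, 2, 3]
    assert to_absolute([1, 0, 2, 3]) == [1, 2, 5, 9]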
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
@@ -1215,6 +1351,79 @@ class msg_feefilter(object):
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
+class msg_sendcmpct(object):
+ command = b"sendcmpct"
+
+ def __init__(self):
+ self.announce = False
+ self.version = 1
+
+ def deserialize(self, f):
+ self.announce = struct.unpack("<?", f.read(1))[0]
+ self.version = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<?", self.announce)
+ r += struct.pack("<Q", self.version)
+ return r
+
+ def __repr__(self):
+ return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
+
+class msg_cmpctblock(object):
+ command = b"cmpctblock"
+
+ def __init__(self, header_and_shortids = None):
+ self.header_and_shortids = header_and_shortids
+
+ def deserialize(self, f):
+ self.header_and_shortids = P2PHeaderAndShortIDs()
+ self.header_and_shortids.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.header_and_shortids.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
+
+class msg_getblocktxn(object):
+ command = b"getblocktxn"
+
+ def __init__(self):
+ self.block_txn_request = None
+
+ def deserialize(self, f):
+ self.block_txn_request = BlockTransactionsRequest()
+ self.block_txn_request.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.block_txn_request.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
+
+class msg_blocktxn(object):
+ command = b"blocktxn"
+
+ def __init__(self):
+ self.block_transactions = BlockTransactions()
+
+ def deserialize(self, f):
+ self.block_transactions.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.block_transactions.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
+
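On the wire the new sendcmpct message is just a boolean announce flag followed by an 8-byte little-endian version, matching the serialize() method above. A quick sketch of the resulting payload:

    import struct

    # Hedged sketch: payload produced by msg_sendcmpct with announce=True, version=1.
    payload = struct.pack("<?", True) + struct.pack("<Q", 1)
    assert payload == b'\x01\x01\x00\x00\x00\x00\x00\x00\x00'
    assert len(payload) == 9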
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
@@ -1295,6 +1504,10 @@ class NodeConnCB(object):
def on_pong(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
+ def on_sendcmpct(self, conn, message): pass
+ def on_cmpctblock(self, conn, message): pass
+ def on_getblocktxn(self, conn, message): pass
+ def on_blocktxn(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
@@ -1311,6 +1524,10 @@ class SingleNodeConnCB(NodeConnCB):
def send_message(self, message):
self.connection.send_message(message)
+ def send_and_ping(self, message):
+ self.send_message(message)
+ self.sync_with_ping()
+
def on_pong(self, conn, message):
self.last_pong = message
@@ -1344,7 +1561,11 @@ class NodeConn(asyncore.dispatcher):
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
- b"sendheaders": msg_sendheaders
+ b"sendheaders": msg_sendheaders,
+ b"sendcmpct": msg_sendcmpct,
+ b"cmpctblock": msg_cmpctblock,
+ b"getblocktxn": msg_getblocktxn,
+ b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
diff --git a/qa/rpc-tests/test_framework/script.py b/qa/rpc-tests/test_framework/script.py
index b46c643ccb..83bbf20479 100644
--- a/qa/rpc-tests/test_framework/script.py
+++ b/qa/rpc-tests/test_framework/script.py
@@ -882,7 +882,7 @@ def SignatureHash(script, txTo, inIdx, hashtype):
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
- txtmp.vout.append(CTxOut())
+ txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
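The one-character change above matters because SIGHASH_SINGLE blanks every output before the signed index as value -1 (all 0xff on the wire) with an empty script; the default CTxOut() would serialize a zero value and yield the wrong sighash. A small sketch of the blanked-output serialization using raw struct packing (illustration only, not the framework's CTxOut):

    import struct

    # Hedged sketch: a "blanked" output under SIGHASH_SINGLE serializes its
    # value as -1 (eight 0xff bytes) followed by an empty script (length 0).
    blanked_value = struct.pack("<q", -1)
    assert blanked_value == b'\xff' * 8
    blanked_output = blanked_value + b'\x00'
    assert len(blanked_output) == 9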
diff --git a/qa/rpc-tests/test_framework/siphash.py b/qa/rpc-tests/test_framework/siphash.py
new file mode 100644
index 0000000000..9c0574bd93
--- /dev/null
+++ b/qa/rpc-tests/test_framework/siphash.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#
+# siphash.py - Specialized SipHash-2-4 implementations
+#
+# This implements SipHash-2-4 for 256-bit integers.
+
+def rotl64(n, b):
+ return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b
+
+def siphash_round(v0, v1, v2, v3):
+ v0 = (v0 + v1) & ((1 << 64) - 1)
+ v1 = rotl64(v1, 13)
+ v1 ^= v0
+ v0 = rotl64(v0, 32)
+ v2 = (v2 + v3) & ((1 << 64) - 1)
+ v3 = rotl64(v3, 16)
+ v3 ^= v2
+ v0 = (v0 + v3) & ((1 << 64) - 1)
+ v3 = rotl64(v3, 21)
+ v3 ^= v0
+ v2 = (v2 + v1) & ((1 << 64) - 1)
+ v1 = rotl64(v1, 17)
+ v1 ^= v2
+ v2 = rotl64(v2, 32)
+ return (v0, v1, v2, v3)
+
+def siphash256(k0, k1, h):
+ n0 = h & ((1 << 64) - 1)
+ n1 = (h >> 64) & ((1 << 64) - 1)
+ n2 = (h >> 128) & ((1 << 64) - 1)
+ n3 = (h >> 192) & ((1 << 64) - 1)
+ v0 = 0x736f6d6570736575 ^ k0
+ v1 = 0x646f72616e646f6d ^ k1
+ v2 = 0x6c7967656e657261 ^ k0
+ v3 = 0x7465646279746573 ^ k1 ^ n0
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n0
+ v3 ^= n1
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n1
+ v3 ^= n2
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n2
+ v3 ^= n3
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n3
+ v3 ^= 0x2000000000000000
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= 0x2000000000000000
+ v2 ^= 0xFF
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ return v0 ^ v1 ^ v2 ^ v3
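A quick way to exercise the new module is to feed it fixed keys and a 256-bit integer; combined with the 48-bit mask applied by calculate_shortid in mininode.py this yields a BIP 152 short ID. The sketch below only checks structural properties, since no official test vectors ship with this file, and assumes it is run from qa/rpc-tests so the package imports:

    from test_framework.siphash import siphash256

    # Hedged usage sketch with arbitrary keys and a dummy 256-bit hash value.
    k0, k1 = 0x0706050403020100, 0x0F0E0D0C0B0A0908
    tx_hash = int.from_bytes(b'\x11' * 32, 'little')
    digest = siphash256(k0, k1, tx_hash)
    assert 0 <= digest < 2**64
    shortid = digest & 0x0000ffffffffffff      # truncation used by calculate_shortid
    assert 0 <= shortid < 2**48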
diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py
index 0dfece6b27..186cf866cf 100755
--- a/qa/rpc-tests/test_framework/test_framework.py
+++ b/qa/rpc-tests/test_framework/test_framework.py
@@ -48,7 +48,7 @@ class BitcoinTestFramework(object):
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
- initialize_chain(self.options.tmpdir, self.num_nodes)
+ initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
@@ -112,6 +112,8 @@ class BitcoinTestFramework(object):
help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
+ parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
+ help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
@@ -123,7 +125,8 @@ class BitcoinTestFramework(object):
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
- self.options.tmpdir += '/' + str(self.options.port_seed)
+ # backup dir variable for removal at cleanup
+ self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
@@ -174,6 +177,8 @@ class BitcoinTestFramework(object):
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
+ if not os.listdir(self.options.root):
+ os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py
index 32fe79efc3..eee77f1a10 100644
--- a/qa/rpc-tests/test_framework/util.py
+++ b/qa/rpc-tests/test_framework/util.py
@@ -125,12 +125,16 @@ def sync_blocks(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same tip
"""
+ maxheight = 0
while timeout > 0:
- tips = [ x.getbestblockhash() for x in rpc_connections ]
+ tips = [ x.waitforblockheight(maxheight, int(wait * 1000)) for x in rpc_connections ]
+ heights = [ x["height"] for x in tips ]
if tips == [ tips[0] ]*len(tips):
return True
- time.sleep(wait)
+ if heights == [ heights[0] ]*len(heights): #heights are the same but hashes are not
+ raise AssertionError("Block sync failed")
timeout -= wait
+ maxheight = max(heights)
raise AssertionError("Block sync failed")
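The reworked loop has two terminal conditions: identical tips mean the nodes are synced, while identical heights with differing hashes mean they sit on competing chains and further waiting will not help. A hedged sketch of those two cases with hand-built tip dictionaries (field names mirror what waitforblockheight is expected to return):

    # Hedged sketch of the success and failure conditions in sync_blocks().
    synced = [{"height": 101, "hash": "aa"}, {"height": 101, "hash": "aa"}]
    assert synced == [synced[0]] * len(synced)            # same tip everywhere

    forked = [{"height": 101, "hash": "aa"}, {"height": 101, "hash": "bb"}]
    heights = [t["height"] for t in forked]
    assert heights == [heights[0]] * len(heights)         # heights agree...
    assert forked != [forked[0]] * len(forked)            # ...but hashes do not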
def sync_mempools(rpc_connections, wait=1, timeout=60):
@@ -171,7 +175,15 @@ def rpc_auth_pair(n):
def rpc_url(i, rpchost=None):
rpc_u, rpc_p = rpc_auth_pair(i)
- return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, rpchost or '127.0.0.1', rpc_port(i))
+ host = '127.0.0.1'
+ port = rpc_port(i)
+ if rpchost:
+ parts = rpchost.split(':')
+ if len(parts) == 2:
+ host, port = parts
+ else:
+ host = rpchost
+ return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_bitcoind_start(process, url, i):
'''
@@ -193,7 +205,7 @@ def wait_for_bitcoind_start(process, url, i):
raise # unkown JSON RPC exception
time.sleep(0.25)
-def initialize_chain(test_dir, num_nodes):
+def initialize_chain(test_dir, num_nodes, cachedir):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
@@ -202,7 +214,7 @@ def initialize_chain(test_dir, num_nodes):
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
- if not os.path.isdir(os.path.join('cache', 'node'+str(i))):
+ if not os.path.isdir(os.path.join(cachedir, 'node'+str(i))):
create_cache = True
break
@@ -210,12 +222,12 @@ def initialize_chain(test_dir, num_nodes):
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
- if os.path.isdir(os.path.join("cache","node"+str(i))):
- shutil.rmtree(os.path.join("cache","node"+str(i)))
+ if os.path.isdir(os.path.join(cachedir,"node"+str(i))):
+ shutil.rmtree(os.path.join(cachedir,"node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
- datadir=initialize_datadir("cache", i)
+ datadir=initialize_datadir(cachedir, i)
args = [ os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
@@ -257,13 +269,13 @@ def initialize_chain(test_dir, num_nodes):
wait_bitcoinds()
disable_mocktime()
for i in range(MAX_NODES):
- os.remove(log_filename("cache", i, "debug.log"))
- os.remove(log_filename("cache", i, "db.log"))
- os.remove(log_filename("cache", i, "peers.dat"))
- os.remove(log_filename("cache", i, "fee_estimates.dat"))
+ os.remove(log_filename(cachedir, i, "debug.log"))
+ os.remove(log_filename(cachedir, i, "db.log"))
+ os.remove(log_filename(cachedir, i, "peers.dat"))
+ os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
- from_dir = os.path.join("cache", "node"+str(i))
+ from_dir = os.path.join(cachedir, "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
diff --git a/qa/rpc-tests/wallet-accounts.py b/qa/rpc-tests/wallet-accounts.py
new file mode 100755
index 0000000000..c51181e4f8
--- /dev/null
+++ b/qa/rpc-tests/wallet-accounts.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ start_nodes,
+ start_node,
+ assert_equal,
+ connect_nodes_bi,
+)
+
+
+class WalletAccountsTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.node_args = [[]]
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
+ self.is_network_split = False
+
+ def run_test (self):
+ node = self.nodes[0]
+ # Check that there's no UTXO on any of the nodes
+ assert_equal(len(node.listunspent()), 0)
+
+ node.generate(101)
+
+ assert_equal(node.getbalance(), 50)
+
+ accounts = ["a","b","c","d","e"]
+ amount_to_send = 1.0
+ account_addresses = dict()
+ for account in accounts:
+ address = node.getaccountaddress(account)
+ account_addresses[account] = address
+
+ node.getnewaddress(account)
+ assert_equal(node.getaccount(address), account)
+ assert(address in node.getaddressesbyaccount(account))
+
+ node.sendfrom("", address, amount_to_send)
+
+ node.generate(1)
+
+ for i in range(len(accounts)):
+ from_account = accounts[i]
+ to_account = accounts[(i+1)%len(accounts)]
+ to_address = account_addresses[to_account]
+ node.sendfrom(from_account, to_address, amount_to_send)
+
+ node.generate(1)
+
+ for account in accounts:
+ address = node.getaccountaddress(account)
+ assert(address != account_addresses[account])
+ assert_equal(node.getreceivedbyaccount(account), 2)
+ node.move(account, "", node.getbalance(account))
+
+ node.generate(101)
+
+ expected_account_balances = {"": 5200}
+ for account in accounts:
+ expected_account_balances[account] = 0
+
+ assert_equal(node.listaccounts(), expected_account_balances)
+
+ assert_equal(node.getbalance(""), 5200)
+
+ for account in accounts:
+ address = node.getaccountaddress("")
+ node.setaccount(address, account)
+ assert(address in node.getaddressesbyaccount(account))
+ assert(address not in node.getaddressesbyaccount(""))
+
+ for account in accounts:
+ addresses = []
+ for x in range(10):
+ addresses.append(node.getnewaddress())
+ multisig_address = node.addmultisigaddress(5, addresses, account)
+ node.sendfrom("", multisig_address, 50)
+
+ node.generate(101)
+
+ for account in accounts:
+ assert_equal(node.getbalance(account), 50)
+
+if __name__ == '__main__':
+ WalletAccountsTest().main ()
diff --git a/qa/rpc-tests/wallet-dump.py b/qa/rpc-tests/wallet-dump.py
new file mode 100755
index 0000000000..6028d2c20b
--- /dev/null
+++ b/qa/rpc-tests/wallet-dump.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (start_nodes, start_node, assert_equal, bitcoind_processes)
+
+
+def read_dump(file_name, addrs, hd_master_addr_old):
+ """
+ Read the given dump, count the addrs that match, count change and reserve.
+ Also check that the old hd_master is inactive
+ """
+ with open(file_name) as inputfile:
+ found_addr = 0
+ found_addr_chg = 0
+ found_addr_rsv = 0
+ hd_master_addr_ret = None
+ for line in inputfile:
+ # only read non-comment lines
+ if line[0] != "#" and len(line) > 10:
+ # split out some data
+ key_label, comment = line.split("#")
+ # key = key_label.split(" ")[0]
+ keytype = key_label.split(" ")[2]
+ if len(comment) > 1:
+ addr_keypath = comment.split(" addr=")[1]
+ addr = addr_keypath.split(" ")[0]
+ keypath = None
+ if keytype == "inactivehdmaster=1":
+ # ensure the old master is still available
+ assert(hd_master_addr_old == addr)
+ elif keytype == "hdmaster=1":
+ # ensure we have generated a new hd master key
+ assert(hd_master_addr_old != addr)
+ hd_master_addr_ret = addr
+ else:
+ keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
+
+ # count key types
+ for addrObj in addrs:
+ if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
+ found_addr += 1
+ break
+ elif keytype == "change=1":
+ found_addr_chg += 1
+ break
+ elif keytype == "reserve=1":
+ found_addr_rsv += 1
+ break
+ return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
+
+
+class WalletDumpTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+ self.extra_args = [["-keypool=90"]]
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+
+ def run_test (self):
+ tmpdir = self.options.tmpdir
+
+ # generate 20 addresses to compare against the dump
+ test_addr_count = 20
+ addrs = []
+ for i in range(0,test_addr_count):
+ addr = self.nodes[0].getnewaddress()
+ vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
+ addrs.append(vaddr)
+ # Should be a no-op:
+ self.nodes[0].keypoolrefill()
+
+ # dump unencrypted wallet
+ self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
+
+ found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
+ read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
+ assert_equal(found_addr, test_addr_count) # all keys must be in the dump
+ assert_equal(found_addr_chg, 50) # 50 blocks were mined
+ assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
+
+ #encrypt wallet, restart, unlock and dump
+ self.nodes[0].encryptwallet('test')
+ bitcoind_processes[0].wait()
+ self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
+ self.nodes[0].walletpassphrase('test', 10)
+ # Should be a no-op:
+ self.nodes[0].keypoolrefill()
+ self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
+
+ found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
+ read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
+ assert_equal(found_addr, test_addr_count)
+ assert_equal(found_addr_chg, 90 + 1 + 50) # old reserve keys are marked as change now
+ assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
+
+if __name__ == '__main__':
+ WalletDumpTest().main ()
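For orientation, read_dump() assumes dump lines shaped like '<key> <time> <keytype> # addr=<address> hdkeypath=<path>'. The sketch below runs the same splitting on a fabricated line to show which fields land where; the key and address are dummies, not real wallet data:

    # Hedged sketch of the field extraction in read_dump(); the sample line is
    # made up to mimic the dumpwallet format, not copied from an actual dump.
    line = ("cTdummyprivatekey 2016-01-01T00:00:00Z label= "
            "# addr=mzDummyAddress hdkeypath=m/0'/0'/5'\n")
    key_label, comment = line.split("#")
    keytype = key_label.split(" ")[2]
    addr_keypath = comment.split(" addr=")[1]
    addr = addr_keypath.split(" ")[0]
    keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
    assert keytype == "label="
    assert addr == "mzDummyAddress"
    assert keypath == "m/0'/0'/5'"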
diff --git a/qa/rpc-tests/wallet-hd.py b/qa/rpc-tests/wallet-hd.py
index c738ee2207..a49d91f6f4 100755
--- a/qa/rpc-tests/wallet-hd.py
+++ b/qa/rpc-tests/wallet-hd.py
@@ -31,7 +31,7 @@ class WalletHDTest(BitcoinTestFramework):
tmpdir = self.options.tmpdir
# Make sure we use hd, keep masterkeyid
- masterkeyid = self.nodes[1].getwalletinfo()['masterkeyid']
+ masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# Import a non-HD private key in the HD wallet
@@ -39,8 +39,8 @@ class WalletHDTest(BitcoinTestFramework):
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
- self.nodes[1].backupwallet(tmpdir + "hd.bak")
- #self.nodes[1].dumpwallet(tmpdir + "hd.dump")
+ self.nodes[1].backupwallet(tmpdir + "/hd.bak")
+ #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
@@ -63,7 +63,7 @@ class WalletHDTest(BitcoinTestFramework):
print("Restore backup ...")
self.stop_node(1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
- shutil.copyfile(tmpdir + "hd.bak", tmpdir + "/node1/regtest/wallet.dat")
+ shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
#connect_nodes_bi(self.nodes, 0, 1)
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index 5d96e7a6e5..3420be1a2e 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -18,9 +18,10 @@ class WalletTest (BitcoinTestFramework):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
+ self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]
def setup_network(self, split=False):
- self.nodes = start_nodes(3, self.options.tmpdir)
+ self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
@@ -154,7 +155,7 @@ class WalletTest (BitcoinTestFramework):
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
- self.nodes.append(start_node(3, self.options.tmpdir))
+ self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3]))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
diff --git a/qa/rpc-tests/walletbackup.py b/qa/rpc-tests/walletbackup.py
index b991d5c761..e12cb10a50 100755
--- a/qa/rpc-tests/walletbackup.py
+++ b/qa/rpc-tests/walletbackup.py
@@ -45,12 +45,12 @@ class WalletBackupTest(BitcoinTestFramework):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
+ # nodes 1, 2, 3 are spenders, let's give them a keypool=100
+ self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
- # nodes 1, 2,3 are spenders, let's give them a keypool=100
- extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
- self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
@@ -79,6 +79,7 @@ class WalletBackupTest(BitcoinTestFramework):
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
+ sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):