Diffstat (limited to 'qa')
-rw-r--r--  qa/README.md                               57
-rwxr-xr-x  qa/pull-tester/rpc-tests.py                 5
-rw-r--r--  qa/replace-by-fee/.gitignore                1
-rw-r--r--  qa/replace-by-fee/README.md                13
-rwxr-xr-x  qa/replace-by-fee/rbf-tests.py            360
-rw-r--r--  qa/rpc-tests/README.md                     49
-rwxr-xr-x  qa/rpc-tests/replace-by-fee.py            512
-rwxr-xr-x  qa/rpc-tests/script_test.py               259
-rwxr-xr-x  qa/rpc-tests/sendheaders.py               519
-rwxr-xr-x  qa/rpc-tests/smartfees.py                  52
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py    32
11 files changed, 1526 insertions, 333 deletions
diff --git a/qa/README.md b/qa/README.md
new file mode 100644
index 0000000000..758d1f47e5
--- /dev/null
+++ b/qa/README.md
@@ -0,0 +1,57 @@
+The [pull-tester](/qa/pull-tester/) folder contains a script to call
+multiple tests from the [rpc-tests](/qa/rpc-tests/) folder.
+
+Every pull request to the bitcoin repository is built and run through
+the regression test suite. You can also run all or only individual
+tests locally.
+
+Running tests
+=============
+
+You can run any single test by calling `qa/pull-tester/rpc-tests.py <testname>`.
+
+Or you can run any combination of tests by calling `qa/pull-tester/rpc-tests.py <testname1> <testname2> <testname3> ...`
+
+Run the regression test suite with `qa/pull-tester/rpc-tests.py`
+
+Run all possible tests with `qa/pull-tester/rpc-tests.py -extended`
+
+Possible options:
+
+```
+ -h, --help show this help message and exit
+ --nocleanup Leave bitcoinds and test.* datadir on exit or error
+ --noshutdown Don't stop bitcoinds after the test execution
+ --srcdir=SRCDIR Source directory containing bitcoind/bitcoin-cli
+ (default: ../../src)
+ --tmpdir=TMPDIR Root directory for datadirs
+ --tracerpc Print out all RPC calls as they are made
+ --coveragedir=COVERAGEDIR
+ Write tested RPC commands into this directory
+```
+
+If you set the environment variable `PYTHON_DEBUG=1` you will get some debug
+output (example: `PYTHON_DEBUG=1 qa/pull-tester/rpc-tests.py wallet`).
+
+A 200-block -regtest blockchain and wallets for four nodes
+are created the first time a regression test is run and
+are stored in the cache/ directory. Each node has 25 mature
+blocks (25*50=1250 BTC) in its wallet.
+
+After the first run, the cache/ blockchain and wallets are
+copied into a temporary directory and used as the initial
+test state.
+
+If you get into a bad state, you should be able
+to recover with:
+
+```bash
+rm -rf cache
+killall bitcoind
+```
+
+Writing tests
+=============
+You are encouraged to write tests for new or existing features.
+Further information about the test framework and individual rpc
+tests is found in [qa/rpc-tests](/qa/rpc-tests).
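+
+Tests in this tree subclass `BitcoinTestFramework` and override
+`setup_network` and `run_test`. As a rough sketch (assuming the `start_node`
+and `assert_equal` helpers from `test_framework.util` used by the existing
+tests), a minimal new test looks like this:
+
+```python
+#!/usr/bin/env python2
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class ExampleTest(BitcoinTestFramework):
+    def setup_network(self):
+        # A single node is enough for a simple RPC test
+        self.nodes = [start_node(0, self.options.tmpdir)]
+        self.is_network_split = False
+
+    def run_test(self):
+        # Mine a block and check that the node accepted it
+        blocks = self.nodes[0].generate(1)
+        assert_equal(len(blocks), 1)
+
+if __name__ == '__main__':
+    ExampleTest().main()
+```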
diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
index 83fc9b8f96..5004b09c18 100755
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -69,6 +69,7 @@ if EXEEXT == ".exe" and "-win" not in opts:
testScripts = [
'wallet.py',
'listtransactions.py',
+ 'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
@@ -90,6 +91,7 @@ testScripts = [
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
+ 'sendheaders.py',
]
testScriptsExt = [
'bip65-cltv.py',
@@ -104,15 +106,14 @@ testScriptsExt = [
'forknotify.py',
'invalidateblock.py',
'keypool.py',
- 'receivedby.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
-# 'script_test.py', #used for manual comparison of 2 binaries
'smartfees.py',
'maxblocksinflight.py',
'invalidblockrequest.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
+ 'replace-by-fee.py',
]
#Enable ZMQ tests
diff --git a/qa/replace-by-fee/.gitignore b/qa/replace-by-fee/.gitignore
new file mode 100644
index 0000000000..b2c4f4657a
--- /dev/null
+++ b/qa/replace-by-fee/.gitignore
@@ -0,0 +1 @@
+python-bitcoinlib
diff --git a/qa/replace-by-fee/README.md b/qa/replace-by-fee/README.md
new file mode 100644
index 0000000000..baad86de9a
--- /dev/null
+++ b/qa/replace-by-fee/README.md
@@ -0,0 +1,13 @@
+Replace-by-fee regression tests
+===============================
+
+First get v0.5.0 of the python-bitcoinlib library. In this directory,
+run:
+
+ git clone -n https://github.com/petertodd/python-bitcoinlib
+ (cd python-bitcoinlib && git checkout 8270bfd9c6ac37907d75db3d8b9152d61c7255cd)
+
+Then run the tests themselves with a bitcoind available running in regtest
+mode:
+
+ ./rbf-tests.py
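+
+The tests are plain `unittest` cases, so a single case can also be run by
+name, e.g.:
+
+    ./rbf-tests.py Test_ReplaceByFee.test_simple_doublespend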
diff --git a/qa/replace-by-fee/rbf-tests.py b/qa/replace-by-fee/rbf-tests.py
new file mode 100755
index 0000000000..1ee6c83875
--- /dev/null
+++ b/qa/replace-by-fee/rbf-tests.py
@@ -0,0 +1,360 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#
+# Test replace-by-fee
+#
+
+import os
+import sys
+
+# Add python-bitcoinlib to module search path, prior to any system-wide
+# python-bitcoinlib.
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinlib"))
+
+import unittest
+
+import bitcoin
+bitcoin.SelectParams('regtest')
+
+import bitcoin.rpc
+
+from bitcoin.core import *
+from bitcoin.core.script import *
+from bitcoin.wallet import *
+
+MAX_REPLACEMENT_LIMIT = 100
+
+class Test_ReplaceByFee(unittest.TestCase):
+ proxy = None
+
+ @classmethod
+ def setUpClass(cls):
+ if cls.proxy is None:
+ cls.proxy = bitcoin.rpc.Proxy()
+
+ @classmethod
+ def mine_mempool(cls):
+ """Mine until mempool is empty"""
+ mempool_size = 1
+ while mempool_size:
+ cls.proxy.call('generate', 1)
+ new_mempool_size = len(cls.proxy.getrawmempool())
+
+ # It's possible to get stuck in a loop here if the mempool has
+ # transactions that can't be mined.
+ assert(new_mempool_size != mempool_size)
+ mempool_size = new_mempool_size
+
+ @classmethod
+ def tearDownClass(cls):
+ # Make sure mining works
+ cls.mine_mempool()
+
+ def make_txout(self, amount, confirmed=True, scriptPubKey=CScript([1])):
+ """Create a txout with a given amount and scriptPubKey
+
+ Mines coins as needed.
+
+ confirmed - txouts created will be confirmed in the blockchain;
+ unconfirmed otherwise.
+ """
+ fee = 1*COIN
+ while self.proxy.getbalance() < amount + fee:
+ self.proxy.call('generate', 100)
+
+ addr = P2SHBitcoinAddress.from_redeemScript(CScript([]))
+ txid = self.proxy.sendtoaddress(addr, amount + fee)
+
+ tx1 = self.proxy.getrawtransaction(txid)
+
+ i = None
+ for i, txout in enumerate(tx1.vout):
+ if txout.scriptPubKey == addr.to_scriptPubKey():
+ break
+ assert i is not None
+
+ tx2 = CTransaction([CTxIn(COutPoint(txid, i), CScript([1, CScript([])]), nSequence=0)],
+ [CTxOut(amount, scriptPubKey)])
+
+ tx2_txid = self.proxy.sendrawtransaction(tx2, True)
+
+ # If requested, ensure txouts are confirmed.
+ if confirmed:
+ self.mine_mempool()
+
+ return COutPoint(tx2_txid, 0)
+
+ def test_simple_doublespend(self):
+ """Simple doublespend"""
+ tx0_outpoint = self.make_txout(1.1*COIN)
+
+ tx1a = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(1*COIN, CScript([b'a']))])
+ tx1a_txid = self.proxy.sendrawtransaction(tx1a, True)
+
+ # Should fail because we haven't changed the fee
+ tx1b = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(1*COIN, CScript([b'b']))])
+
+ try:
+ tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26) # insufficient fee
+ else:
+ self.fail()
+
+ # Extra 0.1 BTC fee
+ tx1b = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(0.9*COIN, CScript([b'b']))])
+ tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
+
+ # tx1a is in fact replaced
+ with self.assertRaises(IndexError):
+ self.proxy.getrawtransaction(tx1a_txid)
+
+ self.assertEqual(tx1b, self.proxy.getrawtransaction(tx1b_txid))
+
+ def test_doublespend_chain(self):
+ """Doublespend of a long chain"""
+
+ initial_nValue = 50*COIN
+ tx0_outpoint = self.make_txout(initial_nValue)
+
+ prevout = tx0_outpoint
+ remaining_value = initial_nValue
+ chain_txids = []
+ while remaining_value > 10*COIN:
+ remaining_value -= 1*COIN
+ tx = CTransaction([CTxIn(prevout, nSequence=0)],
+ [CTxOut(remaining_value, CScript([1]))])
+ txid = self.proxy.sendrawtransaction(tx, True)
+ chain_txids.append(txid)
+ prevout = COutPoint(txid, 0)
+
+        # Whether the double-spend is allowed is evaluated by including all
+        # child fees (40 BTC), so this attempt is rejected.
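+        # (The chain is 40 transactions deep, each paying 1 BTC in fees;
+        # this replacement pays only 50 - 20 = 30 BTC.)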
+ dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(initial_nValue - 30*COIN, CScript([1]))])
+
+ try:
+ self.proxy.sendrawtransaction(dbl_tx, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26) # insufficient fee
+ else:
+ self.fail()
+
+ # Accepted with sufficient fee
+ dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(1*COIN, CScript([1]))])
+ self.proxy.sendrawtransaction(dbl_tx, True)
+
+ for doublespent_txid in chain_txids:
+ with self.assertRaises(IndexError):
+ self.proxy.getrawtransaction(doublespent_txid)
+
+ def test_doublespend_tree(self):
+ """Doublespend of a big tree of transactions"""
+
+ initial_nValue = 50*COIN
+ tx0_outpoint = self.make_txout(initial_nValue)
+
+ def branch(prevout, initial_value, max_txs, *, tree_width=5, fee=0.0001*COIN, _total_txs=None):
+ if _total_txs is None:
+ _total_txs = [0]
+ if _total_txs[0] >= max_txs:
+ return
+
+ txout_value = (initial_value - fee) // tree_width
+ if txout_value < fee:
+ return
+
+ vout = [CTxOut(txout_value, CScript([i+1]))
+ for i in range(tree_width)]
+ tx = CTransaction([CTxIn(prevout, nSequence=0)],
+ vout)
+
+ self.assertTrue(len(tx.serialize()) < 100000)
+ txid = self.proxy.sendrawtransaction(tx, True)
+ yield tx
+ _total_txs[0] += 1
+
+ for i, txout in enumerate(tx.vout):
+ yield from branch(COutPoint(txid, i), txout_value,
+ max_txs,
+ tree_width=tree_width, fee=fee,
+ _total_txs=_total_txs)
+
+ fee = 0.0001*COIN
+ n = MAX_REPLACEMENT_LIMIT
+ tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
+ self.assertEqual(len(tree_txs), n)
+
+ # Attempt double-spend, will fail because too little fee paid
+ dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(initial_nValue - fee*n, CScript([1]))])
+ try:
+ self.proxy.sendrawtransaction(dbl_tx, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26) # insufficient fee
+ else:
+ self.fail()
+
+ # 1 BTC fee is enough
+ dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))])
+ self.proxy.sendrawtransaction(dbl_tx, True)
+
+ for tx in tree_txs:
+ with self.assertRaises(IndexError):
+ self.proxy.getrawtransaction(tx.GetHash())
+
+ # Try again, but with more total transactions than the "max txs
+ # double-spent at once" anti-DoS limit.
+        for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
+ fee = 0.0001*COIN
+ tx0_outpoint = self.make_txout(initial_nValue)
+ tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
+ self.assertEqual(len(tree_txs), n)
+
+ dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(initial_nValue - fee*n, CScript([1]))])
+ try:
+ self.proxy.sendrawtransaction(dbl_tx, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26)
+ else:
+ self.fail()
+
+ for tx in tree_txs:
+ self.proxy.getrawtransaction(tx.GetHash())
+
+ def test_replacement_feeperkb(self):
+ """Replacement requires fee-per-KB to be higher"""
+ tx0_outpoint = self.make_txout(1.1*COIN)
+
+ tx1a = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(1*COIN, CScript([b'a']))])
+ tx1a_txid = self.proxy.sendrawtransaction(tx1a, True)
+
+ # Higher fee, but the fee per KB is much lower, so the replacement is
+ # rejected.
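+        # (tx1b pays ~1.099 BTC in fees versus tx1a's 0.1 BTC, but carries a
+        # ~999kB scriptPubKey, so its fee per KB is far lower.)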
+ tx1b = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
+ [CTxOut(0.001*COIN,
+ CScript([b'a'*999000]))])
+
+ try:
+ tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26) # insufficient fee
+ else:
+ self.fail()
+
+ def test_spends_of_conflicting_outputs(self):
+ """Replacements that spend conflicting tx outputs are rejected"""
+ utxo1 = self.make_txout(1.2*COIN)
+ utxo2 = self.make_txout(3.0*COIN)
+
+ tx1a = CTransaction([CTxIn(utxo1, nSequence=0)],
+ [CTxOut(1.1*COIN, CScript([b'a']))])
+ tx1a_txid = self.proxy.sendrawtransaction(tx1a, True)
+
+ # Direct spend an output of the transaction we're replacing.
+ tx2 = CTransaction([CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
+ CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)],
+ tx1a.vout)
+
+ try:
+ tx2_txid = self.proxy.sendrawtransaction(tx2, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26)
+ else:
+ self.fail()
+
+ # Spend tx1a's output to test the indirect case.
+ tx1b = CTransaction([CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)],
+ [CTxOut(1.0*COIN, CScript([b'a']))])
+ tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
+
+ tx2 = CTransaction([CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
+ CTxIn(COutPoint(tx1b_txid, 0))],
+ tx1a.vout)
+
+ try:
+ tx2_txid = self.proxy.sendrawtransaction(tx2, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26)
+ else:
+ self.fail()
+
+ def test_new_unconfirmed_inputs(self):
+ """Replacements that add new unconfirmed inputs are rejected"""
+ confirmed_utxo = self.make_txout(1.1*COIN)
+ unconfirmed_utxo = self.make_txout(0.1*COIN, False)
+
+ tx1 = CTransaction([CTxIn(confirmed_utxo)],
+ [CTxOut(1.0*COIN, CScript([b'a']))])
+ tx1_txid = self.proxy.sendrawtransaction(tx1, True)
+
+ tx2 = CTransaction([CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)],
+ tx1.vout)
+
+ try:
+ tx2_txid = self.proxy.sendrawtransaction(tx2, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26)
+ else:
+ self.fail()
+
+ def test_too_many_replacements(self):
+ """Replacements that evict too many transactions are rejected"""
+ # Try directly replacing more than MAX_REPLACEMENT_LIMIT
+ # transactions
+
+ # Start by creating a single transaction with many outputs
+ initial_nValue = 10*COIN
+ utxo = self.make_txout(initial_nValue)
+ fee = 0.0001*COIN
+ split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
+ actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
+
+ outputs = []
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ outputs.append(CTxOut(split_value, CScript([1])))
+
+ splitting_tx = CTransaction([CTxIn(utxo, nSequence=0)], outputs)
+ txid = self.proxy.sendrawtransaction(splitting_tx, True)
+
+ # Now spend each of those outputs individually
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ tx_i = CTransaction([CTxIn(COutPoint(txid, i), nSequence=0)],
+ [CTxOut(split_value-fee, CScript([b'a']))])
+ self.proxy.sendrawtransaction(tx_i, True)
+
+ # Now create doublespend of the whole lot, should fail
+ # Need a big enough fee to cover all spending transactions and have
+ # a higher fee rate
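+        # (Deducting 100*fee from each of the 101 outputs yields a ~1.01 BTC
+        # fee, far more than the ~0.0101 BTC total paid by the evicted spends.)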
+ double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
+ inputs = []
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
+ double_tx = CTransaction(inputs, [CTxOut(double_spend_value, CScript([b'a']))])
+
+ try:
+ self.proxy.sendrawtransaction(double_tx, True)
+ except bitcoin.rpc.JSONRPCException as exp:
+ self.assertEqual(exp.error['code'], -26)
+            self.assertIn("too many potential replacements", exp.error['message'])
+ else:
+ self.fail()
+
+ # If we remove an input, it should pass
+ double_tx = CTransaction(inputs[0:-1],
+ [CTxOut(double_spend_value, CScript([b'a']))])
+
+ self.proxy.sendrawtransaction(double_tx, True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/qa/rpc-tests/README.md b/qa/rpc-tests/README.md
index d2db00362f..898931936b 100644
--- a/qa/rpc-tests/README.md
+++ b/qa/rpc-tests/README.md
@@ -1,10 +1,8 @@
Regression tests
================
-### [python-bitcoinrpc](https://github.com/jgarzik/python-bitcoinrpc)
-Git subtree of [https://github.com/jgarzik/python-bitcoinrpc](https://github.com/jgarzik/python-bitcoinrpc).
-Changes to python-bitcoinrpc should be made upstream, and then
-pulled here using git subtree.
+### [test_framework/authproxy.py](test_framework/authproxy.py)
+Taken from the [python-bitcoinrpc repository](https://github.com/jgarzik/python-bitcoinrpc).
### [test_framework/test_framework.py](test_framework/test_framework.py)
Base class for new regression tests.
@@ -33,49 +31,6 @@ Helpers for script.py
### [test_framework/blocktools.py](test_framework/blocktools.py)
Helper functions for creating blocks and transactions.
-
-Notes
-=====
-
-You can run any single test by calling `qa/pull-tester/rpc-tests.py <testname>`.
-
-Or you can run any combination of tests by calling `qa/pull-tester/rpc-tests.py <testname1> <testname2> <testname3> ...`
-
-Run the regression test suite with `qa/pull-tester/rpc-tests.py`
-
-Run all possible tests with `qa/pull-tester/rpc-tests.py -extended`
-
-Possible options:
-
-```
--h, --help show this help message and exit
- --nocleanup Leave bitcoinds and test.* datadir on exit or error
- --noshutdown Don't stop bitcoinds after the test execution
- --srcdir=SRCDIR Source directory containing bitcoind/bitcoin-cli (default:
- ../../src)
- --tmpdir=TMPDIR Root directory for datadirs
- --tracerpc Print out all RPC calls as they are made
-```
-
-If you set the environment variable `PYTHON_DEBUG=1` you will get some debug output (example: `PYTHON_DEBUG=1 qa/pull-tester/rpc-tests.py wallet`).
-
-A 200-block -regtest blockchain and wallets for four nodes
-is created the first time a regression test is run and
-is stored in the cache/ directory. Each node has 25 mature
-blocks (25*50=1250 BTC) in its wallet.
-
-After the first run, the cache/ blockchain and wallets are
-copied into a temporary directory and used as the initial
-test state.
-
-If you get into a bad state, you should be able
-to recover with:
-
-```bash
-rm -rf cache
-killall bitcoind
-```
-
P2P test design notes
---------------------
diff --git a/qa/rpc-tests/replace-by-fee.py b/qa/rpc-tests/replace-by-fee.py
new file mode 100755
index 0000000000..537a1ed8d9
--- /dev/null
+++ b/qa/rpc-tests/replace-by-fee.py
@@ -0,0 +1,512 @@
+#!/usr/bin/env python2
+# Copyright (c) 2014-2015 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#
+# Test replace by fee code
+#
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import *
+from test_framework.mininode import *
+import binascii
+
+COIN = 100000000
+MAX_REPLACEMENT_LIMIT = 100
+
+def satoshi_round(amount):
+ return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
+
+def txToHex(tx):
+ return binascii.hexlify(tx.serialize()).decode('utf-8')
+
+def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
+ """Create a txout with a given amount and scriptPubKey
+
+ Mines coins as needed.
+
+ confirmed - txouts created will be confirmed in the blockchain;
+ unconfirmed otherwise.
+ """
+ fee = 1*COIN
+ while node.getbalance() < satoshi_round((amount + fee)/COIN):
+ node.generate(100)
+
+ new_addr = node.getnewaddress()
+ txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
+ tx1 = node.getrawtransaction(txid, 1)
+ txid = int(txid, 16)
+ i = None
+
+    for i, txout in enumerate(tx1['vout']):
+        if txout['scriptPubKey']['addresses'] == [new_addr]:
+            break
+ assert i is not None
+
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(COutPoint(txid, i))]
+ tx2.vout = [CTxOut(amount, scriptPubKey)]
+ tx2.rehash()
+
+    tx2_hex = txToHex(tx2)
+
+    signed_tx = node.signrawtransaction(tx2_hex)
+
+ txid = node.sendrawtransaction(signed_tx['hex'], True)
+
+ # If requested, ensure txouts are confirmed.
+ if confirmed:
+ while len(node.getrawmempool()):
+ node.generate(1)
+
+ return COutPoint(int(txid, 16), 0)
+
+class ReplaceByFeeTest(BitcoinTestFramework):
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
+ "-relaypriority=0", "-whitelist=127.0.0.1"]))
+ self.is_network_split = False
+
+ def run_test(self):
+ make_utxo(self.nodes[0], 1*COIN)
+
+ print "Running test simple doublespend..."
+ self.test_simple_doublespend()
+
+ print "Running test doublespend chain..."
+ self.test_doublespend_chain()
+
+ print "Running test doublespend tree..."
+ self.test_doublespend_tree()
+
+ print "Running test replacement feeperkb..."
+ self.test_replacement_feeperkb()
+
+ print "Running test spends of conflicting outputs..."
+ self.test_spends_of_conflicting_outputs()
+
+ print "Running test new unconfirmed inputs..."
+ self.test_new_unconfirmed_inputs()
+
+ print "Running test too many replacements..."
+ self.test_too_many_replacements()
+
+ print "Running test opt-in..."
+ self.test_opt_in()
+
+ print "Passed\n"
+
+ def test_simple_doublespend(self):
+ """Simple doublespend"""
+ tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
+
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ # Should fail because we haven't changed the fee
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
+ tx1b_hex = txToHex(tx1b)
+
+ try:
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26) # insufficient fee
+ else:
+ assert(False)
+
+ # Extra 0.1 BTC fee
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
+ tx1b_hex = txToHex(tx1b)
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+
+ mempool = self.nodes[0].getrawmempool()
+
+ assert (tx1a_txid not in mempool)
+ assert (tx1b_txid in mempool)
+
+ assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
+
+ def test_doublespend_chain(self):
+ """Doublespend of a long chain"""
+
+ initial_nValue = 50*COIN
+ tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+
+ prevout = tx0_outpoint
+ remaining_value = initial_nValue
+ chain_txids = []
+ while remaining_value > 10*COIN:
+ remaining_value -= 1*COIN
+ tx = CTransaction()
+ tx.vin = [CTxIn(prevout, nSequence=0)]
+ tx.vout = [CTxOut(remaining_value, CScript([1]))]
+ tx_hex = txToHex(tx)
+ txid = self.nodes[0].sendrawtransaction(tx_hex, True)
+ chain_txids.append(txid)
+ prevout = COutPoint(int(txid, 16), 0)
+
+        # Whether the double-spend is allowed is evaluated by including all
+        # child fees (40 BTC), so this attempt is rejected.
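+        # (The chain is 40 transactions deep, each paying 1 BTC in fees;
+        # this replacement pays only 50 - 20 = 30 BTC.)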
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+
+ try:
+ self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26) # insufficient fee
+ else:
+ assert(False) # transaction mistakenly accepted!
+
+ # Accepted with sufficient fee
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
+
+ mempool = self.nodes[0].getrawmempool()
+ for doublespent_txid in chain_txids:
+ assert(doublespent_txid not in mempool)
+
+ def test_doublespend_tree(self):
+ """Doublespend of a big tree of transactions"""
+
+ initial_nValue = 50*COIN
+ tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+
+ def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
+ if _total_txs is None:
+ _total_txs = [0]
+ if _total_txs[0] >= max_txs:
+ return
+
+ txout_value = (initial_value - fee) // tree_width
+ if txout_value < fee:
+ return
+
+ vout = [CTxOut(txout_value, CScript([i+1]))
+ for i in range(tree_width)]
+ tx = CTransaction()
+ tx.vin = [CTxIn(prevout, nSequence=0)]
+ tx.vout = vout
+ tx_hex = txToHex(tx)
+
+ assert(len(tx.serialize()) < 100000)
+ txid = self.nodes[0].sendrawtransaction(tx_hex, True)
+ yield tx
+ _total_txs[0] += 1
+
+ txid = int(txid, 16)
+
+ for i, txout in enumerate(tx.vout):
+ for x in branch(COutPoint(txid, i), txout_value,
+ max_txs,
+ tree_width=tree_width, fee=fee,
+ _total_txs=_total_txs):
+ yield x
+
+ fee = 0.0001*COIN
+ n = MAX_REPLACEMENT_LIMIT
+ tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
+ assert_equal(len(tree_txs), n)
+
+ # Attempt double-spend, will fail because too little fee paid
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ try:
+ self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26) # insufficient fee
+ else:
+ assert(False)
+
+ # 1 BTC fee is enough
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
+
+ mempool = self.nodes[0].getrawmempool()
+
+ for tx in tree_txs:
+ tx.rehash()
+ assert (tx.hash not in mempool)
+
+ # Try again, but with more total transactions than the "max txs
+ # double-spent at once" anti-DoS limit.
+ for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
+ fee = 0.0001*COIN
+ tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+ tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
+ assert_equal(len(tree_txs), n)
+
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ try:
+ self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26)
+                assert("too many potential replacements" in exp.error['message'])
+ else:
+ assert(False)
+
+ for tx in tree_txs:
+ tx.rehash()
+ self.nodes[0].getrawtransaction(tx.hash)
+
+ def test_replacement_feeperkb(self):
+ """Replacement requires fee-per-KB to be higher"""
+ tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
+
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ # Higher fee, but the fee per KB is much lower, so the replacement is
+ # rejected.
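+        # (tx1b pays ~1.099 BTC in fees versus tx1a's 0.1 BTC, but carries a
+        # ~999kB scriptPubKey, so its fee per KB is far lower.)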
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*999000]))]
+ tx1b_hex = txToHex(tx1b)
+
+ try:
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26) # insufficient fee
+ else:
+ assert(False)
+
+ def test_spends_of_conflicting_outputs(self):
+ """Replacements that spend conflicting tx outputs are rejected"""
+ utxo1 = make_utxo(self.nodes[0], 1.2*COIN)
+ utxo2 = make_utxo(self.nodes[0], 3.0*COIN)
+
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(utxo1, nSequence=0)]
+ tx1a.vout = [CTxOut(1.1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ tx1a_txid = int(tx1a_txid, 16)
+
+ # Direct spend an output of the transaction we're replacing.
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
+ tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
+ tx2.vout = tx1a.vout
+ tx2_hex = txToHex(tx2)
+
+ try:
+ tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26)
+ else:
+ assert(False)
+
+ # Spend tx1a's output to test the indirect case.
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
+ tx1b.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
+ tx1b_hex = txToHex(tx1b)
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+ tx1b_txid = int(tx1b_txid, 16)
+
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
+ CTxIn(COutPoint(tx1b_txid, 0))]
+ tx2.vout = tx1a.vout
+ tx2_hex = txToHex(tx2)
+
+ try:
+ tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26)
+ else:
+ assert(False)
+
+ def test_new_unconfirmed_inputs(self):
+ """Replacements that add new unconfirmed inputs are rejected"""
+ confirmed_utxo = make_utxo(self.nodes[0], 1.1*COIN)
+ unconfirmed_utxo = make_utxo(self.nodes[0], 0.1*COIN, False)
+
+ tx1 = CTransaction()
+ tx1.vin = [CTxIn(confirmed_utxo)]
+ tx1.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
+ tx1_hex = txToHex(tx1)
+ tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
+
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
+ tx2.vout = tx1.vout
+ tx2_hex = txToHex(tx2)
+
+ try:
+ tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26)
+ else:
+ assert(False)
+
+ def test_too_many_replacements(self):
+ """Replacements that evict too many transactions are rejected"""
+ # Try directly replacing more than MAX_REPLACEMENT_LIMIT
+ # transactions
+
+ # Start by creating a single transaction with many outputs
+ initial_nValue = 10*COIN
+ utxo = make_utxo(self.nodes[0], initial_nValue)
+ fee = 0.0001*COIN
+ split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
+ actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
+
+ outputs = []
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ outputs.append(CTxOut(split_value, CScript([1])))
+
+ splitting_tx = CTransaction()
+ splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
+ splitting_tx.vout = outputs
+ splitting_tx_hex = txToHex(splitting_tx)
+
+ txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
+ txid = int(txid, 16)
+
+ # Now spend each of those outputs individually
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ tx_i = CTransaction()
+ tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
+ tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
+ tx_i_hex = txToHex(tx_i)
+ self.nodes[0].sendrawtransaction(tx_i_hex, True)
+
+ # Now create doublespend of the whole lot; should fail.
+ # Need a big enough fee to cover all spending transactions and have
+ # a higher fee rate
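+        # (Deducting 100*fee from each of the 101 outputs yields a ~1.01 BTC
+        # fee, far more than the ~0.0101 BTC total paid by the evicted spends.)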
+ double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
+ inputs = []
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
+ double_tx = CTransaction()
+ double_tx.vin = inputs
+ double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
+ double_tx_hex = txToHex(double_tx)
+
+ try:
+ self.nodes[0].sendrawtransaction(double_tx_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26)
+            assert("too many potential replacements" in exp.error['message'])
+ else:
+ assert(False)
+
+ # If we remove an input, it should pass
+ double_tx = CTransaction()
+ double_tx.vin = inputs[0:-1]
+ double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
+ double_tx_hex = txToHex(double_tx)
+ self.nodes[0].sendrawtransaction(double_tx_hex, True)
+
+ def test_opt_in(self):
+ """ Replacing should only work if orig tx opted in """
+ tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
+
+ # Create a non-opting in transaction
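+        # (an nSequence of 0xffffffff does not signal replaceability)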
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
+ tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ # Shouldn't be able to double-spend
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
+ tx1b_hex = txToHex(tx1b)
+
+ try:
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26)
+        else:
+            assert(False)
+
+ tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
+
+ # Create a different non-opting in transaction
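+        # (0xfffffffe still does not signal opt-in; only nSequence values
+        # below 0xfffffffe do)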
+ tx2a = CTransaction()
+ tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
+ tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx2a_hex = txToHex(tx2a)
+ tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
+
+ # Still shouldn't be able to double-spend
+ tx2b = CTransaction()
+ tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
+ tx2b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
+ tx2b_hex = txToHex(tx2b)
+
+ try:
+ tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error['code'], -26)
+ else:
+ assert(False)
+
+        # Now create a new transaction that spends from tx1a and tx2a,
+        # opting in on only one of the inputs.
+        # The transaction should be replaceable via a conflict on either input.
+
+ tx1a_txid = int(tx1a_txid, 16)
+ tx2a_txid = int(tx2a_txid, 16)
+
+ tx3a = CTransaction()
+ tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
+ CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
+ tx3a.vout = [CTxOut(0.9*COIN, CScript([b'c'])), CTxOut(0.9*COIN, CScript([b'd']))]
+ tx3a_hex = txToHex(tx3a)
+
+ self.nodes[0].sendrawtransaction(tx3a_hex, True)
+
+ tx3b = CTransaction()
+ tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
+ tx3b.vout = [CTxOut(0.5*COIN, CScript([b'e']))]
+ tx3b_hex = txToHex(tx3b)
+
+ tx3c = CTransaction()
+ tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
+ tx3c.vout = [CTxOut(0.5*COIN, CScript([b'f']))]
+ tx3c_hex = txToHex(tx3c)
+
+ self.nodes[0].sendrawtransaction(tx3b_hex, True)
+ # If tx3b was accepted, tx3c won't look like a replacement,
+ # but make sure it is accepted anyway
+ self.nodes[0].sendrawtransaction(tx3c_hex, True)
+
+if __name__ == '__main__':
+ ReplaceByFeeTest().main()
diff --git a/qa/rpc-tests/script_test.py b/qa/rpc-tests/script_test.py
deleted file mode 100755
index afc44b51b5..0000000000
--- a/qa/rpc-tests/script_test.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env python2
-#
-# Distributed under the MIT/X11 software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#
-
-'''
-Test notes:
-This test uses the script_valid and script_invalid tests from the unittest
-framework to do end-to-end testing where we compare that two nodes agree on
-whether blocks containing a given test script are valid.
-
-We generally ignore the script flags associated with each test (since we lack
-the precision to test each script using those flags in this framework), but
-for tests with SCRIPT_VERIFY_P2SH, we can use a block time after the BIP16
-switchover date to try to test with that flag enabled (and for tests without
-that flag, we use a block time before the switchover date).
-
-NOTE: This test is very slow and may take more than 40 minutes to run.
-'''
-
-from test_framework.test_framework import ComparisonTestFramework
-from test_framework.util import *
-from test_framework.comptool import TestInstance, TestManager
-from test_framework.mininode import *
-from test_framework.blocktools import *
-from test_framework.script import *
-import logging
-import copy
-import json
-
-script_valid_file = "../../src/test/data/script_valid.json"
-script_invalid_file = "../../src/test/data/script_invalid.json"
-
-# Pass in a set of json files to open.
-class ScriptTestFile(object):
-
- def __init__(self, files):
- self.files = files
- self.index = -1
- self.data = []
-
- def load_files(self):
- for f in self.files:
- self.data.extend(json.loads(open(os.path.dirname(os.path.abspath(__file__))+"/"+f).read()))
-
- # Skip over records that are not long enough to be tests
- def get_records(self):
- while (self.index < len(self.data)):
- if len(self.data[self.index]) >= 3:
- yield self.data[self.index]
- self.index += 1
-
-
-# Helper for parsing the flags specified in the .json files
-SCRIPT_VERIFY_NONE = 0
-SCRIPT_VERIFY_P2SH = 1
-SCRIPT_VERIFY_STRICTENC = 1 << 1
-SCRIPT_VERIFY_DERSIG = 1 << 2
-SCRIPT_VERIFY_LOW_S = 1 << 3
-SCRIPT_VERIFY_NULLDUMMY = 1 << 4
-SCRIPT_VERIFY_SIGPUSHONLY = 1 << 5
-SCRIPT_VERIFY_MINIMALDATA = 1 << 6
-SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS = 1 << 7
-SCRIPT_VERIFY_CLEANSTACK = 1 << 8
-
-flag_map = {
- "": SCRIPT_VERIFY_NONE,
- "NONE": SCRIPT_VERIFY_NONE,
- "P2SH": SCRIPT_VERIFY_P2SH,
- "STRICTENC": SCRIPT_VERIFY_STRICTENC,
- "DERSIG": SCRIPT_VERIFY_DERSIG,
- "LOW_S": SCRIPT_VERIFY_LOW_S,
- "NULLDUMMY": SCRIPT_VERIFY_NULLDUMMY,
- "SIGPUSHONLY": SCRIPT_VERIFY_SIGPUSHONLY,
- "MINIMALDATA": SCRIPT_VERIFY_MINIMALDATA,
- "DISCOURAGE_UPGRADABLE_NOPS": SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS,
- "CLEANSTACK": SCRIPT_VERIFY_CLEANSTACK,
-}
-
-def ParseScriptFlags(flag_string):
- flags = 0
- for x in flag_string.split(","):
- if x in flag_map:
- flags |= flag_map[x]
- else:
- print "Error: unrecognized script flag: ", x
- return flags
-
-'''
-Given a string that is a scriptsig or scriptpubkey from the .json files above,
-convert it to a CScript()
-'''
-# Replicates behavior from core_read.cpp
-def ParseScript(json_script):
- script = json_script.split(" ")
- parsed_script = CScript()
- for x in script:
- if len(x) == 0:
- # Empty string, ignore.
- pass
- elif x.isdigit() or (len(x) >= 1 and x[0] == "-" and x[1:].isdigit()):
- # Number
- n = int(x, 0)
- if (n == -1) or (n >= 1 and n <= 16):
- parsed_script = CScript(bytes(parsed_script) + bytes(CScript([n])))
- else:
- parsed_script += CScriptNum(int(x, 0))
- elif x.startswith("0x"):
- # Raw hex data, inserted NOT pushed onto stack:
- for i in xrange(2, len(x), 2):
- parsed_script = CScript(bytes(parsed_script) + bytes(chr(int(x[i:i+2],16))))
- elif x.startswith("'") and x.endswith("'") and len(x) >= 2:
- # Single-quoted string, pushed as data.
- parsed_script += CScript([x[1:-1]])
- else:
- # opcode, e.g. OP_ADD or ADD:
- tryopname = "OP_" + x
- if tryopname in OPCODES_BY_NAME:
- parsed_script += CScriptOp(OPCODES_BY_NAME["OP_" + x])
- else:
- print "ParseScript: error parsing '%s'" % x
- return ""
- return parsed_script
-
-class TestBuilder(object):
- def create_credit_tx(self, scriptPubKey, height):
- # self.tx1 is a coinbase transaction, modeled after the one created by script_tests.cpp
- # This allows us to reuse signatures created in the unit test framework.
- self.tx1 = create_coinbase(height) # this has a bip34 scriptsig,
- self.tx1.vin[0].scriptSig = CScript([0, 0]) # but this matches the unit tests
- self.tx1.vout[0].nValue = 0
- self.tx1.vout[0].scriptPubKey = scriptPubKey
- self.tx1.rehash()
- def create_spend_tx(self, scriptSig):
- self.tx2 = create_transaction(self.tx1, 0, CScript(), 0)
- self.tx2.vin[0].scriptSig = scriptSig
- self.tx2.vout[0].scriptPubKey = CScript()
- self.tx2.rehash()
- def rehash(self):
- self.tx1.rehash()
- self.tx2.rehash()
-
-# This test uses the (default) two nodes provided by ComparisonTestFramework,
-# specified on the command line with --testbinary and --refbinary.
-# See comptool.py
-class ScriptTest(ComparisonTestFramework):
-
- def run_test(self):
- # Set up the comparison tool TestManager
- test = TestManager(self, self.options.tmpdir)
- test.add_all_connections(self.nodes)
-
- # Load scripts
- self.scripts = ScriptTestFile([script_valid_file, script_invalid_file])
- self.scripts.load_files()
-
- # Some variables we re-use between test instances (to build blocks)
- self.tip = None
- self.block_time = None
-
- NetworkThread().start() # Start up network handling in another thread
- test.run()
-
- def generate_test_instance(self, pubkeystring, scriptsigstring):
- scriptpubkey = ParseScript(pubkeystring)
- scriptsig = ParseScript(scriptsigstring)
-
- test = TestInstance(sync_every_block=False)
- test_build = TestBuilder()
- test_build.create_credit_tx(scriptpubkey, self.height)
- test_build.create_spend_tx(scriptsig)
- test_build.rehash()
-
- block = create_block(self.tip, test_build.tx1, self.block_time)
- self.block_time += 1
- block.solve()
- self.tip = block.sha256
- self.height += 1
- test.blocks_and_transactions = [[block, True]]
-
- for i in xrange(100):
- block = create_block(self.tip, create_coinbase(self.height), self.block_time)
- self.block_time += 1
- block.solve()
- self.tip = block.sha256
- self.height += 1
- test.blocks_and_transactions.append([block, True])
-
- block = create_block(self.tip, create_coinbase(self.height), self.block_time)
- self.block_time += 1
- block.vtx.append(test_build.tx2)
- block.hashMerkleRoot = block.calc_merkle_root()
- block.rehash()
- block.solve()
- test.blocks_and_transactions.append([block, None])
- return test
-
- # This generates the tests for TestManager.
- def get_tests(self):
- self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
- self.block_time = 1333230000 # before the BIP16 switchover
- self.height = 1
-
- '''
- Create a new block with an anyone-can-spend coinbase
- '''
- block = create_block(self.tip, create_coinbase(self.height), self.block_time)
- self.block_time += 1
- block.solve()
- self.tip = block.sha256
- self.height += 1
- yield TestInstance(objects=[[block, True]])
-
- '''
- Build out to 100 blocks total, maturing the coinbase.
- '''
- test = TestInstance(objects=[], sync_every_block=False, sync_every_tx=False)
- for i in xrange(100):
- b = create_block(self.tip, create_coinbase(self.height), self.block_time)
- b.solve()
- test.blocks_and_transactions.append([b, True])
- self.tip = b.sha256
- self.block_time += 1
- self.height += 1
- yield test
-
- ''' Iterate through script tests. '''
- counter = 0
- for script_test in self.scripts.get_records():
- ''' Reset the blockchain to genesis block + 100 blocks. '''
- if self.nodes[0].getblockcount() > 101:
- self.nodes[0].invalidateblock(self.nodes[0].getblockhash(102))
- self.nodes[1].invalidateblock(self.nodes[1].getblockhash(102))
-
- self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
- self.height = 102
-
- [scriptsig, scriptpubkey, flags] = script_test[0:3]
- flags = ParseScriptFlags(flags)
-
- # We can use block time to determine whether the nodes should be
- # enforcing BIP16.
- #
- # We intentionally let the block time grow by 1 each time.
- # This forces the block hashes to differ between tests, so that
- # a call to invalidateblock doesn't interfere with a later test.
- if (flags & SCRIPT_VERIFY_P2SH):
- self.block_time = 1333238400 + counter # Advance to enforcing BIP16
- else:
- self.block_time = 1333230000 + counter # Before the BIP16 switchover
-
- print "Script test: [%s]" % script_test
-
- yield self.generate_test_instance(scriptpubkey, scriptsig)
- counter += 1
-
-if __name__ == '__main__':
- ScriptTest().main()
diff --git a/qa/rpc-tests/sendheaders.py b/qa/rpc-tests/sendheaders.py
new file mode 100755
index 0000000000..d7f4292090
--- /dev/null
+++ b/qa/rpc-tests/sendheaders.py
@@ -0,0 +1,519 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import time
+from test_framework.blocktools import create_block, create_coinbase
+
+'''
+SendHeadersTest -- test behavior of headers messages to announce blocks.
+
+Setup:
+
+- Two nodes, two p2p connections to node0. One p2p connection should only ever
+ receive inv's (omitted from testing description below, this is our control).
+ Second node is used for creating reorgs.
+
+Part 1: No headers announcements before "sendheaders"
+a. node mines a block [expect: inv]
+ send getdata for the block [expect: block]
+b. node mines another block [expect: inv]
+ send getheaders and getdata [expect: headers, then block]
+c. node mines another block [expect: inv]
+ peer mines a block, announces with header [expect: getdata]
+d. node mines another block [expect: inv]
+
+Part 2: After "sendheaders", headers announcements should generally work.
+a. peer sends sendheaders [expect: no response]
+ peer sends getheaders with current tip [expect: no response]
+b. node mines a block [expect: tip header]
+c. for N in 1, ..., 10:
+ * for announce-type in {inv, header}
+ - peer mines N blocks, announces with announce-type
+ [ expect: getheaders/getdata or getdata, deliver block(s) ]
+ - node mines a block [ expect: 1 header ]
+
+Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
+- For response-type in {inv, getheaders}
+  * node mines a 7-block reorg [ expect: headers announcement of 8 blocks ]
+ * node mines an 8-block reorg [ expect: inv at tip ]
+ * peer responds with getblocks/getdata [expect: inv, blocks ]
+ * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
+ * node mines another block at tip [ expect: inv ]
+ * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
+ * peer requests block [ expect: block ]
+ * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
+  * peer sends response-type (a getheaders for the tip, or an inv of the tip)
+    [ expect: headers announcements resume ]
+ * node mines 1 block [expect: 1 header, peer responds with getdata]
+
+Part 4: Test direct fetch behavior
+a. Announce 2 old block headers.
+ Expect: no getdata requests.
+b. Announce 3 new blocks via 1 headers message.
+ Expect: one getdata request for all 3 blocks.
+ (Send blocks.)
+c. Announce 1 header that forks off the last two blocks.
+ Expect: no response.
+d. Announce 1 more header that builds on that fork.
+ Expect: one getdata request for two blocks.
+e. Announce 16 more headers that build on that fork.
+ Expect: getdata request for 14 more blocks.
+f. Announce 1 more header that builds on that fork.
+ Expect: no response.
+'''
+
+class BaseNode(NodeConnCB):
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.create_callback_map()
+ self.connection = None
+ self.last_inv = None
+ self.last_headers = None
+ self.last_block = None
+ self.ping_counter = 1
+ self.last_pong = msg_pong(0)
+ self.last_getdata = None
+ self.sleep_time = 0.05
+ self.block_announced = False
+
+ def clear_last_announcement(self):
+ with mininode_lock:
+ self.block_announced = False
+ self.last_inv = None
+ self.last_headers = None
+
+ def add_connection(self, conn):
+ self.connection = conn
+
+ # Request data for a list of block hashes
+ def get_data(self, block_hashes):
+ msg = msg_getdata()
+ for x in block_hashes:
+ msg.inv.append(CInv(2, x))
+ self.connection.send_message(msg)
+
+ def get_headers(self, locator, hashstop):
+ msg = msg_getheaders()
+ msg.locator.vHave = locator
+ msg.hashstop = hashstop
+ self.connection.send_message(msg)
+
+ def send_block_inv(self, blockhash):
+ msg = msg_inv()
+ msg.inv = [CInv(2, blockhash)]
+ self.connection.send_message(msg)
+
+ # Wrapper for the NodeConn's send_message function
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def on_inv(self, conn, message):
+ self.last_inv = message
+ self.block_announced = True
+
+ def on_headers(self, conn, message):
+ self.last_headers = message
+ self.block_announced = True
+
+ def on_block(self, conn, message):
+ self.last_block = message.block
+ self.last_block.calc_sha256()
+
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ def on_pong(self, conn, message):
+ self.last_pong = message
+
+ # Test whether the last announcement we received had the
+ # right header or the right inv
+ # inv and headers should be lists of block hashes
+ def check_last_announcement(self, headers=None, inv=None):
+ expect_headers = headers if headers != None else []
+ expect_inv = inv if inv != None else []
+ test_function = lambda: self.block_announced
+ self.sync(test_function)
+ with mininode_lock:
+ self.block_announced = False
+
+ success = True
+ compare_inv = []
+ if self.last_inv != None:
+ compare_inv = [x.hash for x in self.last_inv.inv]
+ if compare_inv != expect_inv:
+ success = False
+
+ hash_headers = []
+ if self.last_headers != None:
+ # treat headers as a list of block hashes
+ hash_headers = [ x.sha256 for x in self.last_headers.headers ]
+ if hash_headers != expect_headers:
+ success = False
+
+ self.last_inv = None
+ self.last_headers = None
+ return success
+
+ # Syncing helpers
+ def sync(self, test_function, timeout=60):
+ while timeout > 0:
+ with mininode_lock:
+ if test_function():
+ return
+ time.sleep(self.sleep_time)
+ timeout -= self.sleep_time
+ raise AssertionError("Sync failed to complete")
+
+ def sync_with_ping(self, timeout=60):
+ self.send_message(msg_ping(nonce=self.ping_counter))
+ test_function = lambda: self.last_pong.nonce == self.ping_counter
+ self.sync(test_function, timeout)
+ self.ping_counter += 1
+ return
+
+ def wait_for_block(self, blockhash, timeout=60):
+ test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
+ self.sync(test_function, timeout)
+ return
+
+ def wait_for_getdata(self, hash_list, timeout=60):
+ if hash_list == []:
+ return
+
+ test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
+ self.sync(test_function, timeout)
+ return
+
+ def send_header_for_blocks(self, new_blocks):
+ headers_message = msg_headers()
+ headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
+ self.send_message(headers_message)
+
+ def send_getblocks(self, locator):
+ getblocks_message = msg_getblocks()
+ getblocks_message.locator.vHave = locator
+ self.send_message(getblocks_message)
+
+# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
+# "sendheaders" message.
+class InvNode(BaseNode):
+ def __init__(self):
+ BaseNode.__init__(self)
+
+# TestNode: This peer is the one we use for most of the testing.
+class TestNode(BaseNode):
+ def __init__(self):
+ BaseNode.__init__(self)
+
+class SendHeadersTest(BitcoinTestFramework):
+ def setup_chain(self):
+ initialize_chain_clean(self.options.tmpdir, 2)
+
+ def setup_network(self):
+        self.nodes = start_nodes(2, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
+ connect_nodes(self.nodes[0], 1)
+
+ # mine count blocks and return the new tip
+ def mine_blocks(self, count):
+ self.nodes[0].generate(count)
+ return int(self.nodes[0].getbestblockhash(), 16)
+
+ # mine a reorg that invalidates length blocks (replacing them with
+ # length+1 blocks).
+    # peers are the p2p connections we're using; we clear their state after the
+    # to-be-reorged-out blocks are mined, so that we don't break later tests.
+ # return the list of block hashes newly mined
+ def mine_reorg(self, length, peers):
+ self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
+ sync_blocks(self.nodes, wait=0.1)
+ [x.clear_last_announcement() for x in peers]
+
+ tip_height = self.nodes[1].getblockcount()
+ hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
+ self.nodes[1].invalidateblock(hash_to_invalidate)
+ all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
+ sync_blocks(self.nodes, wait=0.1)
+ return [int(x, 16) for x in all_hashes]
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ inv_node = InvNode()
+ test_node = TestNode()
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
+ # Set nServices to 0 for test_node, so no block download will occur outside of
+ # direct fetching
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
+ inv_node.add_connection(connections[0])
+ test_node.add_connection(connections[1])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Test logic begins here
+ inv_node.wait_for_verack()
+ test_node.wait_for_verack()
+
+ tip = int(self.nodes[0].getbestblockhash(), 16)
+
+ # PART 1
+ # 1. Mine a block; expect inv announcements each time
+ print "Part 1: headers don't start before sendheaders message..."
+ for i in xrange(4):
+ old_tip = tip
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+ # Try a few different responses; none should affect next announcement
+ if i == 0:
+ # first request the block
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip, timeout=5)
+ elif i == 1:
+ # next try requesting header and block
+ test_node.get_headers(locator=[old_tip], hashstop=tip)
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ test_node.clear_last_announcement() # since we requested headers...
+ elif i == 2:
+ # this time announce own block via headers
+ height = self.nodes[0].getblockcount()
+ last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
+ block_time = last_time + 1
+ new_block = create_block(tip, create_coinbase(height+1), block_time)
+ new_block.solve()
+ test_node.send_header_for_blocks([new_block])
+ test_node.wait_for_getdata([new_block.sha256], timeout=5)
+ test_node.send_message(msg_block(new_block))
+ test_node.sync_with_ping() # make sure this block is processed
+ inv_node.clear_last_announcement()
+ test_node.clear_last_announcement()
+
+ print "Part 1: success!"
+ print "Part 2: announce blocks with headers after sendheaders message..."
+ # PART 2
+ # 2. Send a sendheaders message and test that headers announcements
+ # commence and keep working.
+ test_node.send_message(msg_sendheaders())
+ prev_tip = int(self.nodes[0].getbestblockhash(), 16)
+ test_node.get_headers(locator=[prev_tip], hashstop=0L)
+ test_node.sync_with_ping()
+ test_node.clear_last_announcement() # Clear out empty headers response
+
+ # Now that we've synced headers, headers announcements should work
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+
+ height = self.nodes[0].getblockcount()+1
+ block_time += 10 # Advance far enough ahead
+ for i in xrange(10):
+ # Mine i blocks, and alternate announcing either via
+ # inv (of tip) or via headers. After each, new blocks
+ # mined by the node should successfully be announced
+ # with block header, even though the blocks are never requested
+ for j in xrange(2):
+ blocks = []
+ for b in xrange(i+1):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+ if j == 0:
+ # Announce via inv
+ test_node.send_block_inv(tip)
+ test_node.wait_for_getdata([tip], timeout=5)
+ # Test that duplicate inv's won't result in duplicate
+ # getdata requests, or duplicate headers announcements
+ inv_node.send_block_inv(tip)
+ # Should have received a getheaders as well!
+ test_node.send_header_for_blocks(blocks)
+ test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
+ [ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
+ inv_node.sync_with_ping()
+ else:
+ # Announce via headers
+ test_node.send_header_for_blocks(blocks)
+ test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
+ # Test that duplicate headers won't result in duplicate
+ # getdata requests (the check is further down)
+ inv_node.send_header_for_blocks(blocks)
+ inv_node.sync_with_ping()
+ [ test_node.send_message(msg_block(x)) for x in blocks ]
+ test_node.sync_with_ping()
+ inv_node.sync_with_ping()
+            # These blocks should not be announced to the inv node, since the
+            # inv node already announced them itself
+ assert_equal(inv_node.last_inv, None)
+ assert_equal(inv_node.last_headers, None)
+ inv_node.clear_last_announcement()
+ test_node.clear_last_announcement()
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+ height += 1
+ block_time += 1
+
+ print "Part 2: success!"
+
+ print "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..."
+
+ # PART 3. Headers announcements can stop after large reorg, and resume after
+ # getheaders or inv from peer.
+ for j in xrange(2):
+ # First try mining a reorg that can propagate with header announcement
+ new_block_hashes = self.mine_reorg(length=7, peers=[test_node, inv_node])
+ tip = new_block_hashes[-1]
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
+
+ block_time += 8
+
+ # Mine a too-large reorg, which should be announced with a single inv
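+            # (header announcements are capped at MAX_BLOCKS_TO_ANNOUNCE=8, so
+            # a longer reorg falls back to a single inv of the new tip)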
+ new_block_hashes = self.mine_reorg(length=8, peers=[test_node, inv_node])
+ tip = new_block_hashes[-1]
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+
+ block_time += 9
+
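+            # getblock expects a 64-character hex string, so zero-pad the hash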
+            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
+ fork_point = int(fork_point, 16)
+
+ # Use getblocks/getdata
+ test_node.send_getblocks(locator = [fork_point])
+ assert_equal(test_node.check_last_announcement(inv=new_block_hashes[0:-1]), True)
+ test_node.get_data(new_block_hashes)
+ test_node.wait_for_block(new_block_hashes[-1])
+
+ for i in xrange(3):
+ # Mine another block, still should get only an inv
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+ if i == 0:
+ # Just get the data -- shouldn't cause headers announcements to resume
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ elif i == 1:
+ # Send a getheaders message that shouldn't trigger headers announcements
+ # to resume (best header sent will be too old)
+ test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ test_node.clear_last_announcement()
+ elif i == 2:
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ # This time, try sending either a getheaders to trigger resumption
+ # of headers announcements, or mine a new block and inv it, also
+ # triggering resumption of headers announcements.
+ if j == 0:
+ test_node.get_headers(locator=[tip], hashstop=0L)
+ test_node.sync_with_ping()
+ else:
+ test_node.send_block_inv(tip)
+ test_node.sync_with_ping()
+            # New blocks should now be announced via headers
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+
+ print "Part 3: success!"
+
+ print "Part 4: Testing direct fetch behavior..."
+ tip = self.mine_blocks(1)
+ height = self.nodes[0].getblockcount() + 1
+ last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
+ block_time = last_time + 1
+
+ # Create 2 blocks. Send the blocks, then send the headers.
+ blocks = []
+ for b in xrange(2):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+ inv_node.send_message(msg_block(blocks[-1]))
+
+ inv_node.sync_with_ping() # Make sure blocks are processed
+ test_node.last_getdata = None
+        test_node.send_header_for_blocks(blocks)
+ test_node.sync_with_ping()
+ # should not have received any getdata messages
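+        # (inv_node already delivered both blocks, so the headers describe
+        # known blocks and there is nothing left to direct-fetch)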
+ with mininode_lock:
+ assert_equal(test_node.last_getdata, None)
+
+ # This time, direct fetch should work
+ blocks = []
+ for b in xrange(3):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+
+ test_node.send_header_for_blocks(blocks)
+ test_node.sync_with_ping()
+ test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time)
+
+ [ test_node.send_message(msg_block(x)) for x in blocks ]
+
+ test_node.sync_with_ping()
+
+ # Now announce a header that forks the last two blocks
+ tip = blocks[0].sha256
+ height -= 1
+ blocks = []
+
+ # Create extra blocks for later
+ for b in xrange(20):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+
+ # Announcing one block on fork should not trigger direct fetch
+ # (less work than tip)
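+        # (direct fetch only kicks in once the announced chain has at least
+        # as much work as the node's current tip)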
+ test_node.last_getdata = None
+ test_node.send_header_for_blocks(blocks[0:1])
+ test_node.sync_with_ping()
+ with mininode_lock:
+ assert_equal(test_node.last_getdata, None)
+
+ # Announcing one more block on fork should trigger direct fetch for
+ # both blocks (same work as tip)
+ test_node.send_header_for_blocks(blocks[1:2])
+ test_node.sync_with_ping()
+ test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time)
+
+ # Announcing 16 more headers should trigger direct fetch for 14 more
+ # blocks
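+        # (direct fetch is bounded by MAX_BLOCKS_IN_TRANSIT_PER_PEER=16; the
+        # two fork blocks requested above were never delivered, so they still
+        # occupy in-flight slots, leaving room for 14 more)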
+ test_node.send_header_for_blocks(blocks[2:18])
+ test_node.sync_with_ping()
+ test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time)
+
+ # Announcing 1 more header should not trigger any response
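+        # (all 16 in-flight slots for this peer are now taken, so no further
+        # blocks are requested)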
+ test_node.last_getdata = None
+ test_node.send_header_for_blocks(blocks[18:19])
+ test_node.sync_with_ping()
+ with mininode_lock:
+ assert_equal(test_node.last_getdata, None)
+
+ print "Part 4: success!"
+
+        # Finally, check that the inv node never received a getdata request
+        # throughout the test
+ assert_equal(inv_node.last_getdata, None)
+
+if __name__ == '__main__':
+ SendHeadersTest().main()
diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py
index c15c5fda09..ecfffc1b45 100755
--- a/qa/rpc-tests/smartfees.py
+++ b/qa/rpc-tests/smartfees.py
@@ -120,15 +120,26 @@ def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
last_e = e
valid_estimate = False
invalid_estimates = 0
- for e in all_estimates:
+ for i,e in enumerate(all_estimates): # estimate is for i+1
if e >= 0:
valid_estimate = True
+ # estimatesmartfee should return the same result
+ assert_equal(node.estimatesmartfee(i+1)["feerate"], e)
+
else:
invalid_estimates += 1
- # Once we're at a high enough confirmation count that we can give an estimate
- # We should have estimates for all higher confirmation counts
- if valid_estimate and e < 0:
- raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
+
+ # estimatesmartfee should still be valid
+ approx_estimate = node.estimatesmartfee(i+1)["feerate"]
+ answer_found = node.estimatesmartfee(i+1)["blocks"]
+ assert(approx_estimate > 0)
+ assert(answer_found > i+1)
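+            # Illustrative shape of the reply (values will vary): for target
+            # i+1 == 3 the node may return {"feerate": 0.00026, "blocks": 5},
+            # i.e. no estimate existed at 3 and the answer came from target 5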
+
+            # Once we're at a high enough confirmation count that we can give an
+            # estimate, we should have estimates for all higher confirmation counts
+ if valid_estimate:
+ raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
+
# Check on the expected number of different confirmation counts
# that we might not have valid estimates for
if invalid_estimates > max_invalid:
@@ -184,13 +195,13 @@ class EstimateFeeTest(BitcoinTestFramework):
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# (17k is room enough for 110 or so transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
- ["-blockprioritysize=1500", "-blockmaxsize=18000",
+ ["-blockprioritysize=1500", "-blockmaxsize=17000",
"-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
- # produces too small blocks (room for only 70 or so transactions)
- node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"]
+ # produces too small blocks (room for only 55 or so transactions)
+ node2args = ["-blockprioritysize=0", "-blockmaxsize=8000", "-maxorphantx=1000", "-relaypriority=0"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[0], 2)
@@ -229,22 +240,19 @@ class EstimateFeeTest(BitcoinTestFramework):
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
- print("Checking estimates for 1/2/3/6/15/25 blocks")
- print("Creating transactions and mining them with a huge block size")
- # Create transactions and mine 20 big blocks with node 0 such that the mempool is always emptied
- self.transact_and_mine(30, self.nodes[0])
- check_estimates(self.nodes[1], self.fees_per_kb, 1)
+ print("Will output estimates for 1/2/3/6/15/25 blocks")
- print("Creating transactions and mining them with a block size that can't keep up")
- # Create transactions and mine 30 small blocks with node 2, but create txs faster than we can mine
- self.transact_and_mine(20, self.nodes[2])
- check_estimates(self.nodes[1], self.fees_per_kb, 3)
+ for i in xrange(2):
+ print("Creating transactions and mining them with a block size that can't keep up")
+ # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
+ self.transact_and_mine(10, self.nodes[2])
+ check_estimates(self.nodes[1], self.fees_per_kb, 14)
- print("Creating transactions and mining them at a block size that is just big enough")
- # Generate transactions while mining 40 more blocks, this time with node1
- # which mines blocks with capacity just above the rate that transactions are being created
- self.transact_and_mine(40, self.nodes[1])
- check_estimates(self.nodes[1], self.fees_per_kb, 2)
+ print("Creating transactions and mining them at a block size that is just big enough")
+ # Generate transactions while mining 10 more blocks, this time with node1
+ # which mines blocks with capacity just above the rate that transactions are being created
+ self.transact_and_mine(10, self.nodes[1])
+ check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index b7d78e74fa..64985d58e2 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -751,8 +751,8 @@ class msg_inv(object):
class msg_getdata(object):
command = "getdata"
- def __init__(self):
- self.inv = []
+ def __init__(self, inv=None):
+        self.inv = inv if inv is not None else []
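+    # Illustrative use of the new constructor (node name is a stand-in):
+    # request one block by hash, where inv type 2 is MSG_BLOCK, e.g.
+    #   node.send_message(msg_getdata([CInv(2, block_hash)]))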
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
@@ -905,6 +905,20 @@ class msg_mempool(object):
def __repr__(self):
return "msg_mempool()"
+class msg_sendheaders(object):
+ command = "sendheaders"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_sendheaders()"
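+
+# Illustrative use of the BIP 130 opt-in: the message has no payload, so
+# enabling headers announcements is a single call, e.g.
+#   test_node.send_message(msg_sendheaders())
+# after which the peer is expected to announce new blocks via 'headers'
+# rather than 'inv'.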
# getheaders message has
# number of entries
@@ -990,6 +1004,17 @@ class NodeConnCB(object):
def __init__(self):
self.verack_received = False
+ # Spin until verack message is received from the node.
+ # Tests may want to use this as a signal that the test can begin.
+ # This can be called from the testing thread, so it needs to acquire the
+ # global lock.
+ def wait_for_verack(self):
+ while True:
+ with mininode_lock:
+ if self.verack_received:
+ return
+ time.sleep(0.05)
+
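+    # Illustrative sketch (assumes the standard test framework helpers):
+    #   node = NodeConnCB()
+    #   conn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node)
+    #   NetworkThread().start()
+    #   node.wait_for_verack()  # handshake done; safe to send test traffic
+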
# Derived classes should call this function once to set the message map
# which associates the derived classes' functions to incoming messages
def create_callback_map(self):
@@ -1084,7 +1109,7 @@ class NodeConn(asyncore.dispatcher):
"regtest": "\xfa\xbf\xb5\xda" # regtest
}
- def __init__(self, dstaddr, dstport, rpc, callback, net="regtest"):
+ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
@@ -1102,6 +1127,7 @@ class NodeConn(asyncore.dispatcher):
# stuff version msg into sendbuf
vt = msg_version()
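+        # services defaults to 1 (NODE_NETWORK); passing services=0 lets a
+        # test present itself as a peer that does not serve the full chain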
+ vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"